(improvement)(launcher)Introduce supersonic-env.sh to incorporate LLM-related configs.

This commit is contained in:
jerryjzhang
2024-05-20 11:16:47 +08:00
parent cbafff0935
commit 542121210e
7 changed files with 63 additions and 169 deletions

View File

@@ -1,3 +1,10 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
h2:
console:
@@ -12,15 +19,8 @@ spring:
username: root
password: semantic
demo:
enabled: true
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
mybatis:
mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
authentication:
enable: true
@@ -31,6 +31,16 @@ authentication:
header:
key: Authorization
demo:
enabled: true
query:
optimizer:
enable: true
multi:
turn: false
time:
threshold: 100
@@ -39,20 +49,19 @@ dimension:
metric:
topn: 20
mybatis:
mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
corrector:
additional:
information: true
pyllm:
url: http://127.0.0.1:9092
llm:
parser:
url: ${pyllm.url}
embedding:
url: ${pyllm.url}
functionCall:
url: ${pyllm.url}
@@ -60,7 +69,6 @@ text2sql:
example:
num: 1
#langchain4j config
s2:
langchain4j:
#1.chat-model
@@ -68,20 +76,21 @@ s2:
provider: open_ai
openai:
# Replace with your LLM configs
# Note: Below API key `demo` is provided by langchain4j community which limits 1000 tokens per request.
base-url: https://api.openai.com/v1
api-key: demo
model-name: gpt-3.5-turbo
temperature: 0.0
timeout: PT60S
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
#2.embedding-model
#2.1 in_memory(default)
embedding-model:
provider: in_process
# inProcess:
# modelPath: /data/model.onnx
# vocabularyPath: /data/onnx_vocab.txt
# shibing624/text2vec-base-chinese
# inProcess:
# modelPath: /data/model.onnx
# vocabularyPath: /data/onnx_vocab.txt
# shibing624/text2vec-base-chinese
#2.2 open_ai
# embedding-model:
# provider: open_ai
@@ -105,11 +114,4 @@ logging:
inMemoryEmbeddingStore:
persistent:
path: /tmp
query:
optimizer:
enable: true
multi:
turn: false
num: 5
path: /tmp