(improvement)(config) Remove the supersonic-env configuration files and use llm-config.yaml uniformly. (#1238)

lexluo09
2024-06-27 14:27:46 +08:00
committed by GitHub
parent ff20ae4006
commit 90f7a79380
10 changed files with 42 additions and 81 deletions

View File

@@ -4,7 +4,6 @@ chcp 65001
 set "sbinDir=%~dp0"
 call %sbinDir%/supersonic-common.bat %*
-call %sbinDir%/../conf/supersonic-env.bat %*
 set "command=%~1"
 set "service=%~2"

View File

@@ -3,10 +3,6 @@
 sbinDir=$(cd "$(dirname "$0")"; pwd)
 source $sbinDir/supersonic-common.sh
-set -a
-source $sbinDir/../conf/supersonic-env.sh
-set +a
 command=$1
 service=$2
 if [ -z "$service" ]; then
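Since the daemon script no longer sources conf/supersonic-env.sh, the OPENAI_* variables must reach the process through the shell environment itself. A minimal sketch of the replacement workflow, assuming the launcher keeps the command/service arguments shown above (the script name and "restart chat" invocation are illustrative, not taken from this commit):

# export the variables the old supersonic-env.sh used to set (values are placeholders)
export OPENAI_API_BASE="https://api.openai.com/v1"
export OPENAI_API_KEY="sk-your-real-key"   # the `demo` default is rate-limited
export OPENAI_MODEL_NAME="gpt-3.5-turbo"
./supersonic-daemon.sh restart chat        # hypothetical invocation

The removed `set -a` / `set +a` pair had auto-exported every variable the env file defined; plain `export` statements achieve the same effect for the variables you actually override.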

View File

@@ -85,33 +85,4 @@ swagger:
     name:
     email:
     url: ''
   version: 3.0
-langchain4j:
-  open-ai:
-    chat-model:
-      # Replace with your LLM configs
-      # Note: The default API key `demo` is provided by langchain4j community
-      # which limits 1000 tokens per request.
-      base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
-      api-key: ${OPENAI_API_KEY:demo}
-      model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
-      temperature: ${OPENAI_TEMPERATURE:0.0}
-      timeout: ${OPENAI_TIMEOUT:PT60S}
-    # embedding-model:
-    #   base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
-    #   api-key: ${OPENAI_API_KEY:demo}
-  # dashscope:
-  #   chat-model:
-  #     api-key: ${OPENAI_API_KEY:demo}
-  #     model-name: qwen-max-1201
-  #   embedding-model:
-  #     api-key: ${OPENAI_API_KEY:demo}
-  in-memory:
-    embedding-model:
-      model-name: bge-small-zh
-      #modelPath: /data/model.onnx
-      #vocabularyPath: /data/onnx_vocab.txt
-    embedding-store:
-      file-path: /tmp
-version: 3.0

View File

@@ -3,5 +3,8 @@ spring:
     active: local
   application:
     name: chat
+  config:
+    import:
+      - classpath:llm-config.yaml
 mybatis:
   mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
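The added spring.config.import entry (a Spring Boot 2.4+ feature) merges llm-config.yaml from the classpath into the service's configuration, so the LLM settings live in one shared file instead of per-module copies. As a hedged sketch, the same property can also be supplied from outside the jar through Spring's relaxed binding, for example to layer a local override file on top (the override file and jar names here are hypothetical):

# SPRING_CONFIG_IMPORT binds to spring.config.import via relaxed binding
SPRING_CONFIG_IMPORT="classpath:llm-config.yaml,optional:file:./conf/llm-config-local.yaml" \
  java -jar chat-service.jar

The optional: prefix keeps startup from failing when the override file is absent.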

View File: llm-config.yaml

@@ -0,0 +1,16 @@
+langchain4j:
+  open-ai:
+    chat-model:
+      # Replace with your LLM configs
+      # Note: The default API key `demo` is provided by langchain4j community
+      # which limits 1000 tokens per request.
+      base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
+      api-key: ${OPENAI_API_KEY:demo}
+      model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
+      temperature: ${OPENAI_TEMPERATURE:0.0}
+      timeout: ${OPENAI_TIMEOUT:PT60S}
+  in-memory:
+    embedding-model:
+      model-name: bge-small-zh
+    embedding-store:
+      file-path: /tmp
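Each ${NAME:default} placeholder resolves from the Spring environment (OS environment variables included) and falls back to the literal after the colon, so a single setting can be overridden per launch without editing the file; the timeout default PT60S is an ISO-8601 duration, i.e. 60 seconds. A sketch of a one-off override, with a hypothetical start command:

# only the exported variables change; everything else keeps its llm-config.yaml default
OPENAI_API_KEY="sk-your-real-key" OPENAI_TIMEOUT=PT120S ./supersonic-daemon.sh start chat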

View File: conf/supersonic-env.bat

@@ -1,9 +0,0 @@
-rem Replace with your LLM configs
-rem Note: The default API key `demo` is provided by langchain4j community
-rem which limits 1000 tokens per request.
-set OPENAI_API_BASE=http://langchain4j.dev/demo/openai/v1
-set OPENAI_API_KEY=demo
-set OPENAI_MODEL_NAME=gpt-3.5-turbo
-set OPENAI_TEMPERATURE=0.0
-set OPENAI_TIMEOUT=PT60S

View File: conf/supersonic-env.sh

@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-# Replace with your LLM configs
-# Note: The default API key `demo` is provided by langchain4j community
-# which limits 1000 tokens per request.
-OPENAI_API_BASE=http://langchain4j.dev/demo/openai/v1
-OPENAI_API_KEY=demo
-OPENAI_MODEL_NAME=gpt-3.5-turbo
-OPENAI_TEMPERATURE=0.0
-OPENAI_TIMEOUT=PT60S

View File

@@ -89,28 +89,4 @@ swagger:
     name:
     email:
     url: ''
   version: 3.0
-langchain4j:
-  open-ai:
-    chat-model:
-      # Replace with your LLM configs
-      # Note: The default API key `demo` is provided by langchain4j community
-      # which limits 1000 tokens per request.
-      base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
-      api-key: ${OPENAI_API_KEY:demo}
-      model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
-      temperature: ${OPENAI_TEMPERATURE:0.0}
-      timeout: ${OPENAI_TIMEOUT:PT60S}
-    # java.lang.RuntimeException: dev.ai4j.openai4j.OpenAiHttpException: Too many requests
-    # embedding-model:
-    #   base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
-    #   api-key: ${OPENAI_API_KEY:demo}
-  in-memory:
-    embedding-model:
-      model-name: bge-small-zh
-      #modelPath: /data/model.onnx
-      #vocabularyPath: /data/onnx_vocab.txt
-    embedding-store:
-      file-path: /tmp
-version: 3.0

View File

@@ -3,5 +3,8 @@ spring:
     active: local
   application:
     name: chat
+  config:
+    import:
+      - classpath:llm-config.yaml
 mybatis:
   mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml

View File: llm-config.yaml

@@ -0,0 +1,16 @@
+langchain4j:
+  open-ai:
+    chat-model:
+      # Replace with your LLM configs
+      # Note: The default API key `demo` is provided by langchain4j community
+      # which limits 1000 tokens per request.
+      base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
+      api-key: ${OPENAI_API_KEY:demo}
+      model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
+      temperature: ${OPENAI_TEMPERATURE:0.0}
+      timeout: ${OPENAI_TIMEOUT:PT60S}
+  in-memory:
+    embedding-model:
+      model-name: bge-small-zh
+    embedding-store:
+      file-path: /tmp