(improvement)(chat) Remove langchain4j configuration files and configure the large model entirely through the UI. (#1442)

This commit is contained in:
lexluo09
2024-07-20 21:30:46 +08:00
committed by GitHub
parent 3797cc2ce8
commit d64ed02df9
12 changed files with 35 additions and 170 deletions

View File

@@ -80,15 +80,4 @@ com.tencent.supersonic.auth.authentication.interceptor.AuthenticationInterceptor
com.tencent.supersonic.auth.authentication.interceptor.DefaultAuthenticationInterceptor
com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor
### common SPIs
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
dev.langchain4j.spring.LangChain4jAutoConfig,\
dev.langchain4j.openai.spring.AutoConfig,\
dev.langchain4j.ollama.spring.AutoConfig,\
dev.langchain4j.azure.openai.spring.AutoConfig,\
dev.langchain4j.azure.aisearch.spring.AutoConfig,\
dev.langchain4j.anthropic.spring.AutoConfig
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor

View File

@@ -11,7 +11,4 @@ spring:
h2:
console:
path: /h2-console/semantic
enabled: true
config:
import:
- classpath:langchain4j-local.yaml
enabled: true

View File

@@ -3,7 +3,4 @@ spring:
url: jdbc:mysql://${DB_HOST}:${DB_PORT:3306}/${DB_NAME}?useUnicode=true&characterEncoding=UTF-8&useSSL=false&allowMultiQueries=true&allowPublicKeyRetrieval=true
username: ${DB_USERNAME}
password: ${DB_PASSWORD}
driver-class-name: com.mysql.jdbc.Driver
config:
import:
- classpath:langchain4j-prd.yaml
driver-class-name: com.mysql.jdbc.Driver

View File

@@ -1,42 +0,0 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
embedding-store:
persist-path: /tmp
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -1,42 +0,0 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
# embedding-store:
# persist-path: /tmp
chroma:
embedding-store:
baseUrl: http://${CHROMA_HOST}:8000
timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -11,7 +11,4 @@ spring:
h2:
console:
path: /h2-console/semantic
enabled: true
config:
import:
- classpath:langchain4j-local.yaml
enabled: true

View File

@@ -1,42 +0,0 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
embedding-store:
persist-path: /tmp
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s