(improvement)(common) Upgraded langchain4j to version 0.31. (#1174)

This commit is contained in:
lexluo09
2024-06-20 17:31:19 +08:00
committed by GitHub
parent 1df1fe5ad6
commit 92db381b6f
37 changed files with 241 additions and 888 deletions

View File

@@ -1,3 +1,5 @@
### headless-chat SPIs
com.tencent.supersonic.headless.chat.mapper.SchemaMapper=\
com.tencent.supersonic.headless.chat.mapper.EmbeddingMapper, \
com.tencent.supersonic.headless.chat.mapper.KeywordMapper, \
@@ -9,20 +11,20 @@ com.tencent.supersonic.headless.chat.parser.SemanticParser=\
com.tencent.supersonic.headless.chat.parser.llm.LLMSqlParser, \
com.tencent.supersonic.headless.chat.parser.QueryTypeParser
com.tencent.supersonic.chat.server.parser.ChatParser=\
com.tencent.supersonic.chat.server.parser.NL2PluginParser, \
com.tencent.supersonic.chat.server.parser.MultiTurnParser, \
com.tencent.supersonic.chat.server.parser.NL2SQLParser
com.tencent.supersonic.chat.server.executor.ChatExecutor=\
com.tencent.supersonic.chat.server.executor.PluginExecutor, \
com.tencent.supersonic.chat.server.executor.SqlExecutor
com.tencent.supersonic.headless.chat.corrector.SemanticCorrector=\
com.tencent.supersonic.headless.chat.corrector.SchemaCorrector, \
com.tencent.supersonic.headless.chat.corrector.TimeCorrector, \
com.tencent.supersonic.headless.chat.corrector.GrammarCorrector
com.tencent.supersonic.headless.chat.knowledge.file.FileHandler=\
com.tencent.supersonic.headless.chat.knowledge.file.FileHandlerImpl
com.tencent.supersonic.headless.chat.parser.llm.DataSetResolver=\
com.tencent.supersonic.headless.chat.parser.llm.HeuristicDataSetResolver
### headless-core SPIs
com.tencent.supersonic.headless.core.parser.converter.HeadlessConverter=\
com.tencent.supersonic.headless.core.parser.converter.DefaultDimValueConverter,\
com.tencent.supersonic.headless.core.parser.converter.SqlVariableParseConverter,\
@@ -41,18 +43,24 @@ com.tencent.supersonic.headless.core.parser.SqlParser=\
com.tencent.supersonic.headless.core.cache.QueryCache=\
com.tencent.supersonic.headless.core.cache.DefaultQueryCache
### headless-server SPIs
com.tencent.supersonic.headless.server.processor.ResultProcessor=\
com.tencent.supersonic.headless.server.processor.ParseInfoProcessor, \
com.tencent.supersonic.headless.server.processor.SqlInfoProcessor
com.tencent.supersonic.headless.chat.parser.llm.DataSetResolver=\
com.tencent.supersonic.headless.chat.parser.llm.HeuristicDataSetResolver
com.tencent.supersonic.auth.authentication.interceptor.AuthenticationInterceptor=\
com.tencent.supersonic.auth.authentication.interceptor.DefaultAuthenticationInterceptor
### chat-server SPIs
com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor
com.tencent.supersonic.chat.server.parser.ChatParser=\
com.tencent.supersonic.chat.server.parser.NL2PluginParser, \
com.tencent.supersonic.chat.server.parser.MultiTurnParser,\
com.tencent.supersonic.chat.server.parser.NL2SQLParser
com.tencent.supersonic.chat.server.executor.ChatExecutor=\
com.tencent.supersonic.chat.server.executor.PluginExecutor, \
com.tencent.supersonic.chat.server.executor.SqlExecutor
com.tencent.supersonic.chat.server.plugin.recognize.PluginRecognizer=\
com.tencent.supersonic.chat.server.plugin.recognize.embedding.EmbeddingRecallRecognizer
@@ -67,5 +75,16 @@ com.tencent.supersonic.chat.server.processor.execute.ExecuteResultProcessor=\
com.tencent.supersonic.chat.server.processor.execute.DimensionRecommendProcessor,\
com.tencent.supersonic.chat.server.processor.execute.MetricRatioProcessor
### auth-authentication SPIs
com.tencent.supersonic.auth.authentication.interceptor.AuthenticationInterceptor=\
com.tencent.supersonic.auth.authentication.interceptor.DefaultAuthenticationInterceptor
com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor
### common SPIs
dev.langchain4j.store.embedding.S2EmbeddingStore=\
dev.langchain4j.store.embedding.InMemoryS2EmbeddingStore

View File

@@ -17,7 +17,14 @@ spring:
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
autoconfigure:
exclude:
- spring.dev.langchain4j.spring.LangChain4jAutoConfig
- spring.dev.langchain4j.openai.spring.AutoConfig
- spring.dev.langchain4j.ollama.spring.AutoConfig
- spring.dev.langchain4j.azure.openai.spring.AutoConfig
- spring.dev.langchain4j.azure.aisearch.spring.AutoConfig
- spring.dev.langchain4j.anthropic.spring.AutoConfig
mybatis:
mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
@@ -36,7 +43,11 @@ logging:
dev.ai4j.openai4j: DEBUG
s2:
pyllm:
url: http://127.0.0.1:9092
parser:
url: ${s2.pyllm.url}
strategy: ONE_PASS_SELF_CONSISTENCY
exemplar-recall:
number: 10
@@ -50,6 +61,14 @@ s2:
corrector:
additional:
information: true
date: true
functionCall:
url: ${s2.pyllm.url}
embedding:
url: ${s2.pyllm.url}
persistent:
path: /tmp
demo:
names: S2VisitsDemo,S2ArtistDemo
@@ -59,24 +78,6 @@ s2:
cache:
enable: false
langchain4j:
#1.chat-model
chat-model:
provider: open_ai
openai:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
#2.embedding-model
#2.1 in_memory(default)
embedding-model:
provider: in_process
# swagger配置
swagger:
title: 'SuperSonic平台接口文档'
@@ -88,4 +89,21 @@ swagger:
name:
email:
url: ''
version: 3.0
version: 3.0
langchain4j:
open-ai:
chat-model:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# java.lang.RuntimeException: dev.ai4j.openai4j.OpenAiHttpException: Too many requests
# embedding-model:
# base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
# api-key: ${OPENAI_API_KEY:demo}