(improvement)(common) Upgraded langchain4j to version 0.31. (#1174)

Author: lexluo09
Date: 2024-06-20 17:31:19 +08:00
Committed by: GitHub
Parent: 1df1fe5ad6
Commit: 92db381b6f
37 changed files with 241 additions and 888 deletions

View File

@@ -1,20 +1,17 @@
package com.tencent.supersonic;
import dev.langchain4j.S2LangChain4jAutoConfiguration;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.data.mongo.MongoDataAutoConfiguration;
import org.springframework.boot.autoconfigure.mongo.MongoAutoConfiguration;
import org.springframework.context.annotation.Import;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
@SpringBootApplication(scanBasePackages = {"com.tencent.supersonic"},
@SpringBootApplication(scanBasePackages = {"com.tencent.supersonic", "dev.langchain4j.model"},
exclude = {MongoAutoConfiguration.class, MongoDataAutoConfiguration.class})
@EnableScheduling
@EnableAsync
@Import(S2LangChain4jAutoConfiguration.class)
@EnableSwagger2
public class StandaloneLauncher {
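
Since the launcher no longer imports S2LangChain4jAutoConfiguration and instead scans dev.langchain4j.model, the model beans are expected to come from the langchain4j Spring Boot auto-configurations registered below. A minimal sketch of consuming such a bean, assuming the starter exposes a ChatLanguageModel; the ChatModelSmokeCheck class and its ping method are illustrative, not part of this commit:

import dev.langchain4j.model.chat.ChatLanguageModel;
import org.springframework.stereotype.Component;

// Illustrative consumer: relies on the auto-configured ChatLanguageModel bean
// instead of the previously imported S2LangChain4jAutoConfiguration.
@Component
public class ChatModelSmokeCheck {

    private final ChatLanguageModel chatModel;

    public ChatModelSmokeCheck(ChatLanguageModel chatModel) {
        this.chatModel = chatModel;
    }

    public String ping() {
        // generate(String) is the synchronous completion call in langchain4j 0.31
        return chatModel.generate("ping");
    }
}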

View File

@@ -88,3 +88,11 @@ com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
dev.langchain4j.store.embedding.S2EmbeddingStore=\
dev.langchain4j.store.embedding.InMemoryS2EmbeddingStore
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
dev.langchain4j.spring.LangChain4jAutoConfig,\
dev.langchain4j.openai.spring.AutoConfig,\
dev.langchain4j.ollama.spring.AutoConfig,\
dev.langchain4j.azure.openai.spring.AutoConfig,\
dev.langchain4j.azure.aisearch.spring.AutoConfig,\
dev.langchain4j.anthropic.spring.AutoConfig
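
These EnableAutoConfiguration entries follow the Spring Boot 2.x convention: classes listed under that key in META-INF/spring.factories are discovered and applied unless excluded. A minimal sketch that lists what gets registered, assuming standard SpringFactoriesLoader behavior; the AutoConfigListing class is illustrative only:

import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.core.io.support.SpringFactoriesLoader;

import java.util.List;

public class AutoConfigListing {
    public static void main(String[] args) {
        // Reads every META-INF/spring.factories on the classpath and returns the class
        // names registered under the EnableAutoConfiguration key; the list should include
        // dev.langchain4j.spring.LangChain4jAutoConfig and the provider AutoConfig classes above.
        List<String> names = SpringFactoriesLoader.loadFactoryNames(
                EnableAutoConfiguration.class, AutoConfigListing.class.getClassLoader());
        names.forEach(System.out::println);
    }
}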

View File

@@ -17,7 +17,14 @@ spring:
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
autoconfigure:
exclude:
- spring.dev.langchain4j.spring.LangChain4jAutoConfig
- spring.dev.langchain4j.openai.spring.AutoConfig
- spring.dev.langchain4j.ollama.spring.AutoConfig
- spring.dev.langchain4j.azure.openai.spring.AutoConfig
- spring.dev.langchain4j.azure.aisearch.spring.AutoConfig
- spring.dev.langchain4j.anthropic.spring.AutoConfig
mybatis:
mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
@@ -67,42 +74,6 @@ s2:
names: S2VisitsDemo,S2ArtistDemo
enableLLM: true
langchain4j:
#1.chat-model
chat-model:
provider: open_ai
openai:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
#2.embedding-model
#2.1 in_memory(default)
embedding-model:
provider: in_process
# inProcess:
# modelPath: /data/model.onnx
# vocabularyPath: /data/onnx_vocab.txt
# shibing624/text2vec-base-chinese
#2.2 open_ai
# embedding-model:
# provider: open_ai
# openai:
# api-key: api_key
# modelName: all-minilm-l6-v2.onnx
#2.2 hugging_face
# embedding-model:
# provider: hugging_face
# hugging-face:
# access-token: hg_access_token
# model-id: sentence-transformers/all-MiniLM-L6-v2
# timeout: 1h
# swagger configuration
swagger:
title: 'SuperSonic平台接口文档'
@@ -115,3 +86,20 @@ swagger:
email:
url: ''
version: 3.0
langchain4j:
open-ai:
chat-model:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# java.lang.RuntimeException: dev.ai4j.openai4j.OpenAiHttpException: Too many requests
embedding-model:
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
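
The langchain4j.open-ai.chat-model properties above are bound by the OpenAI starter; building the same model by hand with the langchain4j 0.31 builder API would look roughly like the sketch below, using the same defaults as the YAML. OpenAiChatModelExample is illustrative, not project code:

import dev.langchain4j.model.openai.OpenAiChatModel;

import java.time.Duration;

public class OpenAiChatModelExample {
    public static void main(String[] args) {
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl(System.getenv().getOrDefault("OPENAI_API_BASE", "https://api.openai.com/v1"))
                .apiKey(System.getenv().getOrDefault("OPENAI_API_KEY", "demo"))
                .modelName(System.getenv().getOrDefault("OPENAI_MODEL_NAME", "gpt-3.5-turbo"))
                .temperature(0.0)
                .timeout(Duration.ofSeconds(60)) // PT60S in the YAML
                .build();
        System.out.println(model.generate("hello"));
    }
}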

View File

@@ -40,7 +40,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.getDimensionFilters().add(DataUtils.getFilter("user_name",
FilterOperatorEnum.EQUALS, "alice", "用户", 2L));
@@ -74,7 +73,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.setDateInfo(DataUtils.getDateConf(DateConf.DateMode.RECENT, unit, period, startDay, endDay));
expectedParseInfo.setQueryType(QueryType.METRIC);
@@ -103,7 +101,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.getDimensions().add(DataUtils.getSchemaElement("部门"));
expectedParseInfo.setDateInfo(DataUtils.getDateConf(DateConf.DateMode.RECENT, unit, period, startDay, endDay));
@@ -124,7 +121,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.setAggType(NONE);
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
List<String> list = new ArrayList<>();
list.add("alice");
list.add("lucy");
@@ -171,7 +167,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.getDimensions().add(DataUtils.getSchemaElement("部门"));
expectedParseInfo.setDateInfo(DataUtils.getDateConf(DateConf.DateMode.RECENT, unit, period, startDay, endDay));
@@ -197,7 +192,6 @@ public class MetricTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.getDimensionFilters().add(DataUtils.getFilter("user_name",
FilterOperatorEnum.EQUALS, "alice", "用户", 2L));

View File

@@ -29,7 +29,6 @@ public class MultiTurnsTest extends BaseTest {
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("人均访问次数"));
expectedParseInfo.getMetrics().add(DataUtils.getSchemaElement("访问用户数"));
expectedParseInfo.getDimensionFilters().add(DataUtils.getFilter("user_name",
FilterOperatorEnum.EQUALS, "alice", "用户", 2L));

View File

@@ -14,7 +14,7 @@ import com.tencent.supersonic.headless.api.pojo.request.SemanticQueryReq;
import com.tencent.supersonic.headless.api.pojo.response.SemanticQueryResp;
import com.tencent.supersonic.headless.server.service.QueryService;
import com.tencent.supersonic.util.DataUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.ArrayList;

View File

@@ -1,3 +1,5 @@
### headless-chat SPIs
com.tencent.supersonic.headless.chat.mapper.SchemaMapper=\
com.tencent.supersonic.headless.chat.mapper.EmbeddingMapper, \
com.tencent.supersonic.headless.chat.mapper.KeywordMapper, \
@@ -9,20 +11,20 @@ com.tencent.supersonic.headless.chat.parser.SemanticParser=\
com.tencent.supersonic.headless.chat.parser.llm.LLMSqlParser, \
com.tencent.supersonic.headless.chat.parser.QueryTypeParser
com.tencent.supersonic.chat.server.parser.ChatParser=\
com.tencent.supersonic.chat.server.parser.NL2PluginParser, \
com.tencent.supersonic.chat.server.parser.MultiTurnParser, \
com.tencent.supersonic.chat.server.parser.NL2SQLParser
com.tencent.supersonic.chat.server.executor.ChatExecutor=\
com.tencent.supersonic.chat.server.executor.PluginExecutor, \
com.tencent.supersonic.chat.server.executor.SqlExecutor
com.tencent.supersonic.headless.chat.corrector.SemanticCorrector=\
com.tencent.supersonic.headless.chat.corrector.SchemaCorrector, \
com.tencent.supersonic.headless.chat.corrector.TimeCorrector, \
com.tencent.supersonic.headless.chat.corrector.GrammarCorrector
com.tencent.supersonic.headless.chat.knowledge.file.FileHandler=\
com.tencent.supersonic.headless.chat.knowledge.file.FileHandlerImpl
com.tencent.supersonic.headless.chat.parser.llm.DataSetResolver=\
com.tencent.supersonic.headless.chat.parser.llm.HeuristicDataSetResolver
### headless-core SPIs
com.tencent.supersonic.headless.core.parser.converter.HeadlessConverter=\
com.tencent.supersonic.headless.core.parser.converter.DefaultDimValueConverter,\
com.tencent.supersonic.headless.core.parser.converter.SqlVariableParseConverter,\
@@ -41,18 +43,24 @@ com.tencent.supersonic.headless.core.parser.SqlParser=\
com.tencent.supersonic.headless.core.cache.QueryCache=\
com.tencent.supersonic.headless.core.cache.DefaultQueryCache
### headless-server SPIs
com.tencent.supersonic.headless.server.processor.ResultProcessor=\
com.tencent.supersonic.headless.server.processor.ParseInfoProcessor, \
com.tencent.supersonic.headless.server.processor.SqlInfoProcessor
com.tencent.supersonic.headless.chat.parser.llm.DataSetResolver=\
com.tencent.supersonic.headless.chat.parser.llm.HeuristicDataSetResolver
com.tencent.supersonic.auth.authentication.interceptor.AuthenticationInterceptor=\
com.tencent.supersonic.auth.authentication.interceptor.DefaultAuthenticationInterceptor
### chat-server SPIs
com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor
com.tencent.supersonic.chat.server.parser.ChatParser=\
com.tencent.supersonic.chat.server.parser.NL2PluginParser, \
com.tencent.supersonic.chat.server.parser.MultiTurnParser,\
com.tencent.supersonic.chat.server.parser.NL2SQLParser
com.tencent.supersonic.chat.server.executor.ChatExecutor=\
com.tencent.supersonic.chat.server.executor.PluginExecutor, \
com.tencent.supersonic.chat.server.executor.SqlExecutor
com.tencent.supersonic.chat.server.plugin.recognize.PluginRecognizer=\
com.tencent.supersonic.chat.server.plugin.recognize.embedding.EmbeddingRecallRecognizer
@@ -67,5 +75,16 @@ com.tencent.supersonic.chat.server.processor.execute.ExecuteResultProcessor=\
com.tencent.supersonic.chat.server.processor.execute.DimensionRecommendProcessor,\
com.tencent.supersonic.chat.server.processor.execute.MetricRatioProcessor
### auth-authentication SPIs
com.tencent.supersonic.auth.authentication.interceptor.AuthenticationInterceptor=\
com.tencent.supersonic.auth.authentication.interceptor.DefaultAuthenticationInterceptor
com.tencent.supersonic.auth.api.authentication.adaptor.UserAdaptor=\
com.tencent.supersonic.auth.authentication.adaptor.DefaultUserAdaptor
### common SPIs
dev.langchain4j.store.embedding.S2EmbeddingStore=\
dev.langchain4j.store.embedding.InMemoryS2EmbeddingStore
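
The spring.factories entries above group the project's SPI bindings by module. A typical way to resolve one of these keys is Spring's SpringFactoriesLoader, sketched below under the assumption that the listed implementations have public no-arg constructors; the actual loading code in SuperSonic may differ:

import com.tencent.supersonic.headless.chat.mapper.SchemaMapper;
import org.springframework.core.io.support.SpringFactoriesLoader;

import java.util.List;

public class SchemaMapperSpiExample {
    public static void main(String[] args) {
        // Instantiates EmbeddingMapper, KeywordMapper, ... in the order listed above,
        // assuming each implementation exposes a public no-arg constructor.
        List<SchemaMapper> mappers = SpringFactoriesLoader.loadFactories(
                SchemaMapper.class, SchemaMapperSpiExample.class.getClassLoader());
        mappers.forEach(m -> System.out.println(m.getClass().getName()));
    }
}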

View File

@@ -17,7 +17,14 @@ spring:
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
autoconfigure:
exclude:
- spring.dev.langchain4j.spring.LangChain4jAutoConfig
- spring.dev.langchain4j.openai.spring.AutoConfig
- spring.dev.langchain4j.ollama.spring.AutoConfig
- spring.dev.langchain4j.azure.openai.spring.AutoConfig
- spring.dev.langchain4j.azure.aisearch.spring.AutoConfig
- spring.dev.langchain4j.anthropic.spring.AutoConfig
mybatis:
mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
@@ -36,7 +43,11 @@ logging:
dev.ai4j.openai4j: DEBUG
s2:
pyllm:
url: http://127.0.0.1:9092
parser:
url: ${s2.pyllm.url}
strategy: ONE_PASS_SELF_CONSISTENCY
exemplar-recall:
number: 10
@@ -50,6 +61,14 @@ s2:
corrector:
additional:
information: true
date: true
functionCall:
url: ${s2.pyllm.url}
embedding:
url: ${s2.pyllm.url}
persistent:
path: /tmp
demo:
names: S2VisitsDemo,S2ArtistDemo
@@ -59,24 +78,6 @@ s2:
cache:
enable: false
langchain4j:
#1.chat-model
chat-model:
provider: open_ai
openai:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
#2.embedding-model
#2.1 in_memory(default)
embedding-model:
provider: in_process
# swagger configuration
swagger:
title: 'SuperSonic平台接口文档'
@@ -88,4 +89,21 @@ swagger:
name:
email:
url: ''
version: 3.0
version: 3.0
langchain4j:
open-ai:
chat-model:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# java.lang.RuntimeException: dev.ai4j.openai4j.OpenAiHttpException: Too many requests
# embedding-model:
# base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
# api-key: ${OPENAI_API_KEY:demo}
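
The OpenAI embedding-model section stays commented out here because the shared demo key hits the "Too many requests" error noted above. One local alternative is an in-process embedding model; a minimal sketch, assuming the langchain4j-embeddings-all-minilm-l6-v2 module (0.31) is on the classpath, with LocalEmbeddingExample being illustrative and not part of the commit:

import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;

public class LocalEmbeddingExample {
    public static void main(String[] args) {
        // Runs the all-MiniLM-L6-v2 ONNX model in-process; no API key or network call needed.
        EmbeddingModel embeddingModel = new AllMiniLmL6V2EmbeddingModel();
        Embedding embedding = embeddingModel.embed("访问次数").content();
        System.out.println(embedding.vector().length); // 384 dimensions for this model
    }
}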