(improvement)(launcher) Separate prd and local profiles for langchain4j YAML.

This commit is contained in:
jerryjzhang
2024-07-01 15:40:36 +08:00
parent 7a0ae9f075
commit 07a981216f
10 changed files with 188 additions and 178 deletions

View File

@@ -1,15 +1,4 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
h2:
console:
path: /h2-console/semantic
enabled: true
datasource:
driver-class-name: org.h2.Driver
schema: classpath:db/schema-h2.sql
@@ -17,23 +6,10 @@ spring:
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
logging:
level:
dev.langchain4j: DEBUG
dev.ai4j.openai4j: DEBUG
# swagger配置
swagger:
title: 'SuperSonic平台接口文档'
base:
package: com.tencent.supersonic
description: 'SuperSonic平台接口文档'
url: ''
contact:
name:
email:
url: ''
version: 3.0
h2:
console:
path: /h2-console/semantic
enabled: true
config:
import:
- classpath:langchain4j-local.yaml

View File

@@ -1,33 +1,9 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
datasource:
url: jdbc:mysql://${DB_HOST}:3306/${DB_NAME}?useUnicode=true&characterEncoding=UTF-8&useSSL=false&allowMultiQueries=true&allowPublicKeyRetrieval=true
username: ${DB_USERNAME}
password: ${DB_PASSWORD}
driver-class-name: com.mysql.jdbc.Driver
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
logging:
level:
dev.langchain4j: DEBUG
dev.ai4j.openai4j: DEBUG
# swagger配置
swagger:
title: 'SuperSonic平台接口文档'
base:
package: com.tencent.supersonic
description: 'SuperSonic平台接口文档'
url: ''
contact:
name:
email:
url: ''
version: 3.0
config:
import:
- classpath:langchain4j-prd.yaml

View File

@@ -1,3 +1,10 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
profiles:
active: local
@@ -6,7 +13,6 @@ spring:
config:
import:
- classpath:s2-config.yaml
- classpath:langchain4j-config.yaml
autoconfigure:
exclude:
- spring.dev.langchain4j.spring.LangChain4jAutoConfig
@@ -17,4 +23,21 @@ spring:
- spring.dev.langchain4j.anthropic.spring.AutoConfig
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
logging:
level:
dev.langchain4j: DEBUG
dev.ai4j.openai4j: DEBUG
swagger:
title: 'SuperSonic API Documentation'
base:
package: com.tencent.supersonic
description: 'SuperSonic API Documentation'
url: ''
contact:
name:
email:
url: ''
version: 3.0

View File

@@ -1,39 +0,0 @@
langchain4j:
open-ai:
chat-model:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: https://api.openai.com/v1
api-key: demo
model-name: gpt-3.5-turbo
temperature: 0.0
timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
chroma:
embedding-store:
baseUrl: http://0.0.0.0:8000
timeout: 120s
# ollama:
# chat-model:
# base-url: http://localhost:11434
# api-key: demo
# model-name: qwen:0.5b
# temperature: 0.0
# timeout: PT60S
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -0,0 +1,42 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
embedding-store:
persist-path: /tmp
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -0,0 +1,42 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
# embedding-store:
# persist-path: /tmp
chroma:
embedding-store:
baseUrl: http://0.0.0.0:8000
timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -1,39 +0,0 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
h2:
console:
path: /h2-console/semantic
enabled: true
datasource:
driver-class-name: org.h2.Driver
schema: classpath:db/schema-h2.sql
data: classpath:db/data-h2.sql
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
logging:
level:
dev.langchain4j: DEBUG
dev.ai4j.openai4j: DEBUG
# swagger配置
swagger:
title: 'SuperSonic平台接口文档'
base:
package: com.tencent.supersonic
description: 'SuperSonic平台接口文档'
url: ''
contact:
name:
email:
url: ''
version: 3.0

View File

@@ -1,12 +1,19 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
spring:
profiles:
active: prd
active: local
application:
name: chat
config:
import:
- classpath:s2-config.yaml
- classpath:langchain4j-config.yaml
- classpath:langchain4j-local.yaml
autoconfigure:
exclude:
- spring.dev.langchain4j.spring.LangChain4jAutoConfig
@@ -15,5 +22,22 @@ spring:
- spring.dev.langchain4j.azure.openai.spring.AutoConfig
- spring.dev.langchain4j.azure.aisearch.spring.AutoConfig
- spring.dev.langchain4j.anthropic.spring.AutoConfig
h2:
console:
path: /h2-console/semantic
enabled: true
datasource:
driver-class-name: org.h2.Driver
schema: classpath:db/schema-h2.sql
data: classpath:db/data-h2.sql
url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
username: root
password: semantic
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml
logging:
level:
dev.langchain4j: DEBUG
dev.ai4j.openai4j: DEBUG

View File

@@ -1,37 +0,0 @@
langchain4j:
open-ai:
chat-model:
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: https://api.openai.com/v1
api-key: demo
model-name: gpt-3.5-turbo
temperature: 0.0
timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
embedding-store:
persist-path: /tmp
# ollama:
# chat-model:
# base-url: http://localhost:11434
# api-key: demo
# model-name: qwen:0.5b
# temperature: 0.0
# timeout: PT60S
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s

View File

@@ -0,0 +1,42 @@
langchain4j:
# Replace `open_ai` with ollama/zhipu/azure/dashscope as needed.
# Note:
# 1. `open_ai` is commonly used to connect to cloud-based models;
# 2. `ollama` is commonly used to connect to local models.
open-ai:
chat-model:
# It is recommended to replace with your API key in production.
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
# embedding-model:
# base-url: https://api.openai.com/v1
# api-key: demo
# model-name: text-embedding-3-small
# timeout: PT60S
in-memory:
embedding-model:
model-name: bge-small-zh
embedding-store:
persist-path: /tmp
# chroma:
# embedding-store:
# baseUrl: http://0.0.0.0:8000
# timeout: 120s
# milvus:
# embedding-store:
# host: localhost
# port: 2379
# uri: http://0.0.0.0:19530
# token: demo
# dimension: 512
# timeout: 120s