server:
  port: 9080
  compression:
    enabled: true
    min-response-size: 1024
    mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*

spring:
  h2:
    console:
      path: /h2-console/semantic
      enabled: true
  datasource:
    driver-class-name: org.h2.Driver
    schema: classpath:db/schema-h2.sql
    data: classpath:db/data-h2.sql
    url: jdbc:h2:mem:semantic;DATABASE_TO_UPPER=false
    username: root
    password: semantic

mybatis:
  mapper-locations: classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml

authentication:
  enable: true
  exclude:
    path: /api/auth/user/register,/api/auth/user/login
  token:
    http:
      header:
        key: Authorization

query:
  optimizer:
    enable: true
    time:
      threshold: 100
  dimension:
    topn: 20
  metric:
    topn: 20

corrector:
  additional:
    information: true

pyllm:
  url: http://127.0.0.1:9092

llm:
  parser:
    url: ${pyllm.url}

embedding:
  url: ${pyllm.url}

functionCall:
  url: ${pyllm.url}

text2sql:
  example:
    num: 1

s2:
  demo:
    names: S2VisitsDemo,S2ArtistDemo
    enableLLM: true

multi-turn:
  enable: false

langchain4j:
  #1.chat-model
  chat-model:
    provider: open_ai
    openai:
      # Replace with your own LLM configs.
      # Note: the default API key `demo` is provided by the langchain4j community
      # and limits each request to 1000 tokens.
      base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
      api-key: ${OPENAI_API_KEY:demo}
      model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
      temperature: ${OPENAI_TEMPERATURE:0.0}
      timeout: ${OPENAI_TIMEOUT:PT60S}

  #2.embedding-model
  #2.1 in_process (default)
  embedding-model:
    provider: in_process
    # inProcess:
    #   modelPath: /data/model.onnx
    #   vocabularyPath: /data/onnx_vocab.txt  # shibing624/text2vec-base-chinese

  #2.2 open_ai
  # embedding-model:
  #   provider: open_ai
  #   openai:
  #     api-key: api_key
  #     modelName: all-minilm-l6-v2.onnx

  #2.3 hugging_face
  # embedding-model:
  #   provider: hugging_face
  #   hugging-face:
  #     access-token: hg_access_token
  #     model-id: sentence-transformers/all-MiniLM-L6-v2
  #     timeout: 1h

#langchain4j log
logging:
  level:
    dev.langchain4j: DEBUG
    dev.ai4j.openai4j: DEBUG

inMemoryEmbeddingStore:
  persistent:
    path: /tmp
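
# Usage note: values written as ${VAR:default}, e.g. ${OPENAI_API_KEY:demo}, are
# standard Spring Boot property placeholders; the default after the colon applies
# unless an environment variable or JVM system property named VAR is set. The chat
# model can therefore be switched without editing this file, e.g. (illustrative
# values only):
#   export OPENAI_API_KEY=sk-xxxx
#   export OPENAI_MODEL_NAME=gpt-4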