From edb60c3391d2f5b547efc32fd271accab3e39a22 Mon Sep 17 00:00:00 2001
From: jerryjzhang
Date: Fri, 17 May 2024 15:51:16 +0800
Subject: [PATCH] (improvement)(launcher) Use the `demo` API key provided by the langchain4j community as the default config.

---
 headless/python/config/run_config.ini          |  4 ++--
 .../src/main/resources/application-local.yaml  | 19 +++++++++++++------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/headless/python/config/run_config.ini b/headless/python/config/run_config.ini
index 0522dcff0..126697ed2 100644
--- a/headless/python/config/run_config.ini
+++ b/headless/python/config/run_config.ini
@@ -23,7 +23,7 @@
 LLM_PROVIDER_NAME = openai
 
 [LLMModel]
-MODEL_NAME = gpt-3.5-turbo-16k
+MODEL_NAME = gpt-3.5-turbo
 OPENAI_API_KEY = YOUR_API_KEY
-OPENAI_API_BASE = YOUR_API_BASE
+OPENAI_API_BASE = http://YOUR_API_BASE
 TEMPERATURE = 0.0
diff --git a/launchers/standalone/src/main/resources/application-local.yaml b/launchers/standalone/src/main/resources/application-local.yaml
index bcf495411..3125fbae9 100644
--- a/launchers/standalone/src/main/resources/application-local.yaml
+++ b/launchers/standalone/src/main/resources/application-local.yaml
@@ -45,14 +45,20 @@ mybatis:
 corrector:
   additional:
     information: true
 
+pyllm:
+  url: http://127.0.0.1:9092
 llm:
   parser:
-    url: http://127.0.0.1:9092
+    url: ${pyllm.url}
   embedding:
-    url: http://127.0.0.1:9092
+    url: ${pyllm.url}
   functionCall:
-    url: http://127.0.0.1:9092
+    url: ${pyllm.url}
+
+text2sql:
+  example:
+    num: 1
 
 #langchain4j config
 s2:
@@ -62,9 +68,10 @@ s2:
       provider: open_ai
       openai:
         # Replace with your LLM configs
-        base-url: http://YOUR_API_URL
-        api-key: YOUR_API_KEY
-        model-name: gpt-3.5-turbo-16k
+        # Note: The `demo` API key below is provided by the langchain4j community and limits usage to 1000 tokens per request.
+        base-url: https://api.openai.com/v1
+        api-key: demo
+        model-name: gpt-3.5-turbo
         temperature: 0.0
         timeout: PT60S
   #2.embedding-model
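
Editor's note: the sketch below is a hand-written illustration of what the new default chat-model settings amount to in plain langchain4j code; it is not part of the patch. The class and variable names are hypothetical, and it assumes the 0.x builder API of langchain4j's openai module (OpenAiChatModel.builder()); the launcher itself constructs the model from the openai properties under the s2 block shown above.

import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.time.Duration;

// Hypothetical stand-alone equivalent of the new default YAML values.
public class DemoKeyChatModelSketch {

    public static void main(String[] args) {
        ChatLanguageModel chatModel = OpenAiChatModel.builder()
                .baseUrl("https://api.openai.com/v1") // base-url
                .apiKey("demo")                       // community demo key; per the patch note, limited to 1000 tokens per request
                .modelName("gpt-3.5-turbo")           // model-name
                .temperature(0.0)                     // temperature
                .timeout(Duration.ofSeconds(60))      // timeout: PT60S
                .build();

        // Quick smoke test of the default configuration.
        System.out.println(chatModel.generate("Reply with the single word: ok"));
    }
}

Swapping the `demo` key for a real OpenAI key (and, if needed, a different base-url) is all that is required to move past the per-request token limit.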
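
The first YAML hunk also replaces three hard-coded http://127.0.0.1:9092 values with a single ${pyllm.url} placeholder. The snippet below is only an illustrative sketch of how such a Spring placeholder resolves inside the launcher; the bean name is hypothetical and not part of the patch.

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Hypothetical bean: shows how llm.parser.url, llm.embedding.url and
// llm.functionCall.url now all resolve through the shared pyllm.url property.
@Component
public class PyLlmEndpointSketch {

    // Resolves to http://127.0.0.1:9092 unless pyllm.url is overridden,
    // e.g. with -Dpyllm.url=http://other-host:9092 on the launcher's JVM.
    @Value("${pyllm.url}")
    private String pyllmUrl;

    public String url() {
        return pyllmUrl;
    }
}

Centralizing the address this way means pointing the Java side at a different Python LLM service requires changing one property instead of three.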