Files
supersonic/chat/python/config/run_config.ini
codescracker d79f73eab6 add auto-CoT feature (#483)
* 1. Refactor the retrieval module. 2. Refactor the HTTP service module. 3. Upgrade the text2sql output format and the parsing of absolute-time expressions in queries.

* fix bug.

* upgrade the config module; it now supports configuring any LLM supported by LangChain.

* fix conflicts.

* update text2sql config reload to be compatible with the new config format.

* modify default config.

* 1. Add self-consistency feature for text2sql. 2. Upgrade LLM API calls from sync to async. 3. Refactor the text2sql module. 4. Refactor the semantic retriever modules.

* merge with upstream master

* add a general retrieval service.

* add an API service to sql_agent for CRUD operations on few-shot examples.

* modify requirements

* add auto-CoT feature

---------

Co-authored-by: shaoweigong <shaoweigong@tencent.com>
2023-12-11 16:07:49 +08:00

29 lines · 682 B · INI

[LLMParser]
LLMPARSER_HOST = 127.0.0.1
LLMPARSER_PORT = 9092
[ChromaDB]
CHROMA_DB_PERSIST_DIR = chm_db
PRESET_QUERY_COLLECTION_NAME = preset_query_collection
SOLVED_QUERY_COLLECTION_NAME = solved_query_collection
TEXT2DSLAGENT_COLLECTION_NAME = text2dsl_agent_collection
TEXT2DSLAGENTACT_COLLECTION_NAME = text2dsl_agent_act_collection
TEXT2DSL_EXAMPLE_NUM = 15
TEXT2DSL_FEWSHOTS_NUM = 10
TEXT2DSL_SELF_CONSISTENCY_NUM = 5
ACT_MIN_WINDOWN_SIZE = 6
ACT_MAX_WINDOWN_SIZE = 10
[Text2Vec]
HF_TEXT2VEC_MODEL_NAME = GanymedeNil/text2vec-large-chinese
[LLMProvider]
LLM_PROVIDER_NAME = openai
[LLMModel]
MODEL_NAME = gpt-3.5-turbo-16k
OPENAI_API_KEY = YOUR_API_KEY
TEMPERATURE = 0.0
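
As a rough illustration (not part of the repository), the settings above can be read with Python's standard configparser. The section and key names below are taken from the file itself; the load_run_config helper name is hypothetical.

from configparser import ConfigParser

def load_run_config(path: str = "run_config.ini") -> ConfigParser:
    # Parse the INI file; keep option names in their original upper-case form.
    parser = ConfigParser()
    parser.optionxform = str  # preserve keys such as LLMPARSER_HOST
    parser.read(path, encoding="utf-8")
    return parser

config = load_run_config()
host = config["LLMParser"]["LLMPARSER_HOST"]                     # "127.0.0.1"
port = config.getint("LLMParser", "LLMPARSER_PORT")              # 9092
few_shots = config.getint("ChromaDB", "TEXT2DSL_FEWSHOTS_NUM")   # 10
temperature = config.getfloat("LLMModel", "TEMPERATURE")         # 0.0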