[improvement](chat) Add an in_process provider and support offline loading of local embedding models. (#505)

Author: lexluo09
Date: 2023-12-14 14:16:03 +08:00 (committed by GitHub)
Commit: 287a6561ff (parent 169262cc62)
9 changed files with 292 additions and 55 deletions


@@ -43,31 +43,37 @@ functionCall:
   url: http://127.0.0.1:9092
 #langchain4j config
-langchain4j:
-  #1.chat-model
-  chat-model:
-    provider: open_ai
-    openai:
-      api-key: api_key
-      model-name: gpt-3.5-turbo
-      temperature: 0.0
-      timeout: PT60S
-  #2.embedding-model
-  #2.1 in_memory(default)
-  #2.2 open_ai
-  # embedding-model:
-  #   provider: open_ai
-  #   openai:
-  #     api-key: api_key
-  #     modelName: all-minilm-l6-v2.onnx
+s2:
+  langchain4j:
+    #1.chat-model
+    chat-model:
+      provider: open_ai
+      openai:
+        api-key: api_key
+        model-name: gpt-3.5-turbo
+        temperature: 0.0
+        timeout: PT60S
+    #2.embedding-model
+    #2.1 in_memory(default)
+    embedding-model:
+      provider: in_process
+      # inProcess:
+      #   modelPath: /data/model.onnx
+      #   vocabularyPath: /data/onnx_vocab.txt
+    #2.2 open_ai
+    # embedding-model:
+    #   provider: open_ai
+    #   openai:
+    #     api-key: api_key
+    #     modelName: all-minilm-l6-v2.onnx
-  #2.2 hugging_face
-  # embedding-model:
-  #   provider: hugging_face
-  #   hugging-face:
-  #     access-token: hg_access_token
-  #     model-id: sentence-transformers/all-MiniLM-L6-v2
-  #     timeout: 1h
+    #2.2 hugging_face
+    # embedding-model:
+    #   provider: hugging_face
+    #   hugging-face:
+    #     access-token: hg_access_token
+    #     model-id: sentence-transformers/all-MiniLM-L6-v2
+    #     timeout: 1h
 #langchain4j log
 logging:
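
For context, a minimal sketch (not taken from this commit) of how a provider switch like the `embedding-model.provider` setting above could resolve to a concrete langchain4j EmbeddingModel. OpenAiEmbeddingModel and AllMiniLmL6V2EmbeddingModel are real langchain4j classes; the in_process branch and the loadLocalOnnxModel helper are hypothetical placeholders for the project-specific offline loader this change adds, and mapping in_memory to the bundled all-MiniLM-L6-v2 model is an assumption.

import dev.langchain4j.model.embedding.AllMiniLmL6V2EmbeddingModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;

public class EmbeddingModelFactory {

    public static EmbeddingModel create(String provider,
                                        String apiKey,
                                        String modelPath,
                                        String vocabularyPath) {
        switch (provider) {
            case "open_ai":
                // Remote embeddings via the OpenAI API (requires an API key and network access).
                return OpenAiEmbeddingModel.builder()
                        .apiKey(apiKey)
                        .build();
            case "in_process":
                // Offline: load an ONNX model and vocabulary from local disk.
                // loadLocalOnnxModel is a hypothetical project-specific helper, not a langchain4j API.
                return loadLocalOnnxModel(modelPath, vocabularyPath);
            case "in_memory":
            default:
                // Assumed default: the bundled all-MiniLM-L6-v2 model, also fully offline.
                return new AllMiniLmL6V2EmbeddingModel();
        }
    }

    private static EmbeddingModel loadLocalOnnxModel(String modelPath, String vocabularyPath) {
        // Placeholder: in the real change this would wrap an in-process ONNX
        // encoder behind the EmbeddingModel interface using the configured paths.
        throw new UnsupportedOperationException("project-specific offline loader");
    }
}

With a switch like this, `provider: in_process` keeps embedding generation entirely local, so no API key or outbound network access is needed, which is the point of this change.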