# File: supersonic/launchers/standalone/src/main/resources/langchain4j-config.yaml
langchain4j:
  open-ai:
    chat-model:
      # Replace with your LLM configs.
      # Note: The default API key `demo` is provided by the langchain4j community
      # and is limited to 1000 tokens per request.
      base-url: https://api.openai.com/v1
      api-key: demo
      model-name: gpt-3.5-turbo
      temperature: 0.0
      timeout: PT60S
  in-memory:
    embedding-model:
      model-name: bge-small-zh
    embedding-store:
      persist-path: /tmp
  # Alternative chat-model provider: Ollama (uncomment to enable).
  # ollama:
  #   chat-model:
  #     base-url: http://localhost:11434
  #     api-key: demo
  #     model-name: qwen:0.5b
  #     temperature: 0.0
  #     timeout: PT60S
  # Alternative embedding stores (uncomment one to replace in-memory).
  # chroma:
  #   embedding-store:
  #     baseUrl: http://0.0.0.0:8000
  #     timeout: 120s
  # milvus:
  #   embedding-store:
  #     host: localhost
  #     port: 2379
  #     uri: http://0.0.0.0:19530
  #     token: demo
  #     dimension: 512
  #     timeout: 120s