(improvement)(pyllm)Use HTTP parameter llm_config in place of the default llm_config

jerryjzhang
2024-05-20 17:40:34 +08:00
parent 53b6c03288
commit eaec7b4663
11 changed files with 106 additions and 86 deletions


@@ -22,8 +22,6 @@ import com.tencent.supersonic.chat.server.service.PluginService;
import com.tencent.supersonic.common.pojo.SysParameter;
import com.tencent.supersonic.common.service.SysParameterService;
import com.tencent.supersonic.common.util.JsonUtil;
-import com.tencent.supersonic.headless.api.pojo.LLMConfig;
-import com.tencent.supersonic.common.pojo.enums.S2ModelProvider;
import com.tencent.supersonic.headless.api.pojo.response.ParseResp;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
@@ -177,10 +175,7 @@ public class ChatDemoLoader implements CommandLineRunner {
agentConfig.getTools().add(llmParserTool);
}
agent.setAgentConfig(JSONObject.toJSONString(agentConfig));
-LLMConfig llmConfig = new LLMConfig(S2ModelProvider.OPEN_AI.name(),
-        "", "your_key", "gpt-3.5-turbo");
MultiTurnConfig multiTurnConfig = new MultiTurnConfig(false);
-agent.setLlmConfig(llmConfig);
agent.setMultiTurnConfig(multiTurnConfig);
agentService.createAgent(agent, User.getFakeUser());
}
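
For reference, the hunk above drops the demo loader's hard-coded default. Per the commit message, the same values are now expected to reach the parser as an llm_config HTTP parameter. The sketch below only illustrates that idea: the parameter name, how it is attached to the request, and the serialization step are assumptions, not part of this hunk (LLMConfig, S2ModelProvider, and JSONObject are the same classes this file already uses).

// Sketch only: values the demo used to hard-code, now supplied per request.
LLMConfig llmConfig = new LLMConfig(S2ModelProvider.OPEN_AI.name(),
        "", "your_key", "gpt-3.5-turbo");
// Hypothetical: serialize and pass as the llm_config HTTP parameter
// (exact transport is not shown in this diff).
String llmConfigParam = JSONObject.toJSONString(llmConfig);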


@@ -3,7 +3,7 @@
# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
-OPENAI_API_BASE=https://api.openai.com/v1
+OPENAI_API_BASE=http://langchain4j.dev/demo/openai/v1
OPENAI_API_KEY=demo
OPENAI_MODEL_NAME=gpt-3.5-turbo
OPENAI_TEMPERATURE=0.0
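
These OPENAI_* variables are the knobs a langchain4j OpenAI client needs. A minimal sketch of how they might be consumed, assuming the standard langchain4j builder and direct env-var lookup (the actual wiring in this repo may differ, e.g. via Spring properties):

import dev.langchain4j.model.openai.OpenAiChatModel;

// Assumption: env vars are read directly and fed to the langchain4j builder.
public class OpenAiEnvDemo {
    public static void main(String[] args) {
        OpenAiChatModel chatModel = OpenAiChatModel.builder()
                .baseUrl(System.getenv("OPENAI_API_BASE"))      // http://langchain4j.dev/demo/openai/v1
                .apiKey(System.getenv("OPENAI_API_KEY"))        // "demo" community key, ~1000 tokens per request
                .modelName(System.getenv("OPENAI_MODEL_NAME"))  // gpt-3.5-turbo
                .temperature(Double.parseDouble(System.getenv("OPENAI_TEMPERATURE")))
                .build();
        System.out.println(chatModel.generate("Say hello."));
    }
}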