Mirror of https://github.com/tencentmusic/supersonic.git
(improvement)(parser) Add json format to LLM request for performance improvement (#2352)
@@ -28,6 +28,8 @@ public class ChatModelConfig implements Serializable {
     private Boolean logRequests = false;
     private Boolean logResponses = false;
     private Boolean enableSearch = false;
+    private Boolean jsonFormat = false;
+    private String jsonFormatType = "json_schema";
 
     public String keyDecrypt() {
         return AESEncryptionUtil.aesDecryptECB(getApiKey());
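The two new fields let a model configuration opt into structured JSON output; jsonFormat defaults to false, so existing configurations are unaffected. A minimal usage sketch, assuming Lombok-style setters that match the getters used elsewhere in this diff (the setter names and values are illustrative, not part of the commit):

    // Enable JSON output for a parser model (illustrative values; setters assumed from the Lombok-style getters).
    ChatModelConfig modelConfig = new ChatModelConfig();
    modelConfig.setJsonFormat(true);              // route the factory into JSON mode
    modelConfig.setJsonFormatType("json_schema"); // default; "json_object" is the other OpenAI response_format type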
@@ -22,13 +22,17 @@ public class OpenAiModelFactory implements ModelFactory, InitializingBean {
 
     @Override
     public ChatLanguageModel createChatModel(ChatModelConfig modelConfig) {
-        return OpenAiChatModel.builder().baseUrl(modelConfig.getBaseUrl())
+        OpenAiChatModel.OpenAiChatModelBuilder openAiChatModelBuilder = OpenAiChatModel.builder().baseUrl(modelConfig.getBaseUrl())
                 .modelName(modelConfig.getModelName()).apiKey(modelConfig.keyDecrypt())
                 .apiVersion(modelConfig.getApiVersion()).temperature(modelConfig.getTemperature())
                 .topP(modelConfig.getTopP()).maxRetries(modelConfig.getMaxRetries())
                 .timeout(Duration.ofSeconds(modelConfig.getTimeOut()))
                 .logRequests(modelConfig.getLogRequests())
-                .logResponses(modelConfig.getLogResponses()).build();
+                .logResponses(modelConfig.getLogResponses());
+        if (modelConfig.getJsonFormat()) {
+            openAiChatModelBuilder.strictJsonSchema(true).responseFormat(modelConfig.getJsonFormatType());
+        }
+        return openAiChatModelBuilder.build();
     }
 
     @Override
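When jsonFormat is enabled, the factory asks langchain4j's OpenAiChatModel for the configured response format and strict schema adherence, so the parser can rely on a machine-readable reply instead of post-processing free text. A rough end-to-end sketch under the same assumptions as above (direct construction of the factory is for illustration only; in the project it is wired up as a Spring bean):

    // Build a JSON-mode chat model from the config sketched above and call it once.
    // generate(String) is langchain4j's simple entry point; the prompt and reply are illustrative.
    ChatLanguageModel chatModel = new OpenAiModelFactory().createChatModel(modelConfig);
    String reply = chatModel.generate("Return a JSON object with keys metric and dimension for: total sales by city");
    // With jsonFormat = true the reply is expected to be well-formed JSON, e.g. {"metric":"total sales","dimension":"city"}

Note that OpenAI's strict mode applies to the json_schema response format, where the reply is constrained to a supplied schema; with json_object the model is only constrained to emit valid JSON.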