Pārlūkot izejas kodu

chore: o1模型使用max_completion_tokens

DDMeaqua 1 gadu atpakaļ
vecāks
revīzija
d5bda2904d
1 mainīts fails ar 6 papildinājumiem un 0 dzēšanām
  1. 6 0
      app/client/platforms/openai.ts

+ 6 - 0
app/client/platforms/openai.ts

@@ -65,6 +65,7 @@ export interface RequestPayload {
   frequency_penalty: number;
   top_p: number;
   max_tokens?: number;
+  max_completion_tokens?: number;
 }
 
 export interface DalleRequestPayload {
@@ -233,6 +234,11 @@ export class ChatGPTApi implements LLMApi {
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
+      // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+      if (isO1) {
+        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+      }
+
       // add max_tokens to vision model
       if (visionModel) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);