@@ -65,6 +65,7 @@ export interface RequestPayload {
   frequency_penalty: number;
   top_p: number;
   max_tokens?: number;
+  max_completion_tokens?: number;
 }
 
 export interface DalleRequestPayload {
@@ -233,6 +234,11 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
+    // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+    if (isO1) {
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+    }
+
     // add max_tokens to vision model
     if (visionModel) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
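
Taken together, the two hunks route the configured token limit to whichever field the target model accepts: per the linked reasoning guide, o1-series models take max_completion_tokens rather than max_tokens. A minimal standalone sketch of that selection logic, assuming a hypothetical isO1Model helper and a payload type trimmed to the relevant fields (this is an illustration, not the PR's actual code):

// Sketch only. RequestPayload is trimmed to the relevant fields, and
// isO1Model is a hypothetical stand-in for however the caller computes
// the isO1 flag in the real code.
interface RequestPayload {
  model: string;
  max_tokens?: number;            // honored by most chat models
  max_completion_tokens?: number; // expected by o1-series reasoning models
}

// Assumption: o1 models are identifiable by their name prefix.
function isO1Model(model: string): boolean {
  return model.startsWith("o1");
}

function buildPayload(
  model: string,
  maxTokens: number,
  visionModel: boolean,
): RequestPayload {
  const payload: RequestPayload = { model };
  if (isO1Model(model)) {
    // Mirrors the added hunk: the limit is sent as max_completion_tokens.
    payload.max_completion_tokens = maxTokens;
  }
  if (visionModel) {
    // Mirrors the existing vision-model branch: floor the limit at 4000.
    payload.max_tokens = Math.max(maxTokens, 4000);
  }
  return payload;
}

// buildPayload("o1-mini", 8192, false)
//   => { model: "o1-mini", max_completion_tokens: 8192 }

Keeping both fields optional on the interface lets a single payload type serve both model families; the builder simply never sets the field the target model would reject.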