@@ -7,7 +7,10 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -15,12 +18,14 @@ import {
   LLMModel,
   SpeechOptions,
   MultimodalContent,
+  MultimodalContentForAlibaba,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   getTimeoutMSByModel,
+  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));
-
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
       },
     };
 
+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+            ? getMessageTextContentWithoutThinking(v)
+            : getMessageTextContent(v)
+      ) as any;
+
+      messages.push({ role: v.role, content });
+    }
+
     const shouldStream = !!options.config.stream;
     const requestPayload: RequestPayload = {
       model: modelConfig.model,
@@ -129,7 +141,7 @@
       "X-DashScope-SSE": shouldStream ? "enable" : "disable",
     };
 
-    const chatPath = this.path(Alibaba.ChatPath);
+    const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
     const chatPayload = {
       method: "POST",
       body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@
           const json = JSON.parse(text);
           const choices = json.output.choices as Array<{
             message: {
-              content: string | null;
+              content: string | null | MultimodalContentForAlibaba[];
               tool_calls: ChatMessageTool[];
               reasoning_content: string | null;
             };
@@ -212,7 +224,9 @@
           } else if (content && content.length > 0) {
             return {
               isThinking: false,
-              content: content,
+              content: Array.isArray(content)
+                ? content.map((item) => item.text).join(",")
+                : content,
             };
           }
 
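// Reviewer sketch (not part of the patch): the shape this diff assumes
// preProcessImageContentForAlibabaDashScope returns for vision models, i.e.
// OpenAI-style multimodal parts remapped to DashScope-style { text } / { image }
// parts. Types and the helper below are hypothetical, for illustration only.
type OpenAIStylePart = {
  type: "text" | "image_url";
  text?: string;
  image_url?: { url: string };
};
type DashScopePart = { text?: string; image?: string };

function toDashScopeParts(parts: OpenAIStylePart[]): DashScopePart[] {
  return parts.map((part) =>
    part.type === "image_url"
      ? { image: part.image_url?.url ?? "" }
      : { text: part.text ?? "" },
  );
}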