|
|
@@ -21,7 +21,50 @@ import {
|
|
|
} from "@fortaine/fetch-event-source";
|
|
|
import { prettyObject } from "@/app/utils/format";
|
|
|
import { getClientConfig } from "@/app/config/client";
|
|
|
-import { getMessageTextContent } from "@/app/utils";
|
|
|
+import { getMessageTextContent, isVisionModel } from "@/app/utils";
|
|
|
+
|
|
|
+// 预处理图片内容,将base64转换为阿里云API格式
|
|
|
+async function preProcessImageContent(content: string | MultimodalContent[]) {
|
|
|
+ if (typeof content === "string") {
|
|
|
+ return content;
|
|
|
+ }
|
|
|
+
|
|
|
+ const processedContent: any[] = [];
|
|
|
+
|
|
|
+ for (const item of content) {
|
|
|
+ if (item.type === "text") {
|
|
|
+ processedContent.push({
|
|
|
+ text: item.text
|
|
|
+ });
|
|
|
+ } else if (item.type === "image_url") {
|
|
|
+ // 阿里云API支持URL和base64格式的图片
|
|
|
+ let imageData = item.image_url?.url || "";
|
|
|
+
|
|
|
+ if (imageData.startsWith("data:image/")) {
|
|
|
+ // 提取base64部分
|
|
|
+ const base64Match = imageData.match(/data:image\/[^;]+;base64,(.+)/);
|
|
|
+ if (base64Match) {
|
|
|
+ imageData = base64Match[1];
|
|
|
+ }
|
|
|
+ processedContent.push({
|
|
|
+ image: imageData
|
|
|
+ });
|
|
|
+ } else if (imageData.startsWith("http")) {
|
|
|
+ // 直接使用URL
|
|
|
+ processedContent.push({
|
|
|
+ image: imageData
|
|
|
+ });
|
|
|
+ } else {
|
|
|
+ // 假设是纯base64
|
|
|
+ processedContent.push({
|
|
|
+ image: imageData
|
|
|
+ });
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return processedContent;
|
|
|
+}
|
|
|
|
|
|
export interface OpenAIListModelResponse {
|
|
|
object: string;
|
|
|
@@ -84,11 +127,6 @@ export class QwenApi implements LLMApi {
|
|
|
}
|
|
|
|
|
|
async chat(options: ChatOptions) {
|
|
|
- const messages = options.messages.map((v) => ({
|
|
|
- role: v.role,
|
|
|
- content: getMessageTextContent(v),
|
|
|
- }));
|
|
|
-
|
|
|
const modelConfig = {
|
|
|
...useAppConfig.getState().modelConfig,
|
|
|
...useChatStore.getState().currentSession().mask.modelConfig,
|
|
|
@@ -97,6 +135,16 @@ export class QwenApi implements LLMApi {
|
|
|
},
|
|
|
};
|
|
|
|
|
|
+ const visionModel = isVisionModel(options.config.model);
|
|
|
+ const messages: any[] = [];
|
|
|
+
|
|
|
+ for (const v of options.messages) {
|
|
|
+ const content = visionModel
|
|
|
+ ? await preProcessImageContent(v.content)
|
|
|
+ : getMessageTextContent(v);
|
|
|
+ messages.push({ role: v.role, content });
|
|
|
+ }
|
|
|
+
|
|
|
const shouldStream = !!options.config.stream;
|
|
|
const requestPayload: RequestPayload = {
|
|
|
model: modelConfig.model,
|
|
|
@@ -116,7 +164,12 @@ export class QwenApi implements LLMApi {
|
|
|
options.onController?.(controller);
|
|
|
|
|
|
try {
|
|
|
- const chatPath = this.path(Alibaba.ChatPath);
|
|
|
+      // Pick the endpoint by model type: vision models use the multimodal-generation endpoint
|
|
|
+ let chatPath = this.path(Alibaba.ChatPath);
|
|
|
+ if (visionModel) {
|
|
|
+ chatPath = this.path('/services/aigc/multimodal-generation/generation');
|
|
|
+ }
|
|
|
+
|
|
|
const chatPayload = {
|
|
|
method: "POST",
|
|
|
body: JSON.stringify(requestPayload),
|