@@ -0,0 +1,287 @@
+"use client";
+
+import {
+  ApiPath,
+  AI302_BASE_URL,
+  DEFAULT_MODELS,
+  AI302,
+} from "@/app/constant";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  ChatMessageTool,
+  usePluginStore,
+} from "@/app/store";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  SpeechOptions,
+} from "../api";
+import { getClientConfig } from "@/app/config/client";
+import {
+  getMessageTextContent,
+  getMessageTextContentWithoutThinking,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
+import { RequestPayload } from "./openai";
+
+import { fetch } from "@/app/utils/stream";
+export interface Ai302ListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
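+// Chat client for the 302.AI provider. The endpoint is OpenAI-compatible, so
+// requests reuse the RequestPayload shape from the OpenAI client.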
+export class Ai302Api implements LLMApi {
+  private disableListModels = false;
+
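+  // Resolve the request URL: a custom endpoint from settings takes priority;
+  // otherwise fall back to AI302_BASE_URL (app builds) or the proxy path (web).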
+  path(path: string): string {
+    const accessStore = useAccessStore.getState();
+
+    let baseUrl = "";
+
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.ai302Url;
+    }
+
+    if (baseUrl.length === 0) {
+      const isApp = !!getClientConfig()?.isApp;
+      const apiPath = ApiPath["302.AI"];
+      baseUrl = isApp ? AI302_BASE_URL : apiPath;
+    }
+
+    if (baseUrl.endsWith("/")) {
+      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    }
+    if (
+      !baseUrl.startsWith("http") &&
+      !baseUrl.startsWith(ApiPath["302.AI"])
+    ) {
+      baseUrl = "https://" + baseUrl;
+    }
+
+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
+    return [baseUrl, path].join("/");
+  }
+
+  extractMessage(res: any) {
+    return res.choices?.at(0)?.message?.content ?? "";
+  }
+
+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
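+  // Build the outgoing message list (dropping prior "thinking" output from
+  // assistant turns and pre-processing images for vision models), then send
+  // the chat completion request.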
+  async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      if (v.role === "assistant") {
+        const content = getMessageTextContentWithoutThinking(v);
+        messages.push({ role: v.role, content });
+      } else {
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
+        messages.push({ role: v.role, content });
+      }
+    }
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+        providerName: options.config.providerName,
+      },
+    };
+
+    const requestPayload: RequestPayload = {
+      messages,
+      stream: options.config.stream,
+      model: modelConfig.model,
+      temperature: modelConfig.temperature,
+      presence_penalty: modelConfig.presence_penalty,
+      frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
+      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
+      // max_tokens is intentionally omitted from the request payload.
+    };
+
+    console.log("[Request] ai302 payload: ", requestPayload);
+
+    const shouldStream = !!options.config.stream;
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const chatPath = this.path(AI302.ChatPath);
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // console.log(chatPayload);
+
+      // Use extended timeout for thinking models as they typically require more processing time
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        getTimeoutMSByModel(options.config.model),
+      );
+
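+      // Streaming responses go through streamWithThink so that reasoning_content
+      // deltas are surfaced as "thinking" output alongside normal content.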
+      if (shouldStream) {
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          getHeaders(),
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.choices as Array<{
+              delta: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;
+            const tool_calls = choices[0]?.delta?.tool_calls;
+            if (tool_calls && tool_calls.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+            const reasoning = choices[0]?.delta?.reasoning_content;
+            const content = choices[0]?.delta?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayload,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            // @ts-ignore
+            requestPayload?.messages?.splice(
+              // @ts-ignore
+              requestPayload?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
+      } else {
+        const res = await fetch(chatPath, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        const message = this.extractMessage(resJson);
+        options.onFinish(message, res);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+  async usage() {
+    return {
+      used: 0,
+      total: 0,
+    };
+  }
+
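+  // List the models exposed by the provider; fall back to DEFAULT_MODELS when
+  // model listing is disabled.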
+  async models(): Promise<LLMModel[]> {
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(AI302.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as Ai302ListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; // keep the ordering consistent with Constant.ts
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "ai302",
+        providerName: "302.AI",
+        providerType: "ai302",
+        sorted: 15,
+      },
+    }));
+  }
+}