- "use client";
- // azure and openai, using same models. so using same LLMApi.
import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { stream, preProcessImageContent } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getTimeoutMSByModel } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
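
/**
 * Client for the xAI chat completion API. xAI exposes an
 * OpenAI-compatible endpoint, so this client reuses the OpenAI
 * RequestPayload shape and the shared SSE streaming helper.
 */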
export class XAIApi implements LLMApi {
  private disableListModels = true;
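
  // Resolve the request base URL: prefer a user-configured custom
  // endpoint, then fall back to the xAI base URL (desktop app) or the
  // local API proxy path (web).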
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.xaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.XAI;
      baseUrl = isApp ? XAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
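
  // Pull the assistant text out of a non-streaming completion response.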
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
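
  // Speech synthesis is not implemented for this provider.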
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
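
  // Send a chat completion request, either streamed (with plugin/tool
  // support) or as a single JSON response.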
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] xai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(XAI.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it exceeds the per-model timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: extract the text delta (and any tool calls) from
          // each server-sent event chunk.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls && tool_calls.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // A new tool call begins: record its id, name, and the
                // first arguments fragment.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later chunks carry no id: append their fragment to the
                // arguments of the tool call at the given index.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: once tool calls complete, append the
          // assistant tool_call message and the tool results to the
          // conversation before the follow-up request.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
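
  // Quota reporting is not wired up for this provider; always report
  // zero usage.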
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
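
  // Dynamic model listing is disabled (see disableListModels); return an
  // empty list so the app falls back to its configured models.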
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
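
/*
 * Usage sketch (illustrative, not from this file): the app normally
 * reaches this client through its provider factory, but a direct call
 * looks roughly like the following. The exact ChatOptions callback
 * names and the "grok-beta" model name are assumptions.
 *
 *   const api = new XAIApi();
 *   await api.chat({
 *     messages: [{ role: "user", content: "Hello, Grok!" }],
 *     config: { model: "grok-beta", stream: true },
 *     onUpdate: (message) => console.log("partial:", message),
 *     onFinish: (message) => console.log("final:", message),
 *     onError: (e) => console.error(e),
 *   });
 */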