xai.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
  4. import {
  5. useAccessStore,
  6. useAppConfig,
  7. useChatStore,
  8. ChatMessageTool,
  9. usePluginStore,
  10. } from "@/app/store";
  11. import { stream } from "@/app/utils/chat";
  12. import {
  13. ChatOptions,
  14. getHeaders,
  15. LLMApi,
  16. LLMModel,
  17. SpeechOptions,
  18. } from "../api";
  19. import { getClientConfig } from "@/app/config/client";
  20. import { getMessageTextContent } from "@/app/utils";
  21. import { RequestPayload } from "./openai";
  22. import { fetch } from "@/app/utils/stream";
export class XAIApi implements LLMApi {
  private disableListModels = true;

  // Resolve the base URL (custom config, app build, or web proxy path) and
  // join it with the request path.
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.xaiUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.XAI;
      baseUrl = isApp ? XAI_BASE_URL : apiPath;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }
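
  // Example of the resolved endpoint, assuming XAI_BASE_URL is "https://api.x.ai"
  // and ApiPath.XAI is "/api/xai" (both are assumptions about @/app/constant):
  //   app build:      path("v1/chat/completions") -> "https://api.x.ai/v1/chat/completions"
  //   web deployment: path("v1/chat/completions") -> "/api/xai/v1/chat/completions"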

  // Pull the assistant text out of a non-streaming completion response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
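
  // Sketch of the response shape extractMessage expects (OpenAI-compatible
  // chat completion; any additional fields are ignored):
  //   { "choices": [{ "message": { "role": "assistant", "content": "..." } }] }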

  // Text-to-speech is not supported by this client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    // xAI accepts plain-text content only, so flatten multimodal messages.
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately omitted so the server-side default applies:
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };
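
    // Illustrative payload (a sketch; actual values come from the active
    // model config, and "grok-beta" is an assumed model name):
    //   { "messages": [{ "role": "user", "content": "Hi" }], "stream": true,
    //     "model": "grok-beta", "temperature": 0.5, "presence_penalty": 0,
    //     "frequency_penalty": 0, "top_p": 1 }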

    console.log("[Request] xai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(XAI.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: pull the text delta out of each SSE chunk and collect
          // streamed tool calls into runTools
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls && tool_calls.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // the first chunk of a tool call carries its id, type, and name
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // later chunks stream only argument fragments; append them
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
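          // Sketch of the SSE chunks parseSSE consumes (shapes assumed from the
          // OpenAI-compatible streaming format; ids and names are illustrative):
          //   data: {"choices":[{"delta":{"content":"Hello"}}]}
          //   data: {"choices":[{"delta":{"tool_calls":[{"index":0,"id":"call_1",
          //           "type":"function","function":{"name":"get_weather","arguments":""}}]}}]}
          //   data: {"choices":[{"delta":{"tool_calls":[{"index":0,
          //           "function":{"arguments":"{\"city\":\"Paris\"}"}}]}}]}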
          // processToolMessage: append the assistant's tool_calls message and
          // the tool call results to the conversation before the follow-up request
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        // Arm the abort watchdog only for the non-streaming request; a fixed
        // timeout here would cut off long-lived streaming responses.
        const requestTimeoutId = setTimeout(
          () => controller.abort(),
          REQUEST_TIMEOUT_MS,
        );

        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
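
// A minimal usage sketch (hypothetical, for illustration only). The exact
// ChatOptions fields live in ../api; the model name "grok-beta" and the
// callback signatures shown here are assumptions:
//
//   const api = new XAIApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "grok-beta", providerName: "XAI", stream: true },
//     onUpdate: (message) => console.log("partial:", message),
//     onFinish: (message) => console.log("final:", message),
//     onError: (err) => console.error(err),
//   });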