ai302.ts
  1. "use client";
  2. import {
  3. ApiPath,
  4. AI302_BASE_URL,
  5. DEFAULT_MODELS,
  6. AI302,
  7. } from "@/app/constant";
  8. import {
  9. useAccessStore,
  10. useAppConfig,
  11. useChatStore,
  12. ChatMessageTool,
  13. usePluginStore,
  14. } from "@/app/store";
  15. import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
  16. import {
  17. ChatOptions,
  18. getHeaders,
  19. LLMApi,
  20. LLMModel,
  21. SpeechOptions,
  22. } from "../api";
  23. import { getClientConfig } from "@/app/config/client";
  24. import {
  25. getMessageTextContent,
  26. getMessageTextContentWithoutThinking,
  27. isVisionModel,
  28. getTimeoutMSByModel,
  29. } from "@/app/utils";
  30. import { RequestPayload } from "./openai";
  31. import { fetch } from "@/app/utils/stream";
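
// Shape of the 302.AI model-list response (OpenAI-compatible /models endpoint).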
export interface Ai302ListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}
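
// LLMApi implementation for the 302.AI provider, which exposes an
// OpenAI-compatible chat-completions API.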
export class Ai302Api implements LLMApi {
  private disableListModels = false;
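
  // Resolve the request URL: a user-configured endpoint wins; otherwise fall
  // back to the default base URL (desktop app) or the built-in proxy path.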
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.ai302Url;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath["302.AI"];
      baseUrl = isApp ? AI302_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath["302.AI"])
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
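
  // Extract the assistant text from a non-streaming completion response.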
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
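
  // Text-to-speech is not implemented for this provider.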
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
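
  // Send a chat request. Handles vision preprocessing, streaming with
  // "thinking" deltas, and plugin tool calls.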
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
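      // Strip reasoning ("thinking") segments from prior assistant turns so
      // they are not sent back to the model.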
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }
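
    // Merge global defaults, the session mask's overrides, and the
    // per-request model selection (later spreads win).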
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is intentionally not sent; in practice it has caused more
      // trouble than it is worth.
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(AI302.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use an extended timeout for thinking models, as they typically
      // require more processing time.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
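        // Collect the session's enabled plugins as OpenAI-style tool
        // definitions plus the functions that implement them.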
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
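          // Parses one SSE chunk and classifies the delta as reasoning
          // ("thinking") text or regular content.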
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls && tool_calls.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // The first chunk of a tool call carries its id and name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent chunks only append to the arguments string.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the assistant's tool_calls message and
          // the tool call results to the payload before the follow-up request.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
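
  // Usage accounting is not implemented for this provider; report zeros.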
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
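
  // Fetch the provider's model list, falling back to the built-in defaults
  // when listing is disabled.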
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(AI302.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as Ai302ListModelResponse;
    const chatModels = resJson.data;
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    let seq = 1000; // keep ordering consistent with Constant.ts
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "ai302",
        providerName: "302.AI",
        providerType: "ai302",
        sorted: 15,
      },
    }));
  }
}