siliconflow.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import {
  4. ApiPath,
  5. SILICONFLOW_BASE_URL,
  6. SiliconFlow,
  7. REQUEST_TIMEOUT_MS_FOR_THINKING,
  8. } from "@/app/constant";
  9. import {
  10. useAccessStore,
  11. useAppConfig,
  12. useChatStore,
  13. ChatMessageTool,
  14. usePluginStore,
  15. } from "@/app/store";
  16. import { streamWithThink } from "@/app/utils/chat";
  17. import {
  18. ChatOptions,
  19. getHeaders,
  20. LLMApi,
  21. LLMModel,
  22. SpeechOptions,
  23. } from "../api";
  24. import { getClientConfig } from "@/app/config/client";
  25. import {
  26. getMessageTextContent,
  27. getMessageTextContentWithoutThinking,
  28. } from "@/app/utils";
  29. import { RequestPayload } from "./openai";
  30. import { fetch } from "@/app/utils/stream";
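
// Client for SiliconFlow's OpenAI-compatible chat completions endpoint. It supports
// "thinking" models by routing streamed `reasoning_content` deltas separately from
// regular `content` deltas (see the parseSSE callback in chat()).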
export class SiliconflowApi implements LLMApi {
  private disableListModels = true;
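
  // Resolve the request base URL: a user-supplied custom endpoint wins; otherwise the
  // desktop app talks to SiliconFlow directly and the web build goes through the local proxy path.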
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.siliconflowUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.SiliconFlow;
      baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath.SiliconFlow)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
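
  // Send a chat completion request. Assistant history is stripped of any previous
  // reasoning/thinking text before being resent, and the final payload follows the
  // OpenAI chat-completions schema.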
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is deliberately omitted from the request payload.
    };

    console.log("[Request] siliconflow payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(SiliconFlow.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use an extended timeout, since thinking models typically need more processing time.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS_FOR_THINKING,
      );
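
      // Streaming path: register any enabled plugins as tools and let streamWithThink
      // drive the SSE loop, distinguishing reasoning deltas from answer deltas.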
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the tool_calls message and the tool call results to the history.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
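        // Non-streaming path: a single fetch, then extract the assistant message from the JSON body.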
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
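
// Usage sketch (hypothetical wiring; model name and callback shapes are illustrative,
// the actual ChatOptions contract comes from "../api"):
//
// const api = new SiliconflowApi();
// await api.chat({
//   messages: [{ role: "user", content: "Hello" }],
//   config: { model: "deepseek-ai/DeepSeek-R1", stream: true, providerName: "SiliconFlow" },
//   onUpdate: (message) => console.log("partial:", message),
//   onFinish: (message, res) => console.log("final:", message),
//   onError: (err) => console.error(err),
// });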