siliconflow.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
  SILICONFLOW_BASE_URL,
  SiliconFlow,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  isVisionModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
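
// OpenAI-compatible chat client for the SiliconFlow provider: resolves the
// request endpoint, normalizes chat history for vision and reasoning models,
// and streams responses (including reasoning_content "thinking" deltas).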
export class SiliconflowApi implements LLMApi {
  private disableListModels = true;
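
  // Resolve the request base URL: the user's custom SiliconFlow URL when
  // custom config is enabled, the official endpoint for the desktop app,
  // otherwise the in-app proxy path.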
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.siliconflowUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.SiliconFlow;
      baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath.SiliconFlow)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
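
  // Send a chat completion request; handles both streaming (with plugin tool
  // calls and reasoning deltas) and plain non-streaming responses.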
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
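    // Strip reasoning ("thinking") text from prior assistant turns; for vision
    // models, inline image parts, otherwise send plain text content.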
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }
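
    // Merge the global model config, the session mask's overrides, and the
    // per-request model/provider (later spreads win).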
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally omitted from the payload.
    };

    console.log("[Request] siliconflow payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(SiliconFlow.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS_FOR_THINKING,
      );
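
      // Streaming: resolve the session's plugin tools and let streamWithThink
      // drive SSE parsing, tool-call accumulation, and incremental UI updates.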
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
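
            // Route reasoning_content deltas to the thinking stream and regular
            // content deltas to the message body; ignore empty chunks.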
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
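        // Non-streaming: await the full response, clear the timeout, and
        // return the extracted message.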
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
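
  // Usage reporting returns zeros and model listing is disabled for this
  // provider (see disableListModels).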
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
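
// A minimal usage sketch (illustrative only — in the app this class is wired
// up through the client API layer, the exact ChatOptions shape is defined in
// ../api, and the model id below is a placeholder):
//
//   const api = new SiliconflowApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "<siliconflow-model-id>", stream: true },
//     onFinish: (message) => console.log(message),
//     onError: (err) => console.error(err),
//   } as ChatOptions);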