// siliconflow.ts
  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
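
// Note: RequestPayload is imported from the OpenAI client because SiliconFlow's
// chat endpoint speaks the OpenAI Chat Completions wire format, so the request
// body shape can be reused as-is.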

export class SiliconflowApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.siliconflowUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.SiliconFlow;
      baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath.SiliconFlow)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
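
  // Illustrative resolution (values come from the app's constants, shown here
  // symbolically rather than as literal URLs):
  //   desktop app: path(SiliconFlow.ChatPath) -> `${SILICONFLOW_BASE_URL}/${SiliconFlow.ChatPath}`
  //   web build:   path(SiliconFlow.ChatPath) -> `${ApiPath.SiliconFlow}/${SiliconFlow.ChatPath}`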

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
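    // Prior assistant turns are re-sent without their reasoning/<think> blocks
    // (presumably to avoid feeding earlier chain-of-thought back to the model);
    // all other roles keep their full text content.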
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is intentionally omitted (it would otherwise be e.g.
      // Math.max(modelConfig.max_tokens, 1024)); the provider's default is used.
    };

    console.log("[Request] siliconflow payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(SiliconFlow.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls && tool_calls.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // The first fragment of a tool call carries its id and name;
                // start a new entry in runTools.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later fragments carry only argument text; append it to the
                // call accumulated at the same index.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: appends the tool_calls message and the tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
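
// Illustrative usage sketch (hypothetical call site; in the real app this class
// is constructed by the client API factory, and ChatOptions carries more fields
// than shown here):
//
//   const api = new SiliconflowApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "deepseek-ai/DeepSeek-R1", stream: true },
//     onUpdate: (message) => console.log(message),
//     onFinish: (message, res) => console.log("done:", message),
//     onError: (e) => console.error(e),
//   });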