deepseek.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
  DEEPSEEK_BASE_URL,
  DeepSeek,
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
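
/**
 * LLM client for DeepSeek's OpenAI-compatible chat completion API.
 * Endpoint resolution, payload shape, and SSE parsing mirror the OpenAI
 * client; the main difference is handling of the `reasoning_content`
 * deltas emitted by the reasoner ("R1") models.
 */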
export class DeepSeekApi implements LLMApi {
  private disableListModels = true;
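
  /**
   * Resolve the request URL: a user-configured custom URL wins; otherwise
   * the desktop app talks to DEEPSEEK_BASE_URL directly and the web build
   * goes through the ApiPath.DeepSeek proxy route. A hypothetical example,
   * assuming DEEPSEEK_BASE_URL is "https://api.deepseek.com":
   *   path(DeepSeek.ChatPath) → "https://api.deepseek.com/chat/completions"
   */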
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.deepseekUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.DeepSeek;
      baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
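
  // Pull the assistant text out of a non-streaming completion response.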
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
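
  // Speech synthesis is not wired up for this provider.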
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
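
  /**
   * Send a chat completion request. Assistant history is re-serialized via
   * getMessageTextContentWithoutThinking() so previously streamed reasoning
   * is stripped before being sent back (DeepSeek's reasoner API does not
   * accept reasoning content in input messages).
   */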
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately omitted; the server-side default applies.
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] deepseek payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(DeepSeek.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };
      // console.log(chatPayload);

      // Reasoner models can emit thinking tokens for a long time before the
      // first answer token, so they get a longer abort timeout.
      const isR1 =
        options.config.model.endsWith("-reasoner") ||
        options.config.model.endsWith("-r1");

      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
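            // Each streamed chunk is an OpenAI-style delta. Reasoner models
            // carry chain-of-thought in `reasoning_content` rather than
            // `content`, roughly (shape inferred from the parsing below):
            //   {"choices":[{"delta":{"reasoning_content":"…","content":null}}]}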
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.trim().length === 0) &&
              (!content || content.trim().length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.trim().length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.trim().length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the tool_calls message and the tool
          // call results to the request payload before the follow-up request
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
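
  // Usage accounting and model listing are not implemented for DeepSeek;
  // return empty placeholders.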
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
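
// A minimal usage sketch (hypothetical wiring; the exact callback fields
// follow the ChatOptions interface imported from "../api"):
//
// const api = new DeepSeekApi();
// await api.chat({
//   messages: [{ role: "user", content: "Hello" }],
//   config: { model: "deepseek-chat", providerName: "DeepSeek", stream: true },
//   onUpdate: (message) => console.log("partial:", message),
//   onFinish: (message, res) => console.log("done:", message),
//   onError: (e) => console.error(e),
// });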