deepseek.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
  4. import {
  5. useAccessStore,
  6. useAppConfig,
  7. useChatStore,
  8. ChatMessageTool,
  9. usePluginStore,
  10. } from "@/app/store";
  11. import { streamWithThink } from "@/app/utils/chat";
  12. import {
  13. ChatOptions,
  14. getHeaders,
  15. LLMApi,
  16. LLMModel,
  17. SpeechOptions,
  18. } from "../api";
  19. import { getClientConfig } from "@/app/config/client";
  20. import {
  21. getMessageTextContent,
  22. getMessageTextContentWithoutThinking,
  23. getTimeoutMSByModel,
  24. } from "@/app/utils";
  25. import { RequestPayload } from "./openai";
  26. import { fetch } from "@/app/utils/stream";

export class DeepSeekApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.deepseekUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.DeepSeek;
      baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
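
  // Example resolutions (a sketch; the exact constants live in @/app/constant,
  // and the custom URL is a hypothetical value):
  //   custom config, deepseekUrl = "https://my-proxy.example.com/" and
  //   path = "chat/completions"  ->  "https://my-proxy.example.com/chat/completions"
  //   no custom config, app build  ->  DEEPSEEK_BASE_URL + "/chat/completions"
  //   no custom config, web build  ->  ApiPath.DeepSeek + "/chat/completions"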

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
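
  // For reference, a non-streaming response body is shaped roughly like
  // (abridged; field set assumed from the OpenAI-compatible schema):
  //   { "choices": [{ "message": { "role": "assistant", "content": "..." } }] }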

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        // Strip reasoning traces from prior assistant turns; only the final
        // answer text is sent back as history.
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    // Detect and repair the message order: the first non-system message
    // must be a user message.
    const filteredMessages: ChatOptions["messages"] = [];
    let hasFoundFirstUser = false;

    for (const msg of messages) {
      if (msg.role === "system") {
        // Keep all system messages.
        filteredMessages.push(msg);
      } else if (msg.role === "user") {
        // User messages are always kept.
        filteredMessages.push(msg);
        hasFoundFirstUser = true;
      } else if (hasFoundFirstUser) {
        // After the first user message, all subsequent non-system messages
        // are kept.
        filteredMessages.push(msg);
      }
      // A non-system message that appears before the first user message
      // is dropped.
    }
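
    // Illustration (hypothetical history): [system, assistant, user, assistant]
    // becomes [system, user, assistant]; the assistant turn preceding the
    // first user message is dropped.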

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages: filteredMessages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is intentionally omitted so the provider applies its own
      // default output limit:
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };
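
    // A serialized payload looks roughly like (all values are placeholders):
    //   { "model": "deepseek-chat", "stream": true, "temperature": 0.7,
    //     "top_p": 1, "presence_penalty": 0, "frequency_penalty": 0,
    //     "messages": [{ "role": "user", "content": "Hi" }] }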

    console.log("[Request] deepseek payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(DeepSeek.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it runs past the model-specific timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call: record its id, type, and name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent chunks carry only argument fragments; append
                // them to the call recorded at this index.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the assistant tool_calls message and
          // the tool call results to the running payload.
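          // E.g. (hypothetical): messages [..., user] becomes
          // [..., user, assistant(tool_calls), tool(result)] before the
          // follow-up request is sent.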
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
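
// Minimal usage sketch (hypothetical caller; the exact ChatOptions fields
// come from ../api and may differ in detail):
//
//   const api = new DeepSeekApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "deepseek-chat", stream: true },
//     onUpdate: (message) => console.log("partial:", message),
//     onFinish: (message) => console.log("done:", message),
//     onError: (e) => console.error(e),
//   } as ChatOptions);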