moonshot.ts
  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import {
  4. ApiPath,
  5. MOONSHOT_BASE_URL,
  6. Moonshot,
  7. REQUEST_TIMEOUT_MS,
  8. } from "@/app/constant";
  9. import {
  10. useAccessStore,
  11. useAppConfig,
  12. useChatStore,
  13. ChatMessageTool,
  14. usePluginStore,
  15. } from "@/app/store";
  16. import { stream } from "@/app/utils/chat";
  17. import {
  18. ChatOptions,
  19. getHeaders,
  20. LLMApi,
  21. LLMModel,
  22. SpeechOptions,
  23. } from "../api";
  24. import { getClientConfig } from "@/app/config/client";
  25. import { getMessageTextContent } from "@/app/utils";
  26. import { RequestPayload } from "./openai";
  27. import { fetch } from "@/app/utils/stream";

export class MoonshotApi implements LLMApi {
  private disableListModels = true;

  // Resolve the request base URL: prefer a user-configured custom endpoint,
  // otherwise fall back to the built-in proxy path (web build) or the
  // upstream Moonshot base URL (desktop app).
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.moonshotUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
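
  // For example, assuming the default constants in @/app/constant,
  // path(Moonshot.ChatPath) above yields a same-origin proxy URL of the
  // form "<ApiPath.Moonshot>/<chat path>" in the web build, and joins
  // MOONSHOT_BASE_URL with the chat path directly in the desktop app.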

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
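
  // extractMessage above assumes an OpenAI-style completion body, e.g.
  // { choices: [{ message: { role: "assistant", content: "..." } }] }.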

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    // Flatten multimodal message content down to plain text.
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    // Merge app-level defaults with the session mask's model config,
    // then override with the per-request model and provider.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally omitted so the provider default applies.
    };

    console.log("[Request] moonshot payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Moonshot.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request once the configured timeout elapses.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
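
      // Note: clearTimeout is only called on the non-streaming path
      // below; the streaming path hands the same AbortController to
      // stream() for cancellation.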

      if (shouldStream) {
        // Expose the session's enabled plugins as OpenAI-style tool
        // definitions plus a map of callable implementations.
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: handle one streamed chunk, returning any text delta.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call: record its id, type, and name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent chunks carry only argument fragments;
                // concatenate them onto the call being accumulated.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
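          // A streamed chunk is an OpenAI-compatible SSE payload, e.g.
          // {"choices":[{"delta":{"content":"Hel"}}]} for text deltas, or
          // incremental "tool_calls" fragments that the callback above
          // stitches back together.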
          // processToolMessage: append the assistant's tool_call message
          // and the tool results to the conversation before the follow-up
          // request is sent.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
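
// Usage sketch (hypothetical call site; the exact ChatOptions fields are
// assumptions inferred from how they are used above, e.g. onFinish/onError):
//
//   const api = new MoonshotApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "moonshot-v1-8k", stream: true },
//     onUpdate: (partial) => console.log("partial:", partial),
//     onFinish: (message) => console.log("final:", message),
//     onError: (e) => console.error(e),
//   } as ChatOptions);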