moonshot.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import {
  4. ApiPath,
  5. DEFAULT_API_HOST,
  6. Moonshot,
  7. REQUEST_TIMEOUT_MS,
  8. } from "@/app/constant";
  9. import {
  10. useAccessStore,
  11. useAppConfig,
  12. useChatStore,
  13. ChatMessageTool,
  14. usePluginStore,
  15. } from "@/app/store";
  16. import { stream } from "@/app/utils/chat";
  17. import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api";
  18. import { getClientConfig } from "@/app/config/client";
  19. import { getMessageTextContent } from "@/app/utils";
  20. import { RequestPayload } from "./openai";
export class MoonshotApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.moonshotUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }
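
  // Example (a sketch, assuming ApiPath.Moonshot is "/api/moonshot" and
  // Moonshot.ChatPath is "v1/chat/completions"):
  //   path("v1/chat/completions")
  //     web build -> "/api/moonshot/v1/chat/completions"
  //     app build -> DEFAULT_API_HOST + "/proxy/api/moonshot/v1/chat/completions"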
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
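
  // e.g. extractMessage({ choices: [{ message: { content: "hi" } }] }) === "hi";
  // any missing field falls back to the empty string.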
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately not sent; the server-side default is used instead.
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] moonshot payload: ", requestPayload);
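
    // Illustrative serialized payload (values are examples, not defaults):
    // {
    //   "messages": [{ "role": "user", "content": "Hello" }],
    //   "stream": true,
    //   "model": "moonshot-v1-8k",
    //   "temperature": 0.5,
    //   "presence_penalty": 0,
    //   "frequency_penalty": 0,
    //   "top_p": 1
    // }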
    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Moonshot.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
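
      // The timeout fires by aborting the shared controller, which cancels
      // either the streaming connection or the plain fetch below.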
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: extract the text delta from each SSE chunk and collect
          // any streamed tool calls into runTools
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // the first chunk of a tool call carries its id and name
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // subsequent chunks only append to the arguments string
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: append the tool_calls message and the tool
          // call results to the conversation before the follow-up request
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
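
// Usage (a minimal sketch; the option fields shown are the ones this file
// actually reads, not the full ChatOptions interface from ../api):
//
//   const api = new MoonshotApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "moonshot-v1-8k", stream: true },
//     onFinish: (message) => console.log("done:", message),
//     onError: (e) => console.error(e),
//   });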