glm.ts

  1. "use client";
  2. import { ApiPath, GLM_BASE_URL, GLM, REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import {
  4. useAccessStore,
  5. useAppConfig,
  6. useChatStore,
  7. ChatMessageTool,
  8. usePluginStore,
  9. } from "@/app/store";
  10. import { stream } from "@/app/utils/chat";
  11. import {
  12. ChatOptions,
  13. getHeaders,
  14. LLMApi,
  15. LLMModel,
  16. SpeechOptions,
  17. } from "../api";
  18. import { getClientConfig } from "@/app/config/client";
  19. import { getMessageTextContent } from "@/app/utils";
  20. import { RequestPayload } from "./openai";
  21. import { fetch } from "@/app/utils/stream";
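
/**
 * GLM (Zhipu AI / ChatGLM) client implementing the shared LLMApi interface:
 * it resolves the request endpoint, builds an OpenAI-style chat payload, and
 * supports both streaming (with plugin tool calls) and one-shot completions.
 */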
export class GLMApi implements LLMApi {
  private disableListModels = true;
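
  /**
   * Resolve the full request URL. Uses the user-configured custom endpoint
   * when present; otherwise falls back to GLM_BASE_URL in the desktop app
   * or the ApiPath.GLM proxy route in the web build.
   */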
  path(path: string): string {
    const accessStore = useAccessStore.getState();
    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.glmUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.GLM;
      baseUrl = isApp ? GLM_BASE_URL : apiPath;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.GLM)) {
      baseUrl = "https://" + baseUrl;
    }
    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }
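
  /** Pull the assistant text out of a non-streaming completion response. */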
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
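
  /** Text-to-speech is not supported by this client. */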
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
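
  /**
   * Send a chat request. Flattens each message to plain text, merges the
   * global, mask-level, and per-request model config, then either streams
   * the response (resolving plugin tool calls along the way) or awaits a
   * single JSON completion.
   */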
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] glm payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(GLM.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it takes longer than REQUEST_TIMEOUT_MS
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
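
      // Streaming: resolve the session's enabled plugins into tool
      // definitions plus their local implementations, then delegate the SSE
      // read loop to the shared stream() helper.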
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: extract the text delta from each chunk and collect
          // incremental tool_call fragments into runTools
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // a new tool call starts with an id and a function name
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // later chunks carry only argument fragments; append them
                // to the tool call currently being assembled
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: after the tools have run, append the
          // assistant tool_calls message and the tool results to the payload
          // so the follow-up request carries the full exchange
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
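
  /** Quota reporting is not implemented; always reports zero usage. */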
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
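
  /** Model listing is disabled for GLM (see disableListModels). */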
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
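
/*
 * Usage sketch (hypothetical caller; the exact ChatOptions shape is defined
 * in ../api and may carry more fields than shown here):
 *
 *   const api = new GLMApi();
 *   await api.chat({
 *     messages: [{ role: "user", content: "Hello, GLM!" }],
 *     config: { model: "glm-4", providerName: "ChatGLM", stream: true },
 *     onUpdate: (message) => console.log("[partial]", message),
 *     onFinish: (message) => console.log("[done]", message),
 *     onError: (e) => console.error(e),
 *   });
 */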