glm.ts

  1. "use client";
  2. import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
  3. import {
  4. useAccessStore,
  5. useAppConfig,
  6. useChatStore,
  7. ChatMessageTool,
  8. usePluginStore,
  9. } from "@/app/store";
  10. import { stream } from "@/app/utils/chat";
  11. import {
  12. ChatOptions,
  13. getHeaders,
  14. LLMApi,
  15. LLMModel,
  16. SpeechOptions,
  17. } from "../api";
  18. import { getClientConfig } from "@/app/config/client";
  19. import {
  20. getMessageTextContent,
  21. isVisionModel,
  22. getTimeoutMSByModel,
  23. } from "@/app/utils";
  24. import { RequestPayload } from "./openai";
  25. import { fetch } from "@/app/utils/stream";
  26. import { preProcessImageContent } from "@/app/utils/chat";
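
// Request payload shapes for ZhipuAI's three endpoint families:
// chat completions, image generation (CogView), and video generation (CogVideoX).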
interface BasePayload {
  model: string;
}

interface ChatPayload extends BasePayload {
  messages: ChatOptions["messages"];
  stream?: boolean;
  temperature?: number;
  presence_penalty?: number;
  frequency_penalty?: number;
  top_p?: number;
}

interface ImageGenerationPayload extends BasePayload {
  prompt: string;
  size?: string;
  user_id?: string;
}

interface VideoGenerationPayload extends BasePayload {
  prompt: string;
  duration?: number;
  resolution?: string;
  user_id?: string;
}

type ModelType = "chat" | "image" | "video";

export class ChatGLMApi implements LLMApi {
  private disableListModels = true;
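
  // Route a model name to its endpoint family by prefix: "cogview-*"
  // models generate images, "cogvideox*" models generate videos, and
  // everything else goes through chat completions.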
  private getModelType(model: string): ModelType {
    if (model.startsWith("cogview-")) return "image";
    if (model.startsWith("cogvideox")) return "video";
    return "chat";
  }
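
  // Map an endpoint family to its API path (defined in @/app/constant).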
  private getModelPath(type: ModelType): string {
    switch (type) {
      case "image":
        return ChatGLM.ImagePath;
      case "video":
        return ChatGLM.VideoPath;
      default:
        return ChatGLM.ChatPath;
    }
  }
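
  // Build the request body for the target endpoint. Image and video
  // generation take a single text prompt, so only the last message's text
  // is used; chat requests forward the whole message list plus sampling
  // parameters.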
  private createPayload(
    messages: ChatOptions["messages"],
    modelConfig: any,
    options: ChatOptions,
  ): BasePayload {
    const modelType = this.getModelType(modelConfig.model);
    const lastMessage = messages[messages.length - 1];
    const prompt =
      typeof lastMessage.content === "string"
        ? lastMessage.content
        : lastMessage.content.map((c) => c.text).join("\n");

    switch (modelType) {
      case "image":
        return {
          model: modelConfig.model,
          prompt,
          size: options.config.size,
        } as ImageGenerationPayload;
      case "video":
        // Video generation also takes a single prompt; without this case a
        // chat-shaped payload would be posted to the video endpoint.
        return {
          model: modelConfig.model,
          prompt,
        } as VideoGenerationPayload;
      default:
        return {
          messages,
          stream: options.config.stream,
          model: modelConfig.model,
          temperature: modelConfig.temperature,
          presence_penalty: modelConfig.presence_penalty,
          frequency_penalty: modelConfig.frequency_penalty,
          top_p: modelConfig.top_p,
        } as ChatPayload;
    }
  }
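
  // Normalize a non-streaming response into markup the chat UI can render:
  // a markdown image link, an HTML <video> tag, or plain message text.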
  private parseResponse(modelType: ModelType, json: any): string {
    switch (modelType) {
      case "image": {
        const imageUrl = json.data?.[0]?.url;
        return imageUrl ? `![Generated Image](${imageUrl})` : "";
      }
      case "video": {
        const videoUrl = json.data?.[0]?.url;
        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
      }
      default:
        return this.extractMessage(json);
    }
  }
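
  // Resolve the base URL for a request: a user-configured custom endpoint
  // if set, the official API in the desktop app, or the local proxy path
  // in the browser.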
  path(path: string): string {
    const accessStore = useAccessStore.getState();
    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.chatglmUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.ChatGLM;
      baseUrl = isApp ? CHATGLM_BASE_URL : apiPath;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) {
      baseUrl = "https://" + baseUrl;
    }
    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }
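
  // Pull the assistant's text out of a non-streaming completion response.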
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
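
  // Dispatch a chat, image, or video request based on the selected model.
  // Vision models keep multimodal content (images are preprocessed first);
  // all other models are reduced to plain text.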
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = visionModel
        ? await preProcessImageContent(v.content)
        : getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };
    const modelType = this.getModelType(modelConfig.model);
    const requestPayload = this.createPayload(messages, modelConfig, options);
    const path = this.path(this.getModelPath(modelType));

    console.log(`[Request] glm ${modelType} payload: `, requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );
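
      // Image and video generation are single-shot requests: parse the JSON
      // response into renderable markup and finish immediately.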
      if (modelType === "image" || modelType === "video") {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        console.log(`[Response] glm ${modelType}:`, resJson);
        const message = this.parseResponse(modelType, resJson);
        options.onFinish(message, res);
        return;
      }
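
      // Chat completions: stream over SSE (with plugin tool-call support)
      // when requested, otherwise make a single blocking request.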
      const shouldStream = !!options.config.stream;
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          path,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
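
            // A delta that carries an id opens a new tool call; deltas
            // without an id stream further argument fragments for the call
            // at `index`, so append them.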
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
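
  // Usage accounting and model listing are not supported for this provider
  // (disableListModels is set above).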
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}