// bigmodel.ts
"use client";
import { REQUEST_TIMEOUT_MS } from "@/app/constant";
import { useChatStore } from "@/app/store";
import { ChatOptions, LLMApi, LLMModel } from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getMessageTextContent } from "@/app/utils";
import { bigModelApiKey, knowledgeId, template } from "../config";
  17. export class BigModelApi implements LLMApi {
  18. public useApi: 'public' | 'private';
  19. public publicPath: string;
  20. public privatePath: string;
  21. constructor() {
  22. this.useApi = 'private';
  23. this.publicPath = 'https://open.bigmodel.cn/api/paas/v4/chat/completions';
  24. // 配置私有请求地址
  25. this.privatePath = 'https://open.bigmodel.cn/api/llm-application/open/model-api/1828613766624038913/sse-invoke';
  26. }
  27. async chat(options: ChatOptions) {
  28. const messages = options.messages.map((item) => {
  29. return {
  30. role: item.role,
  31. content: getMessageTextContent(item),
  32. }
  33. });
  34. const userMessages = messages.filter(item => item.content);
  35. if (userMessages.length % 2 === 0) {
  36. userMessages.unshift({
  37. role: "user",
  38. content: "⠀",
  39. });
  40. }
  41. // 开放大模型参数
  42. const publicParams: any = {
  43. messages: userMessages,
  44. stream: true,// 流式回复
  45. model: 'glm-4-0520',// 模型
  46. temperature: 0.01,// 采样温度
  47. top_p: 0.7,// 核取样
  48. // 进阶配置
  49. tools: [
  50. {
  51. type: 'retrieval', // 工具类型为检索
  52. retrieval: {
  53. // 知识库ID
  54. knowledge_id: knowledgeId,
  55. // 知识库模板
  56. prompt_template: template.content,
  57. },
  58. },
  59. ],
  60. };
  61. // 私有大模型参数
  62. const privateParams: any = {
  63. prompt: userMessages,
  64. // model: 'glm-4-0520',// 模型
  65. // temperature: 0.01,// 采样温度
  66. // top_p: 0.7,// 核取样
  67. // 进阶配置
  68. request_id: 'jkec2024',
  69. returnType: undefined,
  70. knowledge_ids: undefined,
  71. document_ids: undefined,
  72. };
  73. const controller = new AbortController();
  74. options.onController?.(controller);
  75. try {
  76. const chatPath = this.useApi === 'public' ? this.publicPath : this.privatePath;
  77. const chatPayload = {
  78. method: "POST",
  79. body: JSON.stringify(this.useApi === 'public' ? publicParams : privateParams),
  80. signal: controller.signal,
  81. headers: {
  82. 'Content-Type': 'application/json',
  83. // APIKey
  84. Authorization: bigModelApiKey
  85. },
  86. };
  87. const requestTimeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
  88. let responseText = "";
  89. let remainText = "";
  90. let finished = false;
  91. function animateResponseText() {
  92. if (finished || controller.signal.aborted) {
  93. responseText += remainText;
  94. if (responseText?.length === 0) {
  95. options.onError?.(new Error("empty response from server"));
  96. }
  97. return;
  98. }
  99. if (remainText.length > 0) {
  100. const fetchCount = Math.max(1, Math.round(remainText.length / 60));
  101. const fetchText = remainText.slice(0, fetchCount);
  102. responseText += fetchText;
  103. remainText = remainText.slice(fetchCount);
  104. options.onUpdate?.(responseText, fetchText);
  105. }
  106. requestAnimationFrame(animateResponseText);
  107. }
  108. animateResponseText();
  109. const finish = () => {
  110. if (!finished) {
  111. finished = true;
  112. options.onFinish(responseText + remainText);
  113. }
  114. };
  115. controller.signal.onabort = finish;
  116. // 记录上次的 remainText
  117. let previousRemainText = "";
  118. fetchEventSource(chatPath, {
  119. ...chatPayload,
  120. async onopen(res) {
  121. clearTimeout(requestTimeoutId);
  122. const contentType = res.headers.get("content-type");
  123. if (contentType?.startsWith("text/plain")) {
  124. responseText = await res.clone().text();
  125. return finish();
  126. }
  127. if (
  128. !res.ok ||
  129. !res.headers.get("content-type")?.startsWith(EventStreamContentType) ||
  130. res.status !== 200
  131. ) {
  132. const responseTexts = [responseText];
  133. let extraInfo = await res.clone().text();
  134. try {
  135. const resJson = await res.clone().json();
  136. extraInfo = prettyObject(resJson);
  137. } catch { }
  138. if (res.status === 401) {
  139. responseTexts.push(Locale.Error.Unauthorized);
  140. }
  141. if (extraInfo) {
  142. responseTexts.push(extraInfo);
  143. }
  144. responseText = responseTexts.join("\n\n");
  145. return finish();
  146. }
  147. },
  148. onmessage: (msg) => {
  149. const handlePublicMessage = () => {
  150. if (msg.data === "[DONE]" || finished) {
  151. return finish();
  152. }
  153. const text = msg.data;
  154. try {
  155. const json = JSON.parse(text);
  156. const choices = json.choices as Array<{ delta: { content: string } }>;
  157. const delta = choices[0]?.delta?.content;
  158. if (delta) {
  159. remainText += delta;
  160. }
  161. } catch (e) {
  162. console.error("[Request] parse error", text, msg);
  163. }
  164. };
  165. const handlePrivateMessage = () => {
  166. if (msg.event === 'finish') {
  167. return finish();
  168. }
  169. // 获取当前的数据
  170. const currentData = msg.data;
  171. // 计算新增的字符
  172. const newChars = currentData.substring(previousRemainText.length);
  173. remainText += newChars;
  174. // 更新 previousRemainText
  175. previousRemainText = currentData;
  176. };
  177. if (this.useApi === 'public') {
  178. handlePublicMessage();
  179. } else {
  180. handlePrivateMessage();
  181. }
  182. },
  183. async onclose() {
  184. finish();
  185. const session = useChatStore.getState().sessions[0];
  186. const data = {
  187. id: session.id,
  188. messages: session.messages.map(item => ({
  189. id: item.id,
  190. date: item.date,
  191. role: item.role,
  192. content: item.content,
  193. })),
  194. };
  195. await fetch('/api/bigModel', {
  196. method: 'POST',
  197. body: JSON.stringify(data),
  198. });
  199. },
  200. onerror(e) {
  201. options.onError?.(e);
  202. throw e;
  203. },
  204. openWhenHidden: true,
  205. });
  206. } catch (e) {
  207. options.onError?.(e as Error);
  208. }
  209. }
  210. async usage() {
  211. return {
  212. used: 0,
  213. total: 0,
  214. };
  215. }
  216. async models(): Promise<LLMModel[]> {
  217. return [];
  218. }
  219. }