bigmodel.ts

  1. "use client";
  2. import { REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. LLMApi,
  7. LLMModel,
  8. } from "../api";
  9. import Locale from "../../locales";
  10. import {
  11. EventStreamContentType,
  12. fetchEventSource,
  13. } from "@fortaine/fetch-event-source";
  14. import { prettyObject } from "@/app/utils/format";
  15. import { getMessageTextContent } from "@/app/utils";
  16. import { bigModelApiKey, knowledgeId, template } from "../config";

export class BigModelApi implements LLMApi {
  public useApi: 'public' | 'private';
  public publicPath: string;
  public privatePath: string;

  constructor() {
    this.useApi = 'private';
    // Public (open platform) chat completions endpoint
    this.publicPath = 'https://open.bigmodel.cn/api/paas/v4/chat/completions';
    // Private (application-specific) SSE invocation endpoint
    this.privatePath = 'https://open.bigmodel.cn/api/llm-application/open/model-api/1828613766624038913/sse-invoke';
  }
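
  // Sends the conversation to the configured BigModel endpoint and streams the
  // reply back through the ChatOptions callbacks (onUpdate/onFinish/onError).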
  async chat(options: ChatOptions) {
    const messages = options.messages.map((item) => {
      return {
        role: item.role,
        content: getMessageTextContent(item),
      };
    });
    const userMessages = messages.filter((item) => item.content);
    // If the filtered message count is even, prepend a blank (braille-space)
    // user message so the sequence sent to the API has an odd length.
    if (userMessages.length % 2 === 0) {
      userMessages.unshift({
        role: "user",
        content: "⠀",
      });
    }
    // Parameters for the public (open platform) model
    const publicParams: any = {
      messages: userMessages,
      stream: true, // streamed replies
      model: 'glm-4-0520', // model
      temperature: 0.01, // sampling temperature
      top_p: 0.7, // nucleus sampling
      // Advanced configuration
      tools: [
        {
          type: 'retrieval', // retrieval tool
          retrieval: {
            // Knowledge base ID
            knowledge_id: knowledgeId,
            // Knowledge base prompt template
            prompt_template: template.content,
          },
        },
      ],
    };
    // Parameters for the private (application-specific) model
    const privateParams: any = {
      prompt: userMessages,
      // model: 'glm-4-0520', // model
      // temperature: 0.01, // sampling temperature
      // top_p: 0.7, // nucleus sampling
      // Advanced configuration
      request_id: 'jkec2024',
      returnType: undefined,
      knowledge_ids: undefined,
      document_ids: undefined,
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.useApi === 'public' ? this.publicPath : this.privatePath;
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(this.useApi === 'public' ? publicParams : privateParams),
        signal: controller.signal,
        headers: {
          'Content-Type': 'application/json',
          // API key
          Authorization: bigModelApiKey,
        },
      };
      const requestTimeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);

      let responseText = "";
      let remainText = "";
      let finished = false;
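
      // Typewriter effect: drain remainText into responseText a few characters
      // per animation frame so the streamed reply renders smoothly.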
      function animateResponseText() {
        if (finished || controller.signal.aborted) {
          responseText += remainText;
          if (responseText?.length === 0) {
            // options.onError?.(new Error("empty response from server"));
            options.onError?.(new Error("Request aborted, please check your network connection."));
          }
          return;
        }
        if (remainText.length > 0) {
          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
          const fetchText = remainText.slice(0, fetchCount);
          responseText += fetchText;
          remainText = remainText.slice(fetchCount);
          options.onUpdate?.(responseText, fetchText);
        }
        requestAnimationFrame(animateResponseText);
      }
      animateResponseText();

      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText);
        }
      };
      controller.signal.onabort = finish;

      // Data received in the previous SSE message; the private endpoint streams
      // cumulative text, so only the newly appended suffix is queued each time.
      let previousRemainText = "";

      fetchEventSource(chatPath, {
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
          const contentType = res.headers.get("content-type");
          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
          }
          if (
            !res.ok ||
            !contentType?.startsWith(EventStreamContentType) ||
            res.status !== 200
          ) {
            const responseTexts = [responseText];
            let extraInfo = await res.clone().text();
            try {
              const resJson = await res.clone().json();
              extraInfo = prettyObject(resJson);
            } catch { }
            if (res.status === 401) {
              responseTexts.push(Locale.Error.Unauthorized);
            }
            if (extraInfo) {
              responseTexts.push(extraInfo);
            }
            responseText = responseTexts.join("\n\n");
            return finish();
          }
        },
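        // Route each SSE message to the handler for the active mode: the public
        // endpoint sends OpenAI-style content deltas, the private endpoint sends
        // the cumulative text generated so far.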
        onmessage: (msg) => {
          const handlePublicMessage = () => {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{ delta: { content: string } }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          };
          const handlePrivateMessage = () => {
            if (msg.event === 'finish') {
              return finish();
            }
            // Current cumulative data
            const currentData = msg.data;
            // Queue only the characters not seen in the previous message
            const newChars = currentData.substring(previousRemainText.length);
            remainText += newChars;
            // Remember everything received so far
            previousRemainText = currentData;
          };
          if (this.useApi === 'public') {
            handlePublicMessage();
          } else {
            handlePrivateMessage();
          }
        },
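        // When the stream closes, finalize the reply and persist the current
        // session to the backend via /api/bigModel.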
        async onclose() {
          finish();
          const session = useChatStore.getState().sessions[0];
          const data = {
            id: session.id,
            messages: session.messages.map((item) => ({
              id: item.id,
              date: item.date,
              role: item.role,
              content: item.content,
            })),
          };
          await fetch('/api/bigModel', {
            method: 'POST',
            body: JSON.stringify(data),
          });
        },
        onerror(e) {
          options.onError?.(e);
          throw e;
        },
        openWhenHidden: true,
      });
    } catch (e) {
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}