bigmodel.ts

  1. "use client";
  2. import { REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. LLMApi,
  7. LLMModel,
  8. } from "../api";
  9. import Locale from "../../locales";
  10. import {
  11. EventStreamContentType,
  12. fetchEventSource,
  13. } from "@fortaine/fetch-event-source";
  14. import { prettyObject } from "@/app/utils/format";
  15. import { getMessageTextContent } from "@/app/utils";
  16. import { bigModelApiKey, knowledgeId, mask } from "../config";
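
/**
 * Chat client for Zhipu's BigModel (GLM) open platform. Streams
 * completions over SSE from the chat/completions endpoint and attaches
 * a knowledge-base retrieval tool to every request.
 */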
export class BigModelApi implements LLMApi {
  path(): string {
    return "https://open.bigmodel.cn/api/paas/v4/chat/completions";
  }

  async chat(options: ChatOptions) {
    // Flatten any multimodal message content down to plain text.
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    const shouldStream = true;

    // Prepend the mask (preset prompt) as the first user message.
    if (mask.enabled) {
      messages.unshift({
        role: "user",
        content: mask.content,
      });
    }

    // General large-model request parameters
    const requestPayload: any = {
      messages,
      stream: shouldStream, // streamed reply
      model: "glm-4-0520", // model name
      temperature: 0.01, // sampling temperature
      top_p: 0.7, // nucleus (top-p) sampling
      tools: [
        {
          type: "retrieval", // use the retrieval tool
          retrieval: {
            knowledge_id: knowledgeId, // knowledge-base ID
          },
        },
      ],
    };
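
    // For reference, the serialized request body looks roughly like this
    // (a sketch; the knowledge-base ID comes from ../config):
    //
    // {
    //   "model": "glm-4-0520",
    //   "stream": true,
    //   "temperature": 0.01,
    //   "top_p": 0.7,
    //   "messages": [{ "role": "user", "content": "..." }],
    //   "tools": [{ "type": "retrieval", "retrieval": { "knowledge_id": "..." } }]
    // }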

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: {
          "Content-Type": "application/json",
          // API key for the BigModel open platform
          Authorization: bigModelApiKey,
        },
      };

      // Abort the request if no response arrives within the timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // Animate the response text so streaming looks smooth: each
        // animation frame drains roughly 1/60 of the pending buffer.
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation loop
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };
        controller.signal.onabort = finish;
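
        // Open the SSE connection: onopen validates the response,
        // onmessage queues streamed deltas for the animation loop, and
        // onclose finalizes the reply and persists the session.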
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[BigModel] request response content type: ",
              contentType,
            );

            // A plain-text body means the server did not stream.
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            // Anything other than a 200 event stream is an error; show
            // the response body (plus a 401 hint) to the user.
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }
              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              // Each event carries an OpenAI-style chunk; append its
              // delta to the buffer drained by the animation loop.
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          async onclose() {
            finish();
            // Persist the session to the backend once the stream ends
            // (sessions[0] is assumed to be the active session).
            const session = useChatStore.getState().sessions[0];
            const data = {
              id: session.id,
              messages: session.messages.map((item) => ({
                id: item.id,
                date: item.date,
                role: item.role,
                content: item.content,
              })),
            };
            await fetch("/api/bigModel", {
              method: "POST",
              body: JSON.stringify(data),
            });
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          // Keep the stream open while the page is hidden.
          openWhenHidden: true,
        });
      } else {
        // Non-streaming path (currently unreachable: shouldStream is
        // hardcoded to true). The API returns an OpenAI-style body, so
        // read choices[0].message.content rather than a `result` field.
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);
        const resJson = await res.json();
        const message = resJson?.choices?.[0]?.message?.content ?? "";
        options.onFinish(message);
      }
    } catch (e) {
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
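
// A minimal usage sketch (hypothetical caller; only the ChatOptions
// callbacks that chat() actually invokes are shown, and the message
// shape is assumed to be plain text):
//
// const api = new BigModelApi();
// await api.chat({
//   messages: [{ role: "user", content: "Hello" }],
//   onUpdate: (full, delta) => console.log(delta),
//   onFinish: (text) => console.log(text),
//   onError: (err) => console.error(err),
//   onController: (c) => { /* call c.abort() to cancel */ },
// });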