bigmodel.ts

  1. "use client";
  2. import { REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. LLMApi,
  7. LLMModel,
  8. } from "../api";
  9. import Locale from "../../locales";
  10. import {
  11. EventStreamContentType,
  12. fetchEventSource,
  13. } from "@fortaine/fetch-event-source";
  14. import { prettyObject } from "@/app/utils/format";
  15. import { getMessageTextContent } from "@/app/utils";
  16. import { bigModelApiKey, knowledgeId, template } from "../config";
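
/**
 * LLMApi implementation for Zhipu's BigModel (GLM) chat-completions API.
 * It streams the reply over SSE, smooths the output with a small
 * requestAnimationFrame animation, and attaches a knowledge-base
 * retrieval tool to every request.
 */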
export class BigModelApi implements LLMApi {
  path(): string {
    return "https://open.bigmodel.cn/api/paas/v4/chat/completions";
  }
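
  // Sends the chat history to GLM-4 and reports progress through the
  // ChatOptions callbacks (onUpdate / onFinish / onError / onController).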
  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));
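    // Keep the message count odd (user turn first and last): when the count
    // is even, pad the front with a blank user message so the alternating
    // history still starts with a user turn.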
    if (messages.length % 2 === 0) {
      messages.unshift({
        role: "user",
        content: " ",
      });
    }
    const shouldStream = true;
    // Request payload for the chat-completions endpoint
    const requestPayload: any = {
      messages: messages,
      stream: shouldStream, // stream the reply
      model: "glm-4-0520", // model name
      temperature: 0.01, // sampling temperature
      top_p: 0.7, // nucleus sampling
      tools: [
        {
          type: "retrieval", // retrieval tool
          retrieval: {
            knowledge_id: knowledgeId, // knowledge base ID
            prompt_template: template.content,
          },
        },
      ],
    };
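
    // Hand the AbortController to the caller so the UI can cancel the
    // request; the same controller backs the request timeout below.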
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: {
          "Content-Type": "application/json",
          // API key
          Authorization: bigModelApiKey,
        },
      };
      // Abort the request if it takes too long
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
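
      // Streaming path: SSE deltas are buffered in remainText and drained a
      // few characters per animation frame, so the reply "types out" smoothly
      // instead of appearing in large chunks.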
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        // Animate the response so it looks smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }
          if (remainText.length > 0) {
            // Drain roughly 1/60 of the buffer per frame, at least one character
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }
          requestAnimationFrame(animateResponseText);
        }
        // Start the animation
        animateResponseText();
        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };
        controller.signal.onabort = finish;
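
        // Open the SSE connection: onopen validates the response, onmessage
        // parses OpenAI-style delta chunks, onclose finalizes and persists
        // the session.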
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log("[BigModel] request response content type: ", contentType);
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              // Collect whatever the server returned as an error message
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}
              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }
              if (extraInfo) {
                responseTexts.push(extraInfo);
              }
              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
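          // When the stream closes, finalize the reply and POST a snapshot of
          // the current session to the local /api/bigModel route (presumably
          // for server-side persistence).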
          async onclose() {
            finish();
            const session = useChatStore.getState().sessions[0];
            const data = {
              id: session.id,
              messages: session.messages.map((item) => ({
                id: item.id,
                date: item.date,
                role: item.role,
                content: item.content,
              })),
            };
            await fetch("/api/bigModel", {
              method: "POST",
              body: JSON.stringify(data),
            });
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        // Non-streaming path (unreachable while shouldStream is hard-coded
        // to true): read the reply from the first choice.
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);
        const resJson = await res.json();
        const message = resJson?.choices?.[0]?.message?.content;
        options.onFinish(message);
      }
    } catch (e) {
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
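
// Usage sketch (illustrative; ChatOptions is defined in ../api and the
// callbacks below mirror how this class invokes them):
//
//   const api = new BigModelApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     onUpdate: (full, delta) => console.log(delta),
//     onFinish: (full) => console.log("done:", full),
//     onError: (err) => console.error(err),
//   });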