bigmodel.ts

  1. "use client";
  2. import { REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. LLMApi,
  7. LLMModel,
  8. } from "../api";
  9. import Locale from "../../locales";
  10. import {
  11. EventStreamContentType,
  12. fetchEventSource,
  13. } from "@fortaine/fetch-event-source";
  14. import { prettyObject } from "@/app/utils/format";
  15. import { getMessageTextContent } from "@/app/utils";
  16. import { bigModelApiKey, knowledgeId } from "../config";
export class BigModelApi implements LLMApi {
  path(): string {
    return "https://open.bigmodel.cn/api/paas/v4/chat/completions";
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    // If the message count is even, prepend a blank user message so the
    // alternating history begins with a user turn.
    if (messages.length % 2 === 0) {
      messages.unshift({
        role: "user",
        content: " ",
      });
    }

    const shouldStream = true;

    // Common model parameters
    const requestPayload: any = {
      messages,
      stream: shouldStream, // streamed reply
      model: "glm-4-flash", // model name
      temperature: 0.95, // sampling temperature
      top_p: 0.7, // nucleus sampling
      tools: [
        {
          type: "retrieval", // the tool type is retrieval
          retrieval: {
            knowledge_id: knowledgeId, // knowledge-base ID
          },
        },
      ],
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: {
          "Content-Type": "application/json",
          // API key
          Authorization: bigModelApiKey,
        },
      };

      // Abort the request if it exceeds the timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // Animate the response so the streamed text renders smoothly.
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            // Drain roughly 1/60th of the pending text per frame, at least one character.
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // Start the animation.
        animateResponseText();

        const finish = async () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }

          // Persist the current session to the backend once the reply is complete.
          const session = useChatStore.getState().sessions[0];
          const data = {
            id: session.id,
            messages: session.messages.map((item) => ({
              id: item.id,
              date: item.date,
              role: item.role,
              content: item.content,
            })),
          };
          await fetch("/api/bigModel", {
            method: "POST",
            body: JSON.stringify(data),
          });
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[BigModel] request response content type: ",
              contentType,
            );

            // A plain-text body is the whole reply; finish immediately.
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            // Anything other than a 200 event stream is treated as an error reply.
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }
              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              // Each event carries an OpenAI-style chunk; append the delta text.
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        // The v4 chat/completions response is OpenAI-style, so read the reply
        // from choices[0].message.content rather than a `result` field.
        const message = resJson?.choices?.[0]?.message?.content ?? "";
        options.onFinish(message);
      }
    } catch (e) {
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
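
// ---------------------------------------------------------------------------
// Minimal caller sketch, assuming the ChatOptions callback shape that chat()
// consumes above (onUpdate/onFinish/onError/onController). The demoChat
// wrapper, the message literal, and the `as ChatOptions` cast are illustrative
// assumptions, not part of the original module.
// ---------------------------------------------------------------------------
async function demoChat() {
  const api = new BigModelApi();
  const controllerRef: { current?: AbortController } = {};

  await api.chat({
    messages: [{ role: "user", content: "Hello" }],
    onUpdate(full: string, delta: string) {
      // Fires once per animation frame with the text so far and the new slice.
      console.log("partial:", delta);
    },
    onFinish(message: string) {
      console.log("done:", message);
    },
    onError(err: Error) {
      console.error("chat failed:", err);
    },
    onController(controller: AbortController) {
      // Keep the controller to cancel the request later via controllerRef.current?.abort().
      controllerRef.current = controller;
    },
  } as ChatOptions);
}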