@@ -16,32 +16,41 @@ import { getMessageTextContent } from "@/app/utils";
 import { bigModelApiKey, knowledgeId, template } from "../config";

 export class BigModelApi implements LLMApi {
-  path(): string {
-    return 'https://open.bigmodel.cn/api/paas/v4/chat/completions'
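+  // Selects the endpoint: 'public' is the open chat-completions API,
+  // 'private' is an application-specific SSE endpoint.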
+  public useApi: 'public' | 'private';
+  public publicPath: string;
+  public privatePath: string;
+
+  constructor() {
+    this.useApi = 'private';
+    this.publicPath = 'https://open.bigmodel.cn/api/paas/v4/chat/completions';
+    this.privatePath = 'https://open.bigmodel.cn/api/llm-application/open/model-api/1828613766624038913/sse-invoke';
   }

   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages = options.messages.map((item) => {
+      return {
+        role: item.role,
+        content: getMessageTextContent(item),
+      }
+    });
+
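+    // Drop messages whose content is empty.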
+    const userMessages = messages.filter(item => item.content);

-    if (messages.length % 2 === 0) {
-      messages.unshift({
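+    // Pad even-length histories with a placeholder user turn; presumably the
+    // endpoint expects the list to start with a user message and alternate roles.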
+    if (userMessages.length % 2 === 0) {
+      userMessages.unshift({
         role: "user",
-        content: " ",
+        content: "⠀",
       });
     }

-    const shouldStream = true;
-
-    // General-purpose model parameters
-    const requestPayload: any = {
-      messages: messages,
-      stream: shouldStream, // streamed reply
+    // Public (open-platform) model parameters
+    const publicParams: any = {
+      messages: userMessages,
+      stream: true, // streamed reply
       model: 'glm-4-0520', // model
       temperature: 0.01, // sampling temperature
       top_p: 0.7, // nucleus sampling
+      // advanced options
       tools: [
         {
           type: 'retrieval', // tool type: retrieval
@@ -55,15 +64,28 @@ export class BigModelApi implements LLMApi {
       ],
     };

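+    // Unlike the public API, the private endpoint is fed a 'prompt' array;
+    // the undefined fields are optional parameters left unset here.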
+    // Private model parameters
+    const privateParams: any = {
+      prompt: userMessages,
+      model: 'glm-4-0520', // model
+      temperature: 0.01, // sampling temperature
+      top_p: 0.7, // nucleus sampling
+      // advanced options
+      request_id: 'jianke2024',
+      returnType: undefined,
+      knowledge_ids: undefined,
+      document_ids: undefined,
+    };
+
     const controller = new AbortController();

     options.onController?.(controller);

     try {
-      const chatPath = this.path();
+      const chatPath = this.useApi === 'public' ? this.publicPath : this.privatePath;
       const chatPayload = {
         method: "POST",
-        body: JSON.stringify(requestPayload),
+        body: JSON.stringify(this.useApi === 'public' ? publicParams : privateParams),
         signal: controller.signal,
         headers: {
           'Content-Type': 'application/json',
@@ -72,99 +94,89 @@ export class BigModelApi implements LLMApi {
         },
       };

-      // make a fetch request
-      const requestTimeoutId = setTimeout(
-        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
-      );
-
-      if (shouldStream) {
-        let responseText = "";
-        let remainText = "";
-        let finished = false;
-
-        // animate response to make it look smooth
-        function animateResponseText() {
-          if (finished || controller.signal.aborted) {
-            responseText += remainText;
-            if (responseText?.length === 0) {
-              options.onError?.(new Error("empty response from server"));
-            }
-            return;
-          }
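+      // Abort the request if it exceeds REQUEST_TIMEOUT_MS.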
+      const requestTimeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
+
+      let responseText = "";
+      let remainText = "";
+      let finished = false;

-          if (remainText.length > 0) {
-            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-            const fetchText = remainText.slice(0, fetchCount);
-            responseText += fetchText;
-            remainText = remainText.slice(fetchCount);
-            options.onUpdate?.(responseText, fetchText);
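+      // Drain remainText a few characters per animation frame so the
+      // streamed reply renders smoothly.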
+      function animateResponseText() {
+        if (finished || controller.signal.aborted) {
+          responseText += remainText;
+          if (responseText?.length === 0) {
+            options.onError?.(new Error("empty response from server"));
           }
+          return;
+        }

-          requestAnimationFrame(animateResponseText);
+        if (remainText.length > 0) {
+          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+          const fetchText = remainText.slice(0, fetchCount);
+          responseText += fetchText;
+          remainText = remainText.slice(fetchCount);
+          options.onUpdate?.(responseText, fetchText);
         }

-        // start animation
-        animateResponseText();
+        requestAnimationFrame(animateResponseText);
+      }

-        const finish = () => {
-          if (!finished) {
-            finished = true;
-            options.onFinish(responseText + remainText);
-          }
-        };
+      animateResponseText();

-        controller.signal.onabort = finish;
+      const finish = () => {
+        if (!finished) {
+          finished = true;
+          options.onFinish(responseText + remainText);
+        }
+      };

-        fetchEventSource(chatPath, {
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log("[Baidu] request response content type: ", contentType);
+      controller.signal.onabort = finish;
+      // Record the previous remainText
+      let previousRemainText = "";
+      fetchEventSource(chatPath, {
+        ...chatPayload,
+        async onopen(res) {
+          clearTimeout(requestTimeoutId);
+          const contentType = res.headers.get("content-type");
+
+          if (contentType?.startsWith("text/plain")) {
+            responseText = await res.clone().text();
+            return finish();
+          }

-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
+          if (
+            !res.ok ||
+            !res.headers.get("content-type")?.startsWith(EventStreamContentType) ||
+            res.status !== 200
+          ) {
+            const responseTexts = [responseText];
+            let extraInfo = await res.clone().text();
+            try {
+              const resJson = await res.clone().json();
+              extraInfo = prettyObject(resJson);
+            } catch { }

-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch { }
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
+            if (res.status === 401) {
+              responseTexts.push(Locale.Error.Unauthorized);
+            }

-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
+            if (extraInfo) {
+              responseTexts.push(extraInfo);
+            }

-              responseText = responseTexts.join("\n\n");
+            responseText = responseTexts.join("\n\n");

-              return finish();
-            }
-          },
-          onmessage(msg) {
+            return finish();
+          }
+        },
+        onmessage: (msg) => {
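+          // The public endpoint streams JSON chunks; each delta lives in
+          // choices[0].delta.content.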
+          const handlePublicMessage = () => {
             if (msg.data === "[DONE]" || finished) {
               return finish();
             }
             const text = msg.data;
             try {
               const json = JSON.parse(text);
-              const choices = json.choices as Array<{
-                delta: { content: string };
-              }>;
+              const choices = json.choices as Array<{ delta: { content: string } }>;
               const delta = choices[0]?.delta?.content;
               if (delta) {
                 remainText += delta;
@@ -172,43 +184,55 @@ export class BigModelApi implements LLMApi {
             } catch (e) {
               console.error("[Request] parse error", text, msg);
             }
-          },
-          async onclose() {
-            finish();
-            const session = useChatStore.getState().sessions[0];
-            const data = {
-              id: session.id,
-              messages: session.messages.map(item => {
-                return {
-                  id: item.id,
-                  date: item.date,
-                  role: item.role,
-                  content: item.content,
-                }
-              })
+          };
+
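+          // The private SSE endpoint appears to resend the full accumulated
+          // text on each event, so only the suffix beyond previousRemainText
+          // is appended.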
+          const handlePrivateMessage = () => {
+            if (msg.event === 'finish') {
+              return finish();
             }
-            await fetch('/api/bigModel', {
-              method: 'POST',
-              body: JSON.stringify(data)
-            });
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
-      } else {
-        const res = await fetch(chatPath, chatPayload);
-        clearTimeout(requestTimeoutId);
-        const resJson = await res.json();
-        const message = resJson?.result;
-        options.onFinish(message);
-      }
+            // Get the current data
+            const currentData = msg.data;
+            // Compute the newly added characters
+            const newChars = currentData.substring(previousRemainText.length);
+            remainText += newChars;
+            // Update previousRemainText
+            previousRemainText = currentData;
+          };
+
+          if (this.useApi === 'public') {
+            handlePublicMessage();
+          } else {
+            handlePrivateMessage();
+          }
+        },
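+        // When the stream closes, persist the current session via the local
+        // /api/bigModel route.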
+        async onclose() {
+          finish();
+          const session = useChatStore.getState().sessions[0];
+          const data = {
+            id: session.id,
+            messages: session.messages.map(item => ({
+              id: item.id,
+              date: item.date,
+              role: item.role,
+              content: item.content,
+            })),
+          };
+          await fetch('/api/bigModel', {
+            method: 'POST',
+            body: JSON.stringify(data),
+          });
+        },
+        onerror(e) {
+          options.onError?.(e);
+          throw e;
+        },
+        openWhenHidden: true,
+      });
     } catch (e) {
       options.onError?.(e as Error);
     }
   }
+
   async usage() {
     return {
       used: 0,