// chat.ts — chat session store (sessions, messages, memory, summarization)
  1. import {
  2. getMessageTextContent,
  3. isDalle3,
  4. safeLocalStorage,
  5. trimTopic,
  6. } from "../utils";
  7. import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
  8. import { nanoid } from "nanoid";
  9. import type {
  10. ClientApi,
  11. MultimodalContent,
  12. RequestMessage,
  13. } from "../client/api";
  14. import { getClientApi } from "../client/api";
  15. import { ChatControllerPool } from "../client/controller";
  16. import { showToast } from "../components/ui-lib";
  17. import {
  18. DEFAULT_INPUT_TEMPLATE,
  19. DEFAULT_MODELS,
  20. DEFAULT_SYSTEM_TEMPLATE,
  21. GEMINI_SUMMARIZE_MODEL,
  22. KnowledgeCutOffDate,
  23. ServiceProvider,
  24. StoreKey,
  25. SUMMARIZE_MODEL,
  26. } from "../constant";
  27. import Locale, { getLang } from "../locales";
  28. import { prettyObject } from "../utils/format";
  29. import { createPersistStore } from "../utils/store";
  30. import { estimateTokenLength } from "../utils/token";
  31. import { ModelConfig, ModelType, useAppConfig } from "./config";
  32. import { useAccessStore } from "./access";
  33. import { collectModelsWithDefaultModel } from "../utils/model";
  34. import { createEmptyMask, Mask } from "./mask";
  35. import { executeMcpAction } from "../mcp/actions";
  36. import { extractMcpJson, isMcpJson } from "../mcp/utils";
// Wrapper from ../utils; presumably falls back safely when
// window.localStorage is unavailable (e.g. SSR) — see safeLocalStorage.
const localStorage = safeLocalStorage();
// A tool (function) call attached to an assistant message; populated by the
// onBeforeTool / onAfterTool streaming callbacks in onUserInput.
export type ChatMessageTool = {
  id: string;
  // position of this call in the provider's tool-call list — TODO confirm
  index?: number;
  type?: string;
  // called function name plus its raw (string-encoded) arguments
  function?: {
    name: string;
    arguments?: string;
  };
  // result content after the tool has run
  content?: string;
  isError?: boolean;
  errorMsg?: string;
};
// A chat message as stored in a session: the wire-level RequestMessage plus
// local bookkeeping (id, date, streaming/error flags, tool calls).
export type ChatMessage = RequestMessage & {
  date: string;
  // true while the assistant reply is still being streamed
  streaming?: boolean;
  isError?: boolean;
  id: string;
  // model that produced this message (assistant messages)
  model?: ModelType;
  tools?: ChatMessageTool[];
  audio_url?: string;
  // true when this message is a fed-back MCP tool result (see checkMcpJson)
  isMcpResponse?: boolean;
};
  60. export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  61. return {
  62. id: nanoid(),
  63. date: new Date().toLocaleString(),
  64. role: "user",
  65. content: "",
  66. ...override,
  67. };
  68. }
// Running counters for a session. NOTE(review): only charCount is actually
// maintained today (see updateStat); token/word counts stay 0.
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}
// One conversation: its messages, summarized memory, stats, and the mask
// (persona + model config) it runs under.
export interface ChatSession {
  id: string;
  topic: string;
  // rolling summary of older messages, produced by summarizeSession
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  // number of messages already folded into memoryPrompt
  lastSummarizeIndex: number;
  // messages before this index are excluded after "clear context"
  clearContextIndex?: number;
  mask: Mask;
}
// Localized default session title; sessions keep it until auto-titling runs.
export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
// Canned greeting shown for a fresh chat.
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});
  90. function createEmptySession(): ChatSession {
  91. return {
  92. id: nanoid(),
  93. topic: DEFAULT_TOPIC,
  94. memoryPrompt: "",
  95. messages: [],
  96. stat: {
  97. tokenCount: 0,
  98. wordCount: 0,
  99. charCount: 0,
  100. },
  101. lastUpdate: Date.now(),
  102. lastSummarizeIndex: 0,
  103. mask: createEmptyMask(),
  104. };
  105. }
  106. function getSummarizeModel(
  107. currentModel: string,
  108. providerName: string,
  109. ): string[] {
  110. // if it is using gpt-* models, force to use 4o-mini to summarize
  111. if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
  112. const configStore = useAppConfig.getState();
  113. const accessStore = useAccessStore.getState();
  114. const allModel = collectModelsWithDefaultModel(
  115. configStore.models,
  116. [configStore.customModels, accessStore.customModels].join(","),
  117. accessStore.defaultModel,
  118. );
  119. const summarizeModel = allModel.find(
  120. (m) => m.name === SUMMARIZE_MODEL && m.available,
  121. );
  122. if (summarizeModel) {
  123. return [
  124. summarizeModel.name,
  125. summarizeModel.provider?.providerName as string,
  126. ];
  127. }
  128. }
  129. if (currentModel.startsWith("gemini")) {
  130. return [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google];
  131. }
  132. return [currentModel, providerName];
  133. }
  134. function countMessages(msgs: ChatMessage[]) {
  135. return msgs.reduce(
  136. (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
  137. 0,
  138. );
  139. }
  140. function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  141. const cutoff =
  142. KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  143. // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
  144. const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
  145. var serviceProvider = "OpenAI";
  146. if (modelInfo) {
  147. // TODO: auto detect the providerName from the modelConfig.model
  148. // Directly use the providerName from the modelInfo
  149. serviceProvider = modelInfo.provider.providerName;
  150. }
  151. const vars = {
  152. ServiceProvider: serviceProvider,
  153. cutoff,
  154. model: modelConfig.model,
  155. time: new Date().toString(),
  156. lang: getLang(),
  157. input: input,
  158. };
  159. let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;
  160. // remove duplicate
  161. if (input.startsWith(output)) {
  162. output = "";
  163. }
  164. // must contains {{input}}
  165. const inputVar = "{{input}}";
  166. if (!output.includes(inputVar)) {
  167. output += "\n" + inputVar;
  168. }
  169. Object.entries(vars).forEach(([name, value]) => {
  170. const regex = new RegExp(`{{${name}}}`, "g");
  171. output = output.replace(regex, value.toString()); // Ensure value is a string
  172. });
  173. return output;
  174. }
// Initial persisted store shape: one empty session, selected, no last input.
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
  lastInput: "",
};
/**
 * Global chat store: session list and selection, message sending with
 * streaming callbacks, long-term memory summarization, MCP tool-result
 * feedback, and persisted-state migrations (version 3.3).
 */
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    // Wrap the raw state getter so that get() also carries the methods
    // below, letting methods call each other via get().someMethod().
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      // Duplicate the current session (topic, messages, mask) and make the
      // copy the active session at index 0.
      forkSession() {
        // get the current session
        const currentSession = get().currentSession();
        if (!currentSession) return;

        const newSession = createEmptySession();

        newSession.topic = currentSession.topic;
        newSession.messages = [...currentSession.messages];
        // copy mask and its modelConfig so edits to the fork do not
        // mutate the original session's mask
        newSession.mask = {
          ...currentSession.mask,
          modelConfig: {
            ...currentSession.mask.modelConfig,
          },
        };

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [newSession, ...state.sessions],
        }));
      },

      // Drop every session and start over with a single empty one.
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },

      // Reorder sessions (e.g. drag & drop) while keeping the selection
      // pointing at the same logical session.
      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // modify current session id
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },

      // Create a new session at index 0; when a mask is given, its
      // modelConfig is layered over the global model config and the mask
      // name becomes the session topic.
      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      // Move the selection by delta, wrapping around both ends.
      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },

      // Delete a session, keeping at least one session alive, with a
      // 5-second toast offering to revert the deletion.
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        // shift selection left when deleting before it; clamp to new bounds
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undo delete action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },

      // Return the active session, clamping a stale/out-of-range index
      // back into bounds first.
      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];

        return session;
      },

      // Post-processing after a bot message finishes: refresh the session,
      // update stats, run MCP handling, and maybe auto-summarize.
      onNewMessage(message: ChatMessage, targetSession: ChatSession) {
        get().updateTargetSession(targetSession, (session) => {
          // concat() clones the array so subscribers see a new reference
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });

        get().updateStat(message, targetSession);

        get().checkMcpJson(message);

        get().summarizeSession(false, targetSession);
      },

      // Send a user message (optionally with attached images, or as an MCP
      // tool result) and stream the assistant reply into the session.
      async onUserInput(
        content: string,
        attachImages?: string[],
        isMcpResponse?: boolean,
      ) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        // MCP Response no need to fill template
        let mContent: string | MultimodalContent[] = isMcpResponse
          ? content
          : fillTemplateWith(content, modelConfig);

        // with images attached, switch to multimodal content; note the text
        // part uses the raw `content`, not the templated string above
        if (!isMcpResponse && attachImages && attachImages.length > 0) {
          mContent = [
            ...(content ? [{ type: "text" as const, text: content }] : []),
            ...attachImages.map((url) => ({
              type: "image_url" as const,
              image_url: { url },
            })),
          ];
        }

        let userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
          isMcpResponse,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        // index the bot message will occupy; fallback key for the
        // controller pool when botMessage.id is missing
        const messageIndex = session.messages.length + 1;

        // save user's and bot's message
        get().updateTargetSession(session, (session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        const api: ClientApi = getClientApi(modelConfig.providerName);

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          // streaming chunk: overwrite content and force a state refresh
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
          },
          async onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              botMessage.date = new Date().toLocaleString();
              get().onNewMessage(botMessage, session);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          // a tool call started streaming: append it to the bot message
          onBeforeTool(tool: ChatMessageTool) {
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
          },
          // a tool call completed: replace the matching entry by id
          onAfterTool(tool: ChatMessageTool) {
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id == t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            // user-initiated aborts are not flagged as errors
            const isAborted = error.message?.includes?.("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },

      // Wrap the session's summarized memory as a system message, or
      // return undefined when no memory exists yet.
      getMemoryPrompt() {
        const session = get().currentSession();

        if (session.memoryPrompt.length) {
          return {
            role: "system",
            content: Locale.Store.Prompt.History(session.memoryPrompt),
            date: "",
          } as ChatMessage;
        }
      },

      // Assemble the message list actually sent to the model: system
      // prompt + long-term memory + mask context + recent messages,
      // bounded by a token budget and the clear-context marker.
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          (session.mask.modelConfig.model.startsWith("gpt-") ||
            session.mask.modelConfig.model.startsWith("chatgpt-"));

        var systemPrompts: ChatMessage[] = [];

        systemPrompts = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];

        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        const memoryPrompt = get().getMemoryPrompt();

        // long term memory: only sent when enabled, non-empty, and not
        // invalidated by a later "clear context"
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts =
          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // lets concat send messages, including 4 parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if user has cleared history messages, we should exclude the memory too.
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // get recent messages as much as possible: walk newest-first,
        // skipping errored messages, until the token budget runs out
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },

      // Mutate one message in place via the updater, then write the
      // sessions array back so the store notifies subscribers.
      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      // Wipe a session's messages and its summarized memory.
      resetSession(session: ChatSession) {
        get().updateTargetSession(session, (session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },

      // Auto-generate the session title when long enough (or on demand via
      // refreshTitle), and compress older history into memoryPrompt once it
      // crosses the configured threshold.
      summarizeSession(
        refreshTitle: boolean = false,
        targetSession: ChatSession,
      ) {
        const config = useAppConfig.getState();
        const session = targetSession;
        const modelConfig = session.mask.modelConfig;
        // skip summarize when using dalle3?
        if (isDalle3(modelConfig.model)) {
          return;
        }

        // if not config compressModel, then using getSummarizeModel
        const [model, providerName] = modelConfig.compressModel
          ? [modelConfig.compressModel, modelConfig.compressProviderName]
          : getSummarizeModel(
              session.mask.modelConfig.model,
              session.mask.modelConfig.providerName,
            );
        const api: ClientApi = getClientApi(providerName as ServiceProvider);

        // error messages are filtered out further below, before compression
        const messages = session.messages;

        // should summarize topic after chating more than 50 words
        const SUMMARIZE_MIN_LEN = 50;
        if (
          (config.enableAutoGenerateTitle &&
            session.topic === DEFAULT_TOPIC &&
            countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
          refreshTitle
        ) {
          const startIndex = Math.max(
            0,
            messages.length - modelConfig.historyMessageCount,
          );
          // last N messages plus the topic-generation prompt
          const topicMessages = messages
            .slice(
              startIndex < messages.length ? startIndex : messages.length - 1,
              messages.length,
            )
            .concat(
              createMessage({
                role: "user",
                content: Locale.Store.Prompt.Topic,
              }),
            );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model,
              stream: false,
              providerName,
            },
            onFinish(message, responseRes) {
              if (responseRes?.status === 200) {
                get().updateTargetSession(
                  session,
                  (session) =>
                    (session.topic =
                      message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
                );
              }
            },
          });
        }
        // summarize everything after the last summary / clear-context point
        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        // too long for one request: keep only the most recent slice
        if (historyMsgLength > (modelConfig?.max_tokens || 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }
        const memoryPrompt = get().getMemoryPrompt();
        if (memoryPrompt) {
          // add memory prompt
          toBeSummarizedMsgs.unshift(memoryPrompt);
        }

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          // max_tokens is deliberately destructured OUT of the config sent
          // with the summarize request
          const { max_tokens, ...modelcfg } = modelConfig;
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelcfg,
              stream: true,
              model,
              providerName,
            },
            // streaming: writes straight onto the session object (not via
            // set); the persisted fields are committed in onFinish
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message, responseRes) {
              if (responseRes?.status === 200) {
                console.log("[Memory] ", message);
                get().updateTargetSession(session, (session) => {
                  session.lastSummarizeIndex = lastSummarizeIndex;
                  session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
                });
              }
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },

      updateStat(message: ChatMessage, session: ChatSession) {
        get().updateTargetSession(session, (session) => {
          // NOTE(review): for multimodal content this is the array length,
          // not a character count
          session.stat.charCount += message.content.length;
          // TODO: should update chat count and word count
        });
      },

      // Find the stored session matching targetSession by id, apply the
      // updater to it, and write the sessions array back.
      updateTargetSession(
        targetSession: ChatSession,
        updater: (session: ChatSession) => void,
      ) {
        const sessions = get().sessions;
        const index = sessions.findIndex((s) => s.id === targetSession.id);
        if (index < 0) return;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      // Wipe IndexedDB and localStorage, then reload the page.
      async clearAllData() {
        await indexedDBStorage.clear();
        localStorage.clear();
        location.reload();
      },

      setLastInput(lastInput: string) {
        set({
          lastInput,
        });
      },

      /** check if the message contains MCP JSON and execute the MCP action */
      checkMcpJson(message: ChatMessage) {
        const content = getMessageTextContent(message);
        if (isMcpJson(content)) {
          try {
            const mcpRequest = extractMcpJson(content);
            if (mcpRequest) {
              console.debug("[MCP Request]", mcpRequest);

              executeMcpAction(mcpRequest.clientId, mcpRequest.mcp)
                .then((result) => {
                  console.log("[MCP Response]", result);
                  const mcpResponse =
                    typeof result === "object"
                      ? JSON.stringify(result)
                      : String(result);
                  // feed the tool result back into the chat as an MCP
                  // response (skips templating in onUserInput)
                  get().onUserInput(
                    `\`\`\`json:mcp:${mcpRequest.clientId}\n${mcpResponse}\n\`\`\``,
                    [],
                    true,
                  );
                })
                .catch((error) => showToast(String(error)));
            }
          } catch (error) {
            console.error("[MCP Error]", error);
          }
        }
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.3,
    // Upgrade older persisted states step by step to the current shape.
    migrate(persistedState, version) {
      const state = persistedState as any;
      // deep-clone before mutating so the raw persisted object is untouched
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        // v2: rebuild every session on top of a fresh empty session
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable `enableInjectSystemPrompts` attribute for old sessions.
      // Resolve issue of old sessions not automatically enabling.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // Exclude those already set by user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // Because users may have changed this configuration,
            // the user's current configuration is used instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      // add default summarize model for every session
      if (version < 3.2) {
        newState.sessions.forEach((s) => {
          const config = useAppConfig.getState();
          s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
          s.mask.modelConfig.compressProviderName =
            config.modelConfig.compressProviderName;
        });
      }

      // revert default summarize model for every session
      if (version < 3.3) {
        newState.sessions.forEach((s) => {
          const config = useAppConfig.getState();
          s.mask.modelConfig.compressModel = "";
          s.mask.modelConfig.compressProviderName = "";
        });
      }

      return newState as any;
    },
  },
);