chat.ts 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785
  1. import { getMessageTextContent, trimTopic } from "../utils";
  2. import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
  3. import { nanoid } from "nanoid";
  4. import type {
  5. ClientApi,
  6. MultimodalContent,
  7. RequestMessage,
  8. } from "../client/api";
  9. import { getClientApi } from "../client/api";
  10. import { ChatControllerPool } from "../client/controller";
  11. import { showToast } from "../components/ui-lib";
  12. import {
  13. DEFAULT_INPUT_TEMPLATE,
  14. DEFAULT_MODELS,
  15. DEFAULT_SYSTEM_TEMPLATE,
  16. KnowledgeCutOffDate,
  17. StoreKey,
  18. } from "../constant";
  19. import Locale, { getLang } from "../locales";
  20. import { isDalle3, safeLocalStorage } from "../utils";
  21. import { prettyObject } from "../utils/format";
  22. import { createPersistStore } from "../utils/store";
  23. import { estimateTokenLength } from "../utils/token";
  24. import { ModelConfig, ModelType, useAppConfig } from "./config";
  25. import { createEmptyMask, Mask } from "./mask";
// localStorage accessor obtained via safeLocalStorage() (see ../utils);
// presumably falls back gracefully when window.localStorage is unavailable — see helper.
const localStorage = safeLocalStorage();

// One tool invocation attached to an assistant message while/after the
// model calls a tool (see onBeforeTool/onAfterTool in onUserInput).
export type ChatMessageTool = {
  id: string;
  index?: number;
  type?: string;
  function?: {
    name: string;
    // NOTE(review): looks like a serialized argument string — confirm against the client API
    arguments?: string;
  };
  // tool output, once available
  content?: string;
  isError?: boolean;
};
// A chat message as stored in a session: the request shape sent to the
// model plus local bookkeeping (id, date, streaming/error state).
export type ChatMessage = RequestMessage & {
  date: string;
  // true while the assistant reply is still being streamed in
  streaming?: boolean;
  isError?: boolean;
  id: string;
  // model that produced this message (set on assistant messages in onUserInput)
  model?: ModelType;
  tools?: ChatMessageTool[];
};
  46. export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  47. return {
  48. id: nanoid(),
  49. date: new Date().toLocaleString(),
  50. role: "user",
  51. content: "",
  52. ...override,
  53. };
  54. }
// Aggregate statistics for a session; only charCount is currently
// maintained (see updateStat's TODO).
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;
  // rolling summary of older messages, used as long-term memory
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  // epoch millis of the last message activity
  lastUpdate: number;
  // messages before this index have already been folded into memoryPrompt
  lastSummarizeIndex: number;
  // messages before this index are excluded from the request context
  clearContextIndex?: number;
  mask: Mask;
}
export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
// Localized greeting message for a brand-new chat.
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});
  76. function createEmptySession(): ChatSession {
  77. return {
  78. id: nanoid(),
  79. topic: DEFAULT_TOPIC,
  80. memoryPrompt: "",
  81. messages: [],
  82. stat: {
  83. tokenCount: 0,
  84. wordCount: 0,
  85. charCount: 0,
  86. },
  87. lastUpdate: Date.now(),
  88. lastSummarizeIndex: 0,
  89. mask: createEmptyMask(),
  90. };
  91. }
  92. function countMessages(msgs: ChatMessage[]) {
  93. return msgs.reduce(
  94. (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
  95. 0,
  96. );
  97. }
  98. function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  99. const cutoff =
  100. KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  101. // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
  102. const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
  103. var serviceProvider = "OpenAI";
  104. if (modelInfo) {
  105. // TODO: auto detect the providerName from the modelConfig.model
  106. // Directly use the providerName from the modelInfo
  107. serviceProvider = modelInfo.provider.providerName;
  108. }
  109. const vars = {
  110. ServiceProvider: serviceProvider,
  111. cutoff,
  112. model: modelConfig.model,
  113. time: new Date().toString(),
  114. lang: getLang(),
  115. input: input,
  116. };
  117. let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;
  118. // remove duplicate
  119. if (input.startsWith(output)) {
  120. output = "";
  121. }
  122. // must contains {{input}}
  123. const inputVar = "{{input}}";
  124. if (!output.includes(inputVar)) {
  125. output += "\n" + inputVar;
  126. }
  127. Object.entries(vars).forEach(([name, value]) => {
  128. const regex = new RegExp(`{{${name}}}`, "g");
  129. output = output.replace(regex, value.toString()); // Ensure value is a string
  130. });
  131. return output;
  132. }
// Initial store state: one empty session, selected, no pending input.
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
  lastInput: "",
};
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    // Wrap the raw getter so callers see the persisted state merged with
    // the methods object below — this lets methods invoke each other
    // through get() without referencing `methods` directly.
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }
    const methods = {
  148. forkSession() {
  149. // 获取当前会话
  150. const currentSession = get().currentSession();
  151. if (!currentSession) return;
  152. const newSession = createEmptySession();
  153. newSession.topic = currentSession.topic;
  154. newSession.messages = [...currentSession.messages];
  155. newSession.mask = {
  156. ...currentSession.mask,
  157. modelConfig: {
  158. ...currentSession.mask.modelConfig,
  159. },
  160. };
  161. set((state) => ({
  162. currentSessionIndex: 0,
  163. sessions: [newSession, ...state.sessions],
  164. }));
  165. },
  166. clearSessions() {
  167. set(() => ({
  168. sessions: [createEmptySession()],
  169. currentSessionIndex: 0,
  170. }));
  171. },
  172. selectSession(index: number) {
  173. set({
  174. currentSessionIndex: index,
  175. });
  176. },
  177. moveSession(from: number, to: number) {
  178. set((state) => {
  179. const { sessions, currentSessionIndex: oldIndex } = state;
  180. // move the session
  181. const newSessions = [...sessions];
  182. const session = newSessions[from];
  183. newSessions.splice(from, 1);
  184. newSessions.splice(to, 0, session);
  185. // modify current session id
  186. let newIndex = oldIndex === from ? to : oldIndex;
  187. if (oldIndex > from && oldIndex <= to) {
  188. newIndex -= 1;
  189. } else if (oldIndex < from && oldIndex >= to) {
  190. newIndex += 1;
  191. }
  192. return {
  193. currentSessionIndex: newIndex,
  194. sessions: newSessions,
  195. };
  196. });
  197. },
  198. newSession(mask?: Mask) {
  199. const session = createEmptySession();
  200. if (mask) {
  201. const config = useAppConfig.getState();
  202. const globalModelConfig = config.modelConfig;
  203. session.mask = {
  204. ...mask,
  205. modelConfig: {
  206. ...globalModelConfig,
  207. ...mask.modelConfig,
  208. },
  209. };
  210. session.topic = mask.name;
  211. }
  212. set((state) => ({
  213. currentSessionIndex: 0,
  214. sessions: [session].concat(state.sessions),
  215. }));
  216. },
  217. nextSession(delta: number) {
  218. const n = get().sessions.length;
  219. const limit = (x: number) => (x + n) % n;
  220. const i = get().currentSessionIndex;
  221. get().selectSession(limit(i + delta));
  222. },
      // Delete the session at `index`, keep the selection sensible, and
      // offer a 5-second undo toast that restores the prior state.
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        // shift the selection left when a session before it was removed,
        // and clamp to the new list length
        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          // never leave the list empty — replace with a fresh session
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undo delete action: snapshot taken BEFORE the set() below
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },
  258. currentSession() {
  259. let index = get().currentSessionIndex;
  260. const sessions = get().sessions;
  261. if (index < 0 || index >= sessions.length) {
  262. index = Math.min(sessions.length - 1, Math.max(0, index));
  263. set(() => ({ currentSessionIndex: index }));
  264. }
  265. const session = sessions[index];
  266. return session;
  267. },
      // Bookkeeping after a bot message completes: refresh the session's
      // message array reference, bump stats, and maybe summarize.
      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // concat() with no args clones the array so the store/UI
          // observes a new reference (the message was already appended)
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
      // Send the user's input to the model: build the user message
      // (optionally multimodal), append a streaming placeholder bot
      // message, and wire up the streaming/tool/error callbacks.
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        // expand the raw text through the configured input template
        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        // wrap text + attached images into multimodal parts when needed
        let mContent: string | MultimodalContent[] = userContent;

        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        let userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        // placeholder assistant message, filled in by the callbacks below
        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        const api: ClientApi = getClientApi(modelConfig.providerName);
        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            // clone the messages array so the UI re-renders with the
            // mutated botMessage
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onBeforeTool(tool: ChatMessageTool) {
            // lazily create the tools array, then record the pending call
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onAfterTool(tool: ChatMessageTool) {
            // replace the matching pending tool entry with the finished one
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id == t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            const isAborted = error.message?.includes?.("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            // user-initiated aborts are not flagged as errors
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
  392. getMemoryPrompt() {
  393. const session = get().currentSession();
  394. if (session.memoryPrompt.length) {
  395. return {
  396. role: "system",
  397. content: Locale.Store.Prompt.History(session.memoryPrompt),
  398. date: "",
  399. } as ChatMessage;
  400. }
  401. },
  402. getMessagesWithMemory() {
  403. const session = get().currentSession();
  404. const modelConfig = session.mask.modelConfig;
  405. const clearContextIndex = session.clearContextIndex ?? 0;
  406. const messages = session.messages.slice();
  407. const totalMessageCount = session.messages.length;
  408. // in-context prompts
  409. const contextPrompts = session.mask.context.slice();
  410. // system prompts, to get close to OpenAI Web ChatGPT
  411. const shouldInjectSystemPrompts =
  412. modelConfig.enableInjectSystemPrompts &&
  413. (session.mask.modelConfig.model.startsWith("gpt-") ||
  414. session.mask.modelConfig.model.startsWith("chatgpt-"));
  415. var systemPrompts: ChatMessage[] = [];
  416. systemPrompts = shouldInjectSystemPrompts
  417. ? [
  418. createMessage({
  419. role: "system",
  420. content: fillTemplateWith("", {
  421. ...modelConfig,
  422. template: DEFAULT_SYSTEM_TEMPLATE,
  423. }),
  424. }),
  425. ]
  426. : [];
  427. if (shouldInjectSystemPrompts) {
  428. console.log(
  429. "[Global System Prompt] ",
  430. systemPrompts.at(0)?.content ?? "empty",
  431. );
  432. }
  433. const memoryPrompt = get().getMemoryPrompt();
  434. // long term memory
  435. const shouldSendLongTermMemory =
  436. modelConfig.sendMemory &&
  437. session.memoryPrompt &&
  438. session.memoryPrompt.length > 0 &&
  439. session.lastSummarizeIndex > clearContextIndex;
  440. const longTermMemoryPrompts =
  441. shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
  442. const longTermMemoryStartIndex = session.lastSummarizeIndex;
  443. // short term memory
  444. const shortTermMemoryStartIndex = Math.max(
  445. 0,
  446. totalMessageCount - modelConfig.historyMessageCount,
  447. );
  448. // lets concat send messages, including 4 parts:
  449. // 0. system prompt: to get close to OpenAI Web ChatGPT
  450. // 1. long term memory: summarized memory messages
  451. // 2. pre-defined in-context prompts
  452. // 3. short term memory: latest n messages
  453. // 4. newest input message
  454. const memoryStartIndex = shouldSendLongTermMemory
  455. ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
  456. : shortTermMemoryStartIndex;
  457. // and if user has cleared history messages, we should exclude the memory too.
  458. const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
  459. const maxTokenThreshold = modelConfig.max_tokens;
  460. // get recent messages as much as possible
  461. const reversedRecentMessages = [];
  462. for (
  463. let i = totalMessageCount - 1, tokenCount = 0;
  464. i >= contextStartIndex && tokenCount < maxTokenThreshold;
  465. i -= 1
  466. ) {
  467. const msg = messages[i];
  468. if (!msg || msg.isError) continue;
  469. tokenCount += estimateTokenLength(getMessageTextContent(msg));
  470. reversedRecentMessages.push(msg);
  471. }
  472. // concat all messages
  473. const recentMessages = [
  474. ...systemPrompts,
  475. ...longTermMemoryPrompts,
  476. ...contextPrompts,
  477. ...reversedRecentMessages.reverse(),
  478. ];
  479. return recentMessages;
  480. },
  481. updateMessage(
  482. sessionIndex: number,
  483. messageIndex: number,
  484. updater: (message?: ChatMessage) => void,
  485. ) {
  486. const sessions = get().sessions;
  487. const session = sessions.at(sessionIndex);
  488. const messages = session?.messages;
  489. updater(messages?.at(messageIndex));
  490. set(() => ({ sessions }));
  491. },
  492. resetSession() {
  493. get().updateCurrentSession((session) => {
  494. session.messages = [];
  495. session.memoryPrompt = "";
  496. });
  497. },
  498. summarizeSession(refreshTitle: boolean = false) {
  499. const config = useAppConfig.getState();
  500. const session = get().currentSession();
  501. const modelConfig = session.mask.modelConfig;
  502. // skip summarize when using dalle3?
  503. if (isDalle3(modelConfig.model)) {
  504. return;
  505. }
  506. const providerName = modelConfig.compressProviderName;
  507. const api: ClientApi = getClientApi(providerName);
  508. // remove error messages if any
  509. const messages = session.messages;
  510. // should summarize topic after chating more than 50 words
  511. const SUMMARIZE_MIN_LEN = 50;
  512. if (
  513. (config.enableAutoGenerateTitle &&
  514. session.topic === DEFAULT_TOPIC &&
  515. countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
  516. refreshTitle
  517. ) {
  518. const startIndex = Math.max(
  519. 0,
  520. messages.length - modelConfig.historyMessageCount,
  521. );
  522. const topicMessages = messages
  523. .slice(
  524. startIndex < messages.length ? startIndex : messages.length - 1,
  525. messages.length,
  526. )
  527. .concat(
  528. createMessage({
  529. role: "user",
  530. content: Locale.Store.Prompt.Topic,
  531. }),
  532. );
  533. api.llm.chat({
  534. messages: topicMessages,
  535. config: {
  536. model: modelConfig.compressModel,
  537. stream: false,
  538. providerName,
  539. },
  540. onFinish(message) {
  541. get().updateCurrentSession(
  542. (session) =>
  543. (session.topic =
  544. message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
  545. );
  546. },
  547. });
  548. }
  549. const summarizeIndex = Math.max(
  550. session.lastSummarizeIndex,
  551. session.clearContextIndex ?? 0,
  552. );
  553. let toBeSummarizedMsgs = messages
  554. .filter((msg) => !msg.isError)
  555. .slice(summarizeIndex);
  556. const historyMsgLength = countMessages(toBeSummarizedMsgs);
  557. if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
  558. const n = toBeSummarizedMsgs.length;
  559. toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
  560. Math.max(0, n - modelConfig.historyMessageCount),
  561. );
  562. }
  563. const memoryPrompt = get().getMemoryPrompt();
  564. if (memoryPrompt) {
  565. // add memory prompt
  566. toBeSummarizedMsgs.unshift(memoryPrompt);
  567. }
  568. const lastSummarizeIndex = session.messages.length;
  569. console.log(
  570. "[Chat History] ",
  571. toBeSummarizedMsgs,
  572. historyMsgLength,
  573. modelConfig.compressMessageLengthThreshold,
  574. );
  575. if (
  576. historyMsgLength > modelConfig.compressMessageLengthThreshold &&
  577. modelConfig.sendMemory
  578. ) {
  579. /** Destruct max_tokens while summarizing
  580. * this param is just shit
  581. **/
  582. const { max_tokens, ...modelcfg } = modelConfig;
  583. api.llm.chat({
  584. messages: toBeSummarizedMsgs.concat(
  585. createMessage({
  586. role: "system",
  587. content: Locale.Store.Prompt.Summarize,
  588. date: "",
  589. }),
  590. ),
  591. config: {
  592. ...modelcfg,
  593. stream: true,
  594. model: modelConfig.compressModel,
  595. },
  596. onUpdate(message) {
  597. session.memoryPrompt = message;
  598. },
  599. onFinish(message) {
  600. console.log("[Memory] ", message);
  601. get().updateCurrentSession((session) => {
  602. session.lastSummarizeIndex = lastSummarizeIndex;
  603. session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
  604. });
  605. },
  606. onError(err) {
  607. console.error("[Summarize] ", err);
  608. },
  609. });
  610. }
  611. },
  612. updateStat(message: ChatMessage) {
  613. get().updateCurrentSession((session) => {
  614. session.stat.charCount += message.content.length;
  615. // TODO: should update chat count and word count
  616. });
  617. },
  618. updateCurrentSession(updater: (session: ChatSession) => void) {
  619. const sessions = get().sessions;
  620. const index = get().currentSessionIndex;
  621. updater(sessions[index]);
  622. set(() => ({ sessions }));
  623. },
      // Wipe both persistence layers (IndexedDB + localStorage) and
      // hard-reload the app so it boots from a clean state.
      async clearAllData() {
        await indexedDBStorage.clear();
        localStorage.clear();
        location.reload();
      },
  629. setLastInput(lastInput: string) {
  630. set({
  631. lastInput,
  632. });
  633. },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.2,

    // Upgrade persisted state from older schema versions, applying each
    // version gate in order on a deep clone of the stored state.
    migrate(persistedState, version) {
      const state = persistedState as any;
      // JSON round-trip: deep-clone so migrations never mutate the input
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        // v2: rebuild every session in the current (mask-based) shape
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable `enableInjectSystemPrompts` attribute for old sessions.
      // Resolve issue of old sessions not automatically enabling.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // Exclude those already set by user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // Because users may have changed this configuration,
            // the user's current configuration is used instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      // add default summarize model for every session
      if (version < 3.2) {
        newState.sessions.forEach((s) => {
          const config = useAppConfig.getState();
          s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
          s.mask.modelConfig.compressProviderName =
            config.modelConfig.compressProviderName;
        });
      }

      return newState as any;
    },
  },
);