chat.ts 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784
  1. import { getMessageTextContent, trimTopic } from "../utils";
  2. import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
  3. import { nanoid } from "nanoid";
  4. import type {
  5. ClientApi,
  6. MultimodalContent,
  7. RequestMessage,
  8. } from "../client/api";
  9. import { getClientApi } from "../client/api";
  10. import { ChatControllerPool } from "../client/controller";
  11. import { showToast } from "../components/ui-lib";
  12. import {
  13. DEFAULT_INPUT_TEMPLATE,
  14. DEFAULT_MODELS,
  15. DEFAULT_SYSTEM_TEMPLATE,
  16. KnowledgeCutOffDate,
  17. StoreKey,
  18. } from "../constant";
  19. import Locale, { getLang } from "../locales";
  20. import { isDalle3, safeLocalStorage } from "../utils";
  21. import { prettyObject } from "../utils/format";
  22. import { createPersistStore } from "../utils/store";
  23. import { estimateTokenLength } from "../utils/token";
  24. import { ModelConfig, ModelType, useAppConfig } from "./config";
  25. import { createEmptyMask, Mask } from "./mask";
// Storage wrapper that degrades gracefully when window.localStorage is
// unavailable (SSR, privacy mode, etc.) — see safeLocalStorage in ../utils.
const localStorage = safeLocalStorage();

// A single tool (function) call attached to an assistant message while
// the model is using tools.
export type ChatMessageTool = {
  id: string;
  index?: number;
  type?: string;
  function?: {
    name: string;
    // JSON-encoded argument string; may arrive incrementally while streaming
    arguments?: string;
  };
  // tool execution output, if any
  content?: string;
  isError?: boolean;
};

// A chat message as stored locally: the wire-format RequestMessage plus
// client-side bookkeeping (id, timestamp, streaming/error flags).
export type ChatMessage = RequestMessage & {
  // human-readable locale timestamp assigned at creation time
  date: string;
  // true while the assistant reply is still being streamed in
  streaming?: boolean;
  isError?: boolean;
  id: string;
  // model that produced this message (set on assistant messages)
  model?: ModelType;
  tools?: ChatMessageTool[];
};
  46. export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  47. return {
  48. id: nanoid(),
  49. date: new Date().toLocaleString(),
  50. role: "user",
  51. content: "",
  52. ...override,
  53. };
  54. }
// Rough usage statistics accumulated per session.
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

// One conversation: its messages plus summarization/context bookkeeping.
export interface ChatSession {
  id: string;
  topic: string;
  // rolling summary of older messages ("long term memory")
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  // epoch milliseconds of the last message activity
  lastUpdate: number;
  // messages before this index are already covered by memoryPrompt
  lastSummarizeIndex: number;
  // messages before this index are excluded from the model context
  clearContextIndex?: number;
  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;

// Canned greeting shown for brand-new conversations.
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});
  76. function createEmptySession(): ChatSession {
  77. return {
  78. id: nanoid(),
  79. topic: DEFAULT_TOPIC,
  80. memoryPrompt: "",
  81. messages: [],
  82. stat: {
  83. tokenCount: 0,
  84. wordCount: 0,
  85. charCount: 0,
  86. },
  87. lastUpdate: Date.now(),
  88. lastSummarizeIndex: 0,
  89. mask: createEmptyMask(),
  90. };
  91. }
  92. function countMessages(msgs: ChatMessage[]) {
  93. return msgs.reduce(
  94. (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
  95. 0,
  96. );
  97. }
  98. function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  99. const cutoff =
  100. KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  101. // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
  102. const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
  103. var serviceProvider = "OpenAI";
  104. if (modelInfo) {
  105. // TODO: auto detect the providerName from the modelConfig.model
  106. // Directly use the providerName from the modelInfo
  107. serviceProvider = modelInfo.provider.providerName;
  108. }
  109. const vars = {
  110. ServiceProvider: serviceProvider,
  111. cutoff,
  112. model: modelConfig.model,
  113. time: new Date().toString(),
  114. lang: getLang(),
  115. input: input,
  116. };
  117. let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;
  118. // remove duplicate
  119. if (input.startsWith(output)) {
  120. output = "";
  121. }
  122. // must contains {{input}}
  123. const inputVar = "{{input}}";
  124. if (!output.includes(inputVar)) {
  125. output += "\n" + inputVar;
  126. }
  127. Object.entries(vars).forEach(([name, value]) => {
  128. const regex = new RegExp(`{{${name}}}`, "g");
  129. output = output.replace(regex, value.toString()); // Ensure value is a string
  130. });
  131. return output;
  132. }
// Initial persisted state: one empty session, selected, no pending input.
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
  lastInput: "",
};
  138. export const useChatStore = createPersistStore(
  139. DEFAULT_CHAT_STATE,
  140. (set, _get) => {
  141. function get() {
  142. return {
  143. ..._get(),
  144. ...methods,
  145. };
  146. }
  147. const methods = {
  148. forkSession() {
  149. // 获取当前会话
  150. const currentSession = get().currentSession();
  151. if (!currentSession) return;
  152. const newSession = createEmptySession();
  153. newSession.topic = currentSession.topic;
  154. newSession.messages = [...currentSession.messages];
  155. newSession.mask = {
  156. ...currentSession.mask,
  157. modelConfig: {
  158. ...currentSession.mask.modelConfig,
  159. },
  160. };
  161. set((state) => ({
  162. currentSessionIndex: 0,
  163. sessions: [newSession, ...state.sessions],
  164. }));
  165. },
  166. clearSessions() {
  167. set(() => ({
  168. sessions: [createEmptySession()],
  169. currentSessionIndex: 0,
  170. }));
  171. },
  172. selectSession(index: number) {
  173. set({
  174. currentSessionIndex: index,
  175. });
  176. },
  177. moveSession(from: number, to: number) {
  178. set((state) => {
  179. const { sessions, currentSessionIndex: oldIndex } = state;
  180. // move the session
  181. const newSessions = [...sessions];
  182. const session = newSessions[from];
  183. newSessions.splice(from, 1);
  184. newSessions.splice(to, 0, session);
  185. // modify current session id
  186. let newIndex = oldIndex === from ? to : oldIndex;
  187. if (oldIndex > from && oldIndex <= to) {
  188. newIndex -= 1;
  189. } else if (oldIndex < from && oldIndex >= to) {
  190. newIndex += 1;
  191. }
  192. return {
  193. currentSessionIndex: newIndex,
  194. sessions: newSessions,
  195. };
  196. });
  197. },
  198. newSession(mask?: Mask) {
  199. const session = createEmptySession();
  200. if (mask) {
  201. const config = useAppConfig.getState();
  202. const globalModelConfig = config.modelConfig;
  203. session.mask = {
  204. ...mask,
  205. modelConfig: {
  206. ...globalModelConfig,
  207. ...mask.modelConfig,
  208. },
  209. };
  210. session.topic = mask.name;
  211. }
  212. set((state) => ({
  213. currentSessionIndex: 0,
  214. sessions: [session].concat(state.sessions),
  215. }));
  216. },
  217. nextSession(delta: number) {
  218. const n = get().sessions.length;
  219. const limit = (x: number) => (x + n) % n;
  220. const i = get().currentSessionIndex;
  221. get().selectSession(limit(i + delta));
  222. },
  223. deleteSession(index: number) {
  224. const deletingLastSession = get().sessions.length === 1;
  225. const deletedSession = get().sessions.at(index);
  226. if (!deletedSession) return;
  227. const sessions = get().sessions.slice();
  228. sessions.splice(index, 1);
  229. const currentIndex = get().currentSessionIndex;
  230. let nextIndex = Math.min(
  231. currentIndex - Number(index < currentIndex),
  232. sessions.length - 1,
  233. );
  234. if (deletingLastSession) {
  235. nextIndex = 0;
  236. sessions.push(createEmptySession());
  237. }
  238. // for undo delete action
  239. const restoreState = {
  240. currentSessionIndex: get().currentSessionIndex,
  241. sessions: get().sessions.slice(),
  242. };
  243. set(() => ({
  244. currentSessionIndex: nextIndex,
  245. sessions,
  246. }));
  247. showToast(
  248. Locale.Home.DeleteToast,
  249. {
  250. text: Locale.Home.Revert,
  251. onClick() {
  252. set(() => restoreState);
  253. },
  254. },
  255. 5000,
  256. );
  257. },
  258. currentSession() {
  259. let index = get().currentSessionIndex;
  260. const sessions = get().sessions;
  261. if (index < 0 || index >= sessions.length) {
  262. index = Math.min(sessions.length - 1, Math.max(0, index));
  263. set(() => ({ currentSessionIndex: index }));
  264. }
  265. const session = sessions[index];
  266. return session;
  267. },
  268. onNewMessage(message: ChatMessage) {
  269. get().updateCurrentSession((session) => {
  270. session.messages = session.messages.concat();
  271. session.lastUpdate = Date.now();
  272. });
  273. get().updateStat(message);
  274. get().summarizeSession();
  275. },
  276. async onUserInput(content: string, attachImages?: string[]) {
  277. const session = get().currentSession();
  278. const modelConfig = session.mask.modelConfig;
  279. const userContent = fillTemplateWith(content, modelConfig);
  280. console.log("[User Input] after template: ", userContent);
  281. let mContent: string | MultimodalContent[] = userContent;
  282. if (attachImages && attachImages.length > 0) {
  283. mContent = [
  284. ...(userContent
  285. ? [{ type: "text" as const, text: userContent }]
  286. : []),
  287. ...attachImages.map((url) => ({
  288. type: "image_url" as const,
  289. image_url: { url },
  290. })),
  291. ];
  292. }
  293. let userMessage: ChatMessage = createMessage({
  294. role: "user",
  295. content: mContent,
  296. });
  297. const botMessage: ChatMessage = createMessage({
  298. role: "assistant",
  299. streaming: true,
  300. model: modelConfig.model,
  301. });
  302. // get recent messages
  303. const recentMessages = get().getMessagesWithMemory();
  304. const sendMessages = recentMessages.concat(userMessage);
  305. const messageIndex = get().currentSession().messages.length + 1;
  306. // save user's and bot's message
  307. get().updateCurrentSession((session) => {
  308. const savedUserMessage = {
  309. ...userMessage,
  310. content: mContent,
  311. };
  312. session.messages = session.messages.concat([
  313. savedUserMessage,
  314. botMessage,
  315. ]);
  316. });
  317. const api: ClientApi = getClientApi(modelConfig.providerName);
  318. // make request
  319. api.llm.chat({
  320. messages: sendMessages,
  321. config: { ...modelConfig, stream: true },
  322. onUpdate(message) {
  323. botMessage.streaming = true;
  324. if (message) {
  325. botMessage.content = message;
  326. }
  327. get().updateCurrentSession((session) => {
  328. session.messages = session.messages.concat();
  329. });
  330. },
  331. onFinish(message) {
  332. botMessage.streaming = false;
  333. if (message) {
  334. botMessage.content = message;
  335. get().onNewMessage(botMessage);
  336. }
  337. ChatControllerPool.remove(session.id, botMessage.id);
  338. },
  339. onBeforeTool(tool: ChatMessageTool) {
  340. (botMessage.tools = botMessage?.tools || []).push(tool);
  341. get().updateCurrentSession((session) => {
  342. session.messages = session.messages.concat();
  343. });
  344. },
  345. onAfterTool(tool: ChatMessageTool) {
  346. botMessage?.tools?.forEach((t, i, tools) => {
  347. if (tool.id == t.id) {
  348. tools[i] = { ...tool };
  349. }
  350. });
  351. get().updateCurrentSession((session) => {
  352. session.messages = session.messages.concat();
  353. });
  354. },
  355. onError(error) {
  356. const isAborted = error.message?.includes?.("aborted");
  357. botMessage.content +=
  358. "\n\n" +
  359. prettyObject({
  360. error: true,
  361. message: error.message,
  362. });
  363. botMessage.streaming = false;
  364. userMessage.isError = !isAborted;
  365. botMessage.isError = !isAborted;
  366. get().updateCurrentSession((session) => {
  367. session.messages = session.messages.concat();
  368. });
  369. ChatControllerPool.remove(
  370. session.id,
  371. botMessage.id ?? messageIndex,
  372. );
  373. console.error("[Chat] failed ", error);
  374. },
  375. onController(controller) {
  376. // collect controller for stop/retry
  377. ChatControllerPool.addController(
  378. session.id,
  379. botMessage.id ?? messageIndex,
  380. controller,
  381. );
  382. },
  383. });
  384. },
  385. getMemoryPrompt() {
  386. const session = get().currentSession();
  387. if (session.memoryPrompt.length) {
  388. return {
  389. role: "system",
  390. content: Locale.Store.Prompt.History(session.memoryPrompt),
  391. date: "",
  392. } as ChatMessage;
  393. }
  394. },
  395. getMessagesWithMemory() {
  396. const session = get().currentSession();
  397. const modelConfig = session.mask.modelConfig;
  398. const clearContextIndex = session.clearContextIndex ?? 0;
  399. const messages = session.messages.slice();
  400. const totalMessageCount = session.messages.length;
  401. // in-context prompts
  402. const contextPrompts = session.mask.context.slice();
  403. // system prompts, to get close to OpenAI Web ChatGPT
  404. const shouldInjectSystemPrompts =
  405. modelConfig.enableInjectSystemPrompts &&
  406. (session.mask.modelConfig.model.startsWith("gpt-") ||
  407. session.mask.modelConfig.model.startsWith("chatgpt-"));
  408. var systemPrompts: ChatMessage[] = [];
  409. systemPrompts = shouldInjectSystemPrompts
  410. ? [
  411. createMessage({
  412. role: "system",
  413. content: fillTemplateWith("", {
  414. ...modelConfig,
  415. template: DEFAULT_SYSTEM_TEMPLATE,
  416. }),
  417. }),
  418. ]
  419. : [];
  420. if (shouldInjectSystemPrompts) {
  421. console.log(
  422. "[Global System Prompt] ",
  423. systemPrompts.at(0)?.content ?? "empty",
  424. );
  425. }
  426. const memoryPrompt = get().getMemoryPrompt();
  427. // long term memory
  428. const shouldSendLongTermMemory =
  429. modelConfig.sendMemory &&
  430. session.memoryPrompt &&
  431. session.memoryPrompt.length > 0 &&
  432. session.lastSummarizeIndex > clearContextIndex;
  433. const longTermMemoryPrompts =
  434. shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
  435. const longTermMemoryStartIndex = session.lastSummarizeIndex;
  436. // short term memory
  437. const shortTermMemoryStartIndex = Math.max(
  438. 0,
  439. totalMessageCount - modelConfig.historyMessageCount,
  440. );
  441. // lets concat send messages, including 4 parts:
  442. // 0. system prompt: to get close to OpenAI Web ChatGPT
  443. // 1. long term memory: summarized memory messages
  444. // 2. pre-defined in-context prompts
  445. // 3. short term memory: latest n messages
  446. // 4. newest input message
  447. const memoryStartIndex = shouldSendLongTermMemory
  448. ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
  449. : shortTermMemoryStartIndex;
  450. // and if user has cleared history messages, we should exclude the memory too.
  451. const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
  452. const maxTokenThreshold = modelConfig.max_tokens;
  453. // get recent messages as much as possible
  454. const reversedRecentMessages = [];
  455. for (
  456. let i = totalMessageCount - 1, tokenCount = 0;
  457. i >= contextStartIndex && tokenCount < maxTokenThreshold;
  458. i -= 1
  459. ) {
  460. const msg = messages[i];
  461. if (!msg || msg.isError) continue;
  462. tokenCount += estimateTokenLength(getMessageTextContent(msg));
  463. reversedRecentMessages.push(msg);
  464. }
  465. // concat all messages
  466. const recentMessages = [
  467. ...systemPrompts,
  468. ...longTermMemoryPrompts,
  469. ...contextPrompts,
  470. ...reversedRecentMessages.reverse(),
  471. ];
  472. return recentMessages;
  473. },
  474. updateMessage(
  475. sessionIndex: number,
  476. messageIndex: number,
  477. updater: (message?: ChatMessage) => void,
  478. ) {
  479. const sessions = get().sessions;
  480. const session = sessions.at(sessionIndex);
  481. const messages = session?.messages;
  482. updater(messages?.at(messageIndex));
  483. set(() => ({ sessions }));
  484. },
  485. resetSession() {
  486. get().updateCurrentSession((session) => {
  487. session.messages = [];
  488. session.memoryPrompt = "";
  489. });
  490. },
  491. summarizeSession(refreshTitle: boolean = false) {
  492. const config = useAppConfig.getState();
  493. const session = get().currentSession();
  494. const modelConfig = session.mask.modelConfig;
  495. // skip summarize when using dalle3?
  496. if (isDalle3(modelConfig.model)) {
  497. return;
  498. }
  499. const providerName = modelConfig.compressProviderName;
  500. const api: ClientApi = getClientApi(providerName);
  501. // remove error messages if any
  502. const messages = session.messages;
  503. // should summarize topic after chating more than 50 words
  504. const SUMMARIZE_MIN_LEN = 50;
  505. if (
  506. (config.enableAutoGenerateTitle &&
  507. session.topic === DEFAULT_TOPIC &&
  508. countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
  509. refreshTitle
  510. ) {
  511. const startIndex = Math.max(
  512. 0,
  513. messages.length - modelConfig.historyMessageCount,
  514. );
  515. const topicMessages = messages
  516. .slice(
  517. startIndex < messages.length ? startIndex : messages.length - 1,
  518. messages.length,
  519. )
  520. .concat(
  521. createMessage({
  522. role: "user",
  523. content: Locale.Store.Prompt.Topic,
  524. }),
  525. );
  526. api.llm.chat({
  527. messages: topicMessages,
  528. config: {
  529. model: modelConfig.compressModel,
  530. stream: false,
  531. providerName,
  532. },
  533. onFinish(message) {
  534. if (!isValidMessage(message)) return;
  535. get().updateCurrentSession(
  536. (session) =>
  537. (session.topic =
  538. message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
  539. );
  540. },
  541. });
  542. }
  543. const summarizeIndex = Math.max(
  544. session.lastSummarizeIndex,
  545. session.clearContextIndex ?? 0,
  546. );
  547. let toBeSummarizedMsgs = messages
  548. .filter((msg) => !msg.isError)
  549. .slice(summarizeIndex);
  550. const historyMsgLength = countMessages(toBeSummarizedMsgs);
  551. if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
  552. const n = toBeSummarizedMsgs.length;
  553. toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
  554. Math.max(0, n - modelConfig.historyMessageCount),
  555. );
  556. }
  557. const memoryPrompt = get().getMemoryPrompt();
  558. if (memoryPrompt) {
  559. // add memory prompt
  560. toBeSummarizedMsgs.unshift(memoryPrompt);
  561. }
  562. const lastSummarizeIndex = session.messages.length;
  563. console.log(
  564. "[Chat History] ",
  565. toBeSummarizedMsgs,
  566. historyMsgLength,
  567. modelConfig.compressMessageLengthThreshold,
  568. );
  569. if (
  570. historyMsgLength > modelConfig.compressMessageLengthThreshold &&
  571. modelConfig.sendMemory
  572. ) {
  573. /** Destruct max_tokens while summarizing
  574. * this param is just shit
  575. **/
  576. const { max_tokens, ...modelcfg } = modelConfig;
  577. api.llm.chat({
  578. messages: toBeSummarizedMsgs.concat(
  579. createMessage({
  580. role: "system",
  581. content: Locale.Store.Prompt.Summarize,
  582. date: "",
  583. }),
  584. ),
  585. config: {
  586. ...modelcfg,
  587. stream: true,
  588. model: modelConfig.compressModel,
  589. },
  590. onUpdate(message) {
  591. session.memoryPrompt = message;
  592. },
  593. onFinish(message) {
  594. console.log("[Memory] ", message);
  595. get().updateCurrentSession((session) => {
  596. session.lastSummarizeIndex = lastSummarizeIndex;
  597. session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
  598. });
  599. },
  600. onError(err) {
  601. console.error("[Summarize] ", err);
  602. },
  603. });
  604. }
  605. function isValidMessage(message: any): boolean {
  606. return typeof message === "string" && !message.startsWith("```json");
  607. }
  608. },
  609. updateStat(message: ChatMessage) {
  610. get().updateCurrentSession((session) => {
  611. session.stat.charCount += message.content.length;
  612. // TODO: should update chat count and word count
  613. });
  614. },
  615. updateCurrentSession(updater: (session: ChatSession) => void) {
  616. const sessions = get().sessions;
  617. const index = get().currentSessionIndex;
  618. updater(sessions[index]);
  619. set(() => ({ sessions }));
  620. },
  621. async clearAllData() {
  622. await indexedDBStorage.clear();
  623. localStorage.clear();
  624. location.reload();
  625. },
  626. setLastInput(lastInput: string) {
  627. set({
  628. lastInput,
  629. });
  630. },
  631. };
  632. return methods;
  633. },
  634. {
  635. name: StoreKey.Chat,
  636. version: 3.2,
  637. migrate(persistedState, version) {
  638. const state = persistedState as any;
  639. const newState = JSON.parse(
  640. JSON.stringify(state),
  641. ) as typeof DEFAULT_CHAT_STATE;
  642. if (version < 2) {
  643. newState.sessions = [];
  644. const oldSessions = state.sessions;
  645. for (const oldSession of oldSessions) {
  646. const newSession = createEmptySession();
  647. newSession.topic = oldSession.topic;
  648. newSession.messages = [...oldSession.messages];
  649. newSession.mask.modelConfig.sendMemory = true;
  650. newSession.mask.modelConfig.historyMessageCount = 4;
  651. newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
  652. newState.sessions.push(newSession);
  653. }
  654. }
  655. if (version < 3) {
  656. // migrate id to nanoid
  657. newState.sessions.forEach((s) => {
  658. s.id = nanoid();
  659. s.messages.forEach((m) => (m.id = nanoid()));
  660. });
  661. }
  662. // Enable `enableInjectSystemPrompts` attribute for old sessions.
  663. // Resolve issue of old sessions not automatically enabling.
  664. if (version < 3.1) {
  665. newState.sessions.forEach((s) => {
  666. if (
  667. // Exclude those already set by user
  668. !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
  669. ) {
  670. // Because users may have changed this configuration,
  671. // the user's current configuration is used instead of the default
  672. const config = useAppConfig.getState();
  673. s.mask.modelConfig.enableInjectSystemPrompts =
  674. config.modelConfig.enableInjectSystemPrompts;
  675. }
  676. });
  677. }
  678. // add default summarize model for every session
  679. if (version < 3.2) {
  680. newState.sessions.forEach((s) => {
  681. const config = useAppConfig.getState();
  682. s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
  683. s.mask.modelConfig.compressProviderName =
  684. config.modelConfig.compressProviderName;
  685. });
  686. }
  687. return newState as any;
  688. },
  689. },
  690. );