// chat.ts

import { trimTopic, getMessageTextContent, isDalle3 } from "../utils";

import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  StoreKey,
  SUMMARIZE_MODEL,
  GEMINI_SUMMARIZE_MODEL,
} from "../constant";
import { getClientApi } from "../client/api";
import type {
  ClientApi,
  RequestMessage,
  MultimodalContent,
} from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";
import { collectModelsWithDefaultModel } from "../utils/model";
import { useAccessStore } from "./access";
import { indexedDBStorage } from "@/app/utils/indexedDB-storage";

export type ChatMessageTool = {
  id: string;
  index?: number;
  type?: string;
  function?: {
    name: string;
    arguments?: string;
  };
  content?: string;
  isError?: boolean;
};

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
  tools?: ChatMessageTool[];
};
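
/**
 * Create a ChatMessage with sane defaults (fresh nanoid, current timestamp,
 * empty user message); any field can be overridden by the caller.
 */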
export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}

export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;
  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});

function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,
    mask: createEmptyMask(),
  };
}
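
/**
 * Pick the model used for summarization: gpt-* models prefer the dedicated
 * SUMMARIZE_MODEL when it is available to the current user, Gemini models
 * fall back to GEMINI_SUMMARIZE_MODEL, and anything else summarizes with
 * itself.
 */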
function getSummarizeModel(currentModel: string) {
  // gpt-* models are forced onto the cheaper summarize model when available
  if (currentModel.startsWith("gpt")) {
    const configStore = useAppConfig.getState();
    const accessStore = useAccessStore.getState();
    const allModel = collectModelsWithDefaultModel(
      configStore.models,
      [configStore.customModels, accessStore.customModels].join(","),
      accessStore.defaultModel,
    );
    const summarizeModel = allModel.find(
      (m) => m.name === SUMMARIZE_MODEL && m.available,
    );
    return summarizeModel?.name ?? currentModel;
  }
  if (currentModel.startsWith("gemini")) {
    return GEMINI_SUMMARIZE_MODEL;
  }
  return currentModel;
}
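
// Rough token count over a list of messages, using the client-side estimator.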
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
    0,
  );
}
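
/**
 * Expand the user's input template: substitute the {{ServiceProvider}},
 * {{cutoff}}, {{model}}, {{time}}, {{lang}} and {{input}} placeholders,
 * appending {{input}} if the template omitted it.
 */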
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  const cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  // find the entry in DEFAULT_MODELS that matches modelConfig.model
  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);

  let serviceProvider = "OpenAI";
  if (modelInfo) {
    // TODO: auto detect the providerName from modelConfig.model
    // directly use the providerName from the modelInfo
    serviceProvider = modelInfo.provider.providerName;
  }

  const vars = {
    ServiceProvider: serviceProvider,
    cutoff,
    model: modelConfig.model,
    time: new Date().toString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // avoid duplication when the input already starts with the template
  if (input.startsWith(output)) {
    output = "";
  }

  // the output must contain the {{input}} placeholder
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    const regex = new RegExp(`{{${name}}}`, "g");
    output = output.replace(regex, value.toString()); // ensure value is a string
  });

  return output;
}

const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};
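
/**
 * The chat store: persisted via createPersistStore, holding every chat
 * session plus the index of the currently selected one.
 */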
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    // merge the raw state with the methods so the methods can call each other
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },
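
      /**
       * Move a session from `from` to `to` in the list while keeping the
       * selection stable: if the selected session itself moved, follow it;
       * otherwise shift its index to account for the displacement.
       */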
      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // adjust the selected index, which the move may have displaced
          let newIndex = oldIndex === from ? to : oldIndex;

          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },
      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },
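
      /**
       * Delete the session at `index`. If it was the only session, a fresh
       * empty one replaces it. A toast offers a five-second undo that
       * restores the snapshot taken before the deletion.
       */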
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // snapshot the state for the undo action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },
      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        // clamp the index if it has drifted out of range
        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];
        return session;
      },
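
      /**
       * Bookkeeping after a message finishes streaming: bump the session's
       * timestamp, update the character stats, and kick off summarization.
       */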
      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // concat() clones the array so subscribers see a new reference
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
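
      /**
       * Handle a user message end-to-end: expand the input template, wrap
       * any attached images into multimodal content, append the user and
       * (streaming) bot messages to the session, then fire the LLM request
       * and stream updates back into the bot message.
       */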
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        let mContent: string | MultimodalContent[] = userContent;

        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        const userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save the user's and the bot's messages
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });
        const api: ClientApi = getClientApi(modelConfig.providerName);

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onBeforeTool(tool: ChatMessageTool) {
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onAfterTool(tool: ChatMessageTool) {
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id === t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            const isAborted = error.message?.includes?.("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect the controller so the request can be stopped or retried
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
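
      /**
       * Wrap the summarized long-term memory (if any) in a system message
       * so it can be prepended to the outbound context.
       */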
      getMemoryPrompt() {
        const session = get().currentSession();

        if (session.memoryPrompt.length) {
          return {
            role: "system",
            content: Locale.Store.Prompt.History(session.memoryPrompt),
            date: "",
          } as ChatMessage;
        }
      },
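
      /**
       * Assemble the context window to send to the model: optional injected
       * system prompt, long-term memory summary, the mask's in-context
       * prompts, and as many recent messages as fit under max_tokens,
       * respecting any "clear context" divider the user has set.
       */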
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to mimic the behavior of OpenAI's web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          session.mask.modelConfig.model.startsWith("gpt-");

        const systemPrompts: ChatMessage[] = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];

        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        const memoryPrompt = get().getMemoryPrompt();

        // long term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts =
          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // the outbound messages are concatenated from five parts:
        // 0. system prompt: to mimic OpenAI's web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // if the user has cleared history messages, exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // collect recent messages, newest first, until the token budget is hit
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },
      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },
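
      /**
       * Two kinds of summarization: (1) auto-generate a topic once the chat
       * passes ~50 estimated tokens and still has the default title;
       * (2) compress older messages into `memoryPrompt` once their length
       * exceeds the configured compression threshold.
       */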
      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        // dalle3 sessions generate images; there is nothing to summarize
        if (isDalle3(modelConfig.model)) {
          return;
        }

        const providerName = modelConfig.providerName;
        const api: ClientApi = getClientApi(providerName);

        const messages = session.messages;

        // auto-generate a topic once the chat exceeds ~50 estimated tokens
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
              stream: false,
              providerName,
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );
        // slice first (the index refers to the full message list), then
        // drop error messages from what gets summarized
        let toBeSummarizedMsgs = messages
          .slice(summarizeIndex)
          .filter((msg) => !msg.isError);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        // if the history is too long, keep only the most recent messages
        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        const memoryPrompt = get().getMemoryPrompt();
        if (memoryPrompt) {
          // prepend the existing memory so the new summary builds on it
          toBeSummarizedMsgs.unshift(memoryPrompt);
        }

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          // drop max_tokens from the summarize request so it does not cap
          // the length of the generated summary
          const { max_tokens, ...modelcfg } = modelConfig;
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelcfg,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              get().updateCurrentSession((session) => {
                session.lastSummarizeIndex = lastSummarizeIndex;
                session.memoryPrompt = message; // persist the updated memory prompt
              });
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },

      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // content may be multimodal, so count only the text portion
          session.stat.charCount += getMessageTextContent(message).length;
          // TODO: should update chat count and word count
        });
      },

      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      async clearAllData() {
        await indexedDBStorage.clear();
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.1,
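
    /**
     * Migrations: v2 rebuilt sessions with memory defaults, v3 switched ids
     * to nanoid, and v3.1 backfilled `enableInjectSystemPrompts` from the
     * user's current config.
     */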
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate ids to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable `enableInjectSystemPrompts` for old sessions that never set
      // it, so they pick up the injection behavior automatically.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude sessions where the user already set it explicitly
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // the user may have changed this setting, so copy their current
            // configuration rather than the hard-coded default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);