chat.ts

import { trimTopic, getMessageTextContent, isDalle3 } from "../utils";

import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  StoreKey,
  SUMMARIZE_MODEL,
  GEMINI_SUMMARIZE_MODEL,
} from "../constant";
import { getClientApi } from "../client/api";
import type {
  ClientApi,
  RequestMessage,
  MultimodalContent,
} from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";
import { collectModelsWithDefaultModel } from "../utils/model";
import { useAccessStore } from "./access";
import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
export type ChatMessageTool = {
  id: string;
  index?: number;
  type?: string;
  function?: {
    name: string;
    arguments?: string;
  };
  content?: string;
  isError?: boolean;
};

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
  tools?: ChatMessageTool[];
};
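
// Illustrative example, assuming the defaults of createMessage below are not
// overridden; it shows the shape a ChatMessage takes at runtime:
//
//   const msg: ChatMessage = createMessage({
//     role: "assistant",
//     content: "Hello!",
//   });
//   // => { id: "<nanoid>", date: "<locale date string>",
//   //      role: "assistant", content: "Hello!" }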
export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;

  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;

  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});

function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,

    mask: createEmptyMask(),
  };
}
function getSummarizeModel(currentModel: string) {
  // if a gpt-* / chatgpt-* model is in use, prefer SUMMARIZE_MODEL for summarizing
  if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
    const configStore = useAppConfig.getState();
    const accessStore = useAccessStore.getState();
    const allModel = collectModelsWithDefaultModel(
      configStore.models,
      [configStore.customModels, accessStore.customModels].join(","),
      accessStore.defaultModel,
    );
    const summarizeModel = allModel.find(
      (m) => m.name === SUMMARIZE_MODEL && m.available,
    );
    return summarizeModel?.name ?? currentModel;
  }
  if (currentModel.startsWith("gemini")) {
    return GEMINI_SUMMARIZE_MODEL;
  }
  return currentModel;
}
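
// Illustrative routing, assuming SUMMARIZE_MODEL and GEMINI_SUMMARIZE_MODEL
// from ../constant are e.g. "gpt-4o-mini" and "gemini-pro":
//   getSummarizeModel("gpt-4");      // -> SUMMARIZE_MODEL, if it is available
//   getSummarizeModel("gemini-1.5"); // -> GEMINI_SUMMARIZE_MODEL
//   getSummarizeModel("claude-3");   // -> "claude-3" (unchanged)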
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
    0,
  );
}
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  const cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  // find the entry in DEFAULT_MODELS that matches modelConfig.model
  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);

  let serviceProvider = "OpenAI";
  if (modelInfo) {
    // TODO: auto detect the providerName from modelConfig.model
    // directly use the providerName from the modelInfo for now
    serviceProvider = modelInfo.provider.providerName;
  }

  const vars = {
    ServiceProvider: serviceProvider,
    cutoff,
    model: modelConfig.model,
    time: new Date().toString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // if the input already begins with the template, drop the template to avoid duplication
  if (input.startsWith(output)) {
    output = "";
  }

  // the output must contain {{input}}
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    const regex = new RegExp(`{{${name}}}`, "g");
    output = output.replace(regex, value.toString()); // ensure value is a string
  });

  return output;
}
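
// Illustrative substitution, assuming a user template of
// "Model: {{model}}\n{{input}}" and modelConfig.model === "gpt-4o":
//   fillTemplateWith("Hi", modelConfig);
//   // -> "Model: gpt-4o\nHi"
// Every {{var}} listed above is replaced globally, and {{input}} is appended
// automatically whenever the template omits it.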
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};

export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },

      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // adjust the current index so the same session stays selected
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },
      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },
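
      // Illustrative wrap-around, assuming 3 sessions with index 0 selected:
      //   nextSession(-1); // selects index 2, since (0 - 1 + 3) % 3 === 2
      //   nextSession(1);  // selects index 1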
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undo delete action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },
      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];
        return session;
      },

      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // concat() without arguments clones the array, so the changed
          // reference is picked up by the state update
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        let mContent: string | MultimodalContent[] = userContent;

        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        const userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        const api: ClientApi = getClientApi(modelConfig.providerName);

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onBeforeTool(tool: ChatMessageTool) {
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onAfterTool(tool: ChatMessageTool) {
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id === t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            const isAborted = error.message?.includes?.("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
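
      // Illustrative call, a sketch of how a chat input component might
      // invoke this; the callbacks above then stream into botMessage until
      // onFinish or onError fires:
      //   await useChatStore.getState().onUserInput("Explain closures", [
      //     "data:image/png;base64,...", // optional attached image URLs
      //   ]);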
      getMemoryPrompt() {
        const session = get().currentSession();

        if (session.memoryPrompt.length) {
          return {
            role: "system",
            content: Locale.Store.Prompt.History(session.memoryPrompt),
            date: "",
          } as ChatMessage;
        }
      },
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          (session.mask.modelConfig.model.startsWith("gpt-") ||
            session.mask.modelConfig.model.startsWith("chatgpt-"));

        const systemPrompts: ChatMessage[] = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];

        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        const memoryPrompt = get().getMemoryPrompt();

        // long term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts =
          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // let's concat the messages to send, which include these parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if the user has cleared history messages, we should exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // collect as many recent messages as the token budget allows
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },
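
      // Illustrative result, assuming historyMessageCount = 2 and an injected
      // system prompt: [system prompt, memory summary, mask context...,
      // last 2 non-error messages]; the caller appends the new user message.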
      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },
      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        // skip summarizing when using dalle3
        if (isDalle3(modelConfig.model)) {
          return;
        }

        const providerName = modelConfig.providerName;
        const api: ClientApi = getClientApi(providerName);

        const messages = session.messages;

        // should summarize the topic after chatting more than 50 words
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
              stream: false,
              providerName,
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );

        // remove error messages if any, then take the unsummarized tail
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        const memoryPrompt = get().getMemoryPrompt();
        if (memoryPrompt) {
          // add memory prompt
          toBeSummarizedMsgs.unshift(memoryPrompt);
        }

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          /** Destructure max_tokens out of the config while summarizing,
           * because it only gets in the way here
           **/
          const { max_tokens, ...modelcfg } = modelConfig;
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelcfg,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              get().updateCurrentSession((session) => {
                session.lastSummarizeIndex = lastSummarizeIndex;
                session.memoryPrompt = message; // update the memory prompt so it is persisted to local storage
              });
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },
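
      // Illustrative trigger, assuming compressMessageLengthThreshold = 1000
      // and sendMemory = true: once the unsummarized tail is estimated at more
      // than 1000 tokens, it is compressed into session.memoryPrompt and
      // lastSummarizeIndex advances past it.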
      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          session.stat.charCount += message.content.length;
          // TODO: should update chat count and word count
        });
      },

      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      async clearAllData() {
        await indexedDBStorage.clear();
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.1,

    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable the `enableInjectSystemPrompts` attribute for old sessions.
      // Resolves the issue of old sessions not enabling it automatically.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude those already set by the user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // because users may have changed this configuration,
            // the user's current configuration is used instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);