chat.ts

import { trimTopic, getMessageTextContent } from "../utils";

import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  StoreKey,
  SUMMARIZE_MODEL,
  GEMINI_SUMMARIZE_MODEL,
} from "../constant";
import { getClientApi } from "../client/api";
import type {
  ClientApi,
  RequestMessage,
  MultimodalContent,
} from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";
import { collectModelsWithDefaultModel } from "../utils/model";
import { useAccessStore } from "./access";
import { isDalle3 } from "../utils";

export type ChatMessageTool = {
  id: string;
  type?: string;
  function?: {
    name: string;
    arguments?: string;
  };
  content?: string;
  isError?: boolean;
};

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
  tools?: ChatMessageTool[];
};

export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}

export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;

  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;

  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});

function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,

    mask: createEmptyMask(),
  };
}
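
// Picks the model used for auto-titles and memory summaries: gpt-* chats fall
// back to SUMMARIZE_MODEL when it is available, gemini chats use
// GEMINI_SUMMARIZE_MODEL, and anything else just summarizes with itself.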
function getSummarizeModel(currentModel: string) {
  // if a gpt-* model is in use, force 4o-mini to do the summarizing
  if (currentModel.startsWith("gpt")) {
    const configStore = useAppConfig.getState();
    const accessStore = useAccessStore.getState();
    const allModel = collectModelsWithDefaultModel(
      configStore.models,
      [configStore.customModels, accessStore.customModels].join(","),
      accessStore.defaultModel,
    );
    const summarizeModel = allModel.find(
      (m) => m.name === SUMMARIZE_MODEL && m.available,
    );
    return summarizeModel?.name ?? currentModel;
  }
  if (currentModel.startsWith("gemini")) {
    return GEMINI_SUMMARIZE_MODEL;
  }
  return currentModel;
}

function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
    0,
  );
}
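
// Expands the input template: substitutes the {{ServiceProvider}}, {{cutoff}},
// {{model}}, {{time}}, {{lang}} and {{input}} placeholders, drops the template
// when the input already starts with it, and appends a bare {{input}} if the
// template forgot to include one.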
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  const cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  // Find the model in the DEFAULT_MODELS array that matches modelConfig.model
  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);

  let serviceProvider = "OpenAI";
  if (modelInfo) {
    // TODO: auto detect the providerName from modelConfig.model
    // Directly use the providerName from the modelInfo
    serviceProvider = modelInfo.provider.providerName;
  }

  const vars = {
    ServiceProvider: serviceProvider,
    cutoff,
    model: modelConfig.model,
    time: new Date().toString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // remove duplicate
  if (input.startsWith(output)) {
    output = "";
  }

  // must contain {{input}}
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    const regex = new RegExp(`{{${name}}}`, "g");
    output = output.replace(regex, value.toString()); // ensure value is a string
  });

  return output;
}

const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};
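
// The chat store: a persisted list of sessions plus the index of the active
// one. The local get() wrapper merges the raw persisted state with the
// methods object below so the methods can call one another.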
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },

      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // keep the selected index pointing at the same session
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },
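
      // New sessions can start from a mask; the mask's modelConfig is layered
      // over the global modelConfig so unspecified fields inherit the global
      // defaults.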
      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },
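
      // Removes a session, recomputes the selected index, and shows a
      // 5-second toast whose button restores the pre-delete state.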
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undo delete action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },

      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];

        return session;
      },

      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
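
      // Send pipeline: fill the input template, wrap attached images into
      // multimodal content, append the user message plus a streaming bot
      // placeholder, then call the provider client and mutate the session
      // from the streaming callbacks below.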
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        let mContent: string | MultimodalContent[] = userContent;

        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        let userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        const api: ClientApi = getClientApi(modelConfig.providerName);

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onBeforeTool(tool: ChatMessageTool) {
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onAfterTool(tool: ChatMessageTool) {
            console.log("onAfterTool", botMessage);
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id === t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            const isAborted = error.message.includes("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },

      getMemoryPrompt() {
        const session = get().currentSession();

        if (session.memoryPrompt.length) {
          return {
            role: "system",
            content: Locale.Store.Prompt.History(session.memoryPrompt),
            date: "",
          } as ChatMessage;
        }
      },
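
      // Assembles the outgoing context: the optional injected system prompt,
      // the summarized long-term memory, the mask's in-context prompts, and
      // as many recent messages as fit under modelConfig.max_tokens (skipping
      // error messages and anything before clearContextIndex).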
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          session.mask.modelConfig.model.startsWith("gpt-");

        const systemPrompts: ChatMessage[] = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];
        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        const memoryPrompt = get().getMemoryPrompt();
        // long term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts =
          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // let's concat the messages to send, made up of 5 parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if the user has cleared history messages, we should exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // get as many recent messages as possible
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },

      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },
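
      // Runs after each exchange: generates a topic once the estimated token
      // count passes SUMMARIZE_MIN_LEN, and compresses older history into
      // session.memoryPrompt once it exceeds compressMessageLengthThreshold
      // (only when sendMemory is enabled).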
      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        // skip summarizing when using dalle3
        if (isDalle3(modelConfig.model)) {
          return;
        }

        const providerName = modelConfig.providerName;
        const api: ClientApi = getClientApi(providerName);

        // remove error messages if any
        const messages = session.messages;

        // should summarize the topic after chatting more than 50 words
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
              stream: false,
              providerName,
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        const memoryPrompt = get().getMemoryPrompt();
        if (memoryPrompt) {
          // add memory prompt
          toBeSummarizedMsgs.unshift(memoryPrompt);
        }

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          /** Destructure max_tokens out of the config while summarizing;
           * it only gets in the way here.
           **/
          const { max_tokens, ...modelcfg } = modelConfig;
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelcfg,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              get().updateCurrentSession((session) => {
                session.lastSummarizeIndex = lastSummarizeIndex;
                session.memoryPrompt = message; // update the memory prompt so it gets persisted to local storage
              });
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },

      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          session.stat.charCount += message.content.length;
          // TODO: should update chat count and word count
        });
      },

      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      clearAllData() {
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
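
  // Persist options: stored under StoreKey.Chat at version 3.1, with
  // migrations for the pre-2 session shape, pre-3 ids to nanoid, and the
  // 3.1 enableInjectSystemPrompts backfill.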
  {
    name: StoreKey.Chat,
    version: 3.1,
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable the `enableInjectSystemPrompts` attribute for old sessions.
      // Resolves the issue of old sessions not enabling it automatically.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude those already set by the user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // because users may have changed this configuration,
            // the user's current configuration is used instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);