// chat.ts — session/chat state store.
// (Removed copy-paste artifacts: file-size banner and line-number gutter runs.)
  1. import { getMessageTextContent, trimTopic } from "../utils";
  2. import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
  3. import { nanoid } from "nanoid";
  4. import type {
  5. ClientApi,
  6. MultimodalContent,
  7. RequestMessage,
  8. } from "../client/api";
  9. import { getClientApi } from "../client/api";
  10. import { ChatControllerPool } from "../client/controller";
  11. import { showToast } from "../components/ui-lib";
  12. import {
  13. DEFAULT_INPUT_TEMPLATE,
  14. DEFAULT_MODELS,
  15. DEFAULT_SYSTEM_TEMPLATE,
  16. KnowledgeCutOffDate,
  17. StoreKey,
  18. SUMMARIZE_MODEL,
  19. GEMINI_SUMMARIZE_MODEL,
  20. ServiceProvider,
  21. } from "../constant";
  22. import Locale, { getLang } from "../locales";
  23. import { isDalle3, safeLocalStorage } from "../utils";
  24. import { prettyObject } from "../utils/format";
  25. import { createPersistStore } from "../utils/store";
  26. import { estimateTokenLength } from "../utils/token";
  27. import { ModelConfig, ModelType, useAppConfig } from "./config";
  28. import { useAccessStore } from "./access";
  29. import { collectModelsWithDefaultModel } from "../utils/model";
  30. import { createEmptyMask, Mask } from "./mask";
// NOTE(review): presumably a wrapper that falls back to an in-memory store
// when window.localStorage is unavailable (e.g. SSR) — confirm in ../utils.
const localStorage = safeLocalStorage();

// One tool/function-call entry attached to an assistant message.
export type ChatMessageTool = {
  id: string;
  index?: number;
  type?: string;
  function?: {
    name: string;
    // NOTE(review): presumably a JSON-encoded argument string from the model.
    arguments?: string;
  };
  // Tool execution result text.
  content?: string;
  isError?: boolean;
  errorMsg?: string;
};
// A persisted chat message: the wire-format RequestMessage plus
// client-side bookkeeping fields.
export type ChatMessage = RequestMessage & {
  // Locale-formatted creation time (see createMessage).
  date: string;
  // True while the assistant reply is still streaming in.
  streaming?: boolean;
  isError?: boolean;
  id: string;
  // Model that produced this message (set on assistant messages in onUserInput).
  model?: ModelType;
  tools?: ChatMessageTool[];
};
  52. export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  53. return {
  54. id: nanoid(),
  55. date: new Date().toLocaleString(),
  56. role: "user",
  57. content: "",
  58. ...override,
  59. };
  60. }
// Rough usage statistics for a session.
// Only charCount is currently maintained (see updateStat's TODO).
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}
// One conversation: its messages, summarization bookkeeping, and mask (persona).
export interface ChatSession {
  id: string;
  topic: string;
  // Long-term memory: model-generated summary of older messages.
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  // Epoch millis of the last update (see onNewMessage).
  lastUpdate: number;
  // Messages before this index are covered by memoryPrompt.
  lastSummarizeIndex: number;
  // Messages before this index are excluded from the request context.
  clearContextIndex?: number;
  mask: Mask;
}
export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;

// Greeting message shown in a brand-new chat.
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});
  82. function createEmptySession(): ChatSession {
  83. return {
  84. id: nanoid(),
  85. topic: DEFAULT_TOPIC,
  86. memoryPrompt: "",
  87. messages: [],
  88. stat: {
  89. tokenCount: 0,
  90. wordCount: 0,
  91. charCount: 0,
  92. },
  93. lastUpdate: Date.now(),
  94. lastSummarizeIndex: 0,
  95. mask: createEmptyMask(),
  96. };
  97. }
  98. function getSummarizeModel(
  99. currentModel: string,
  100. providerName: string,
  101. ): string[] {
  102. // if it is using gpt-* models, force to use 4o-mini to summarize
  103. if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
  104. const configStore = useAppConfig.getState();
  105. const accessStore = useAccessStore.getState();
  106. const allModel = collectModelsWithDefaultModel(
  107. configStore.models,
  108. [configStore.customModels, accessStore.customModels].join(","),
  109. accessStore.defaultModel,
  110. );
  111. const summarizeModel = allModel.find(
  112. (m) => m.name === SUMMARIZE_MODEL && m.available,
  113. );
  114. if (summarizeModel) {
  115. return [
  116. summarizeModel.name,
  117. summarizeModel.provider?.providerName as string,
  118. ];
  119. }
  120. }
  121. if (currentModel.startsWith("gemini")) {
  122. return [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google];
  123. }
  124. return [currentModel, providerName];
  125. }
  126. function countMessages(msgs: ChatMessage[]) {
  127. return msgs.reduce(
  128. (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
  129. 0,
  130. );
  131. }
  132. function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  133. const cutoff =
  134. KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  135. // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
  136. const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
  137. var serviceProvider = "OpenAI";
  138. if (modelInfo) {
  139. // TODO: auto detect the providerName from the modelConfig.model
  140. // Directly use the providerName from the modelInfo
  141. serviceProvider = modelInfo.provider.providerName;
  142. }
  143. const vars = {
  144. ServiceProvider: serviceProvider,
  145. cutoff,
  146. model: modelConfig.model,
  147. time: new Date().toString(),
  148. lang: getLang(),
  149. input: input,
  150. };
  151. let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;
  152. // remove duplicate
  153. if (input.startsWith(output)) {
  154. output = "";
  155. }
  156. // must contains {{input}}
  157. const inputVar = "{{input}}";
  158. if (!output.includes(inputVar)) {
  159. output += "\n" + inputVar;
  160. }
  161. Object.entries(vars).forEach(([name, value]) => {
  162. const regex = new RegExp(`{{${name}}}`, "g");
  163. output = output.replace(regex, value.toString()); // Ensure value is a string
  164. });
  165. return output;
  166. }
// Initial persisted state: a single empty session, selected.
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
  lastInput: "",
};
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    // Merge raw state with the methods object so methods can call each
    // other through get() (e.g. get().currentSession()).
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }
    const methods = {
  182. forkSession() {
  183. // 获取当前会话
  184. const currentSession = get().currentSession();
  185. if (!currentSession) return;
  186. const newSession = createEmptySession();
  187. newSession.topic = currentSession.topic;
  188. newSession.messages = [...currentSession.messages];
  189. newSession.mask = {
  190. ...currentSession.mask,
  191. modelConfig: {
  192. ...currentSession.mask.modelConfig,
  193. },
  194. };
  195. set((state) => ({
  196. currentSessionIndex: 0,
  197. sessions: [newSession, ...state.sessions],
  198. }));
  199. },
  200. clearSessions() {
  201. set(() => ({
  202. sessions: [createEmptySession()],
  203. currentSessionIndex: 0,
  204. }));
  205. },
  206. selectSession(index: number) {
  207. set({
  208. currentSessionIndex: index,
  209. });
  210. },
  211. moveSession(from: number, to: number) {
  212. set((state) => {
  213. const { sessions, currentSessionIndex: oldIndex } = state;
  214. // move the session
  215. const newSessions = [...sessions];
  216. const session = newSessions[from];
  217. newSessions.splice(from, 1);
  218. newSessions.splice(to, 0, session);
  219. // modify current session id
  220. let newIndex = oldIndex === from ? to : oldIndex;
  221. if (oldIndex > from && oldIndex <= to) {
  222. newIndex -= 1;
  223. } else if (oldIndex < from && oldIndex >= to) {
  224. newIndex += 1;
  225. }
  226. return {
  227. currentSessionIndex: newIndex,
  228. sessions: newSessions,
  229. };
  230. });
  231. },
  232. newSession(mask?: Mask) {
  233. const session = createEmptySession();
  234. if (mask) {
  235. const config = useAppConfig.getState();
  236. const globalModelConfig = config.modelConfig;
  237. session.mask = {
  238. ...mask,
  239. modelConfig: {
  240. ...globalModelConfig,
  241. ...mask.modelConfig,
  242. },
  243. };
  244. session.topic = mask.name;
  245. }
  246. set((state) => ({
  247. currentSessionIndex: 0,
  248. sessions: [session].concat(state.sessions),
  249. }));
  250. },
  251. nextSession(delta: number) {
  252. const n = get().sessions.length;
  253. const limit = (x: number) => (x + n) % n;
  254. const i = get().currentSessionIndex;
  255. get().selectSession(limit(i + delta));
  256. },
      // Delete the session at `index`, keeping at least one session alive
      // and offering a 5-second toast to undo the deletion.
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);
        if (!deletedSession) return;
        const sessions = get().sessions.slice();
        sessions.splice(index, 1);
        // Keep the selection on the same session (shift left by one when the
        // deleted session was before it), clamped to the new array bounds.
        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );
        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }
        // for undo delete action: snapshot taken BEFORE applying the change
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };
        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));
        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              // Restore the pre-deletion snapshot.
              set(() => restoreState);
            },
          },
          5000,
        );
      },
  292. currentSession() {
  293. let index = get().currentSessionIndex;
  294. const sessions = get().sessions;
  295. if (index < 0 || index >= sessions.length) {
  296. index = Math.min(sessions.length - 1, Math.max(0, index));
  297. set(() => ({ currentSessionIndex: index }));
  298. }
  299. const session = sessions[index];
  300. return session;
  301. },
      // Called when a bot reply finishes: bump lastUpdate, refresh the messages
      // array reference (concat() with no args clones it so subscribers
      // re-render), then update stats and maybe summarize.
      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
      // Send a user message: apply the input template, build the multimodal
      // payload, append user+placeholder-bot messages to the session, and
      // stream the model's reply into the bot message via the llm callbacks.
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);
        // Plain string, or [text?, ...images] when attachments are present.
        let mContent: string | MultimodalContent[] = userContent;
        if (attachImages && attachImages.length > 0) {
          mContent = [
            ...(userContent
              ? [{ type: "text" as const, text: userContent }]
              : []),
            ...attachImages.map((url) => ({
              type: "image_url" as const,
              image_url: { url },
            })),
          ];
        }
        let userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });
        // Placeholder that the streaming callbacks mutate in place.
        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });
        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;
        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });
        const api: ClientApi = getClientApi(modelConfig.providerName);
        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            // Clone the array reference so subscribers re-render.
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onBeforeTool(tool: ChatMessageTool) {
            // Record the pending tool call on the bot message.
            (botMessage.tools = botMessage?.tools || []).push(tool);
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onAfterTool(tool: ChatMessageTool) {
            // Replace the matching pending entry with the finished tool result.
            botMessage?.tools?.forEach((t, i, tools) => {
              if (tool.id == t.id) {
                tools[i] = { ...tool };
              }
            });
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onError(error) {
            const isAborted = error.message?.includes?.("aborted");
            // Append the error details to whatever partial content arrived.
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            // A user-initiated abort is not flagged as an error.
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );
            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
  419. getMemoryPrompt() {
  420. const session = get().currentSession();
  421. if (session.memoryPrompt.length) {
  422. return {
  423. role: "system",
  424. content: Locale.Store.Prompt.History(session.memoryPrompt),
  425. date: "",
  426. } as ChatMessage;
  427. }
  428. },
      // Assemble the context window sent with a new request:
      // system prompt + long-term memory + mask context + recent messages,
      // bounded by clearContextIndex, history count and the token budget.
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;
        // in-context prompts
        const contextPrompts = session.mask.context.slice();
        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          (session.mask.modelConfig.model.startsWith("gpt-") ||
            session.mask.modelConfig.model.startsWith("chatgpt-"));
        var systemPrompts: ChatMessage[] = [];
        systemPrompts = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];
        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }
        const memoryPrompt = get().getMemoryPrompt();
        // long term memory: only when enabled, present, and not cleared away
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts =
          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;
        // short term memory: at most historyMessageCount latest messages
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );
        // lets concat send messages, including 4 parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if user has cleared history messages, we should exclude the memory too.
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;
        // get recent messages as much as possible, newest first, within the
        // token budget; error messages are skipped.
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }
        // concat all messages (recent ones back in chronological order)
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];
        return recentMessages;
      },
      // Apply `updater` to one message of one session (updater may receive
      // undefined for out-of-range indices), then write the mutated sessions
      // array back so the change persists.
      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },
  519. resetSession() {
  520. get().updateCurrentSession((session) => {
  521. session.messages = [];
  522. session.memoryPrompt = "";
  523. });
  524. },
  525. summarizeSession(refreshTitle: boolean = false) {
  526. const config = useAppConfig.getState();
  527. const session = get().currentSession();
  528. const modelConfig = session.mask.modelConfig;
  529. // skip summarize when using dalle3?
  530. if (isDalle3(modelConfig.model)) {
  531. return;
  532. }
  533. // if not config compressModel, then using getSummarizeModel
  534. const [model, providerName] = modelConfig.compressModel
  535. ? [modelConfig.compressModel, modelConfig.compressProviderName]
  536. : getSummarizeModel(
  537. session.mask.modelConfig.model,
  538. session.mask.modelConfig.providerName,
  539. );
  540. const api: ClientApi = getClientApi(providerName as ServiceProvider);
  541. // remove error messages if any
  542. const messages = session.messages;
  543. // should summarize topic after chating more than 50 words
  544. const SUMMARIZE_MIN_LEN = 50;
  545. if (
  546. (config.enableAutoGenerateTitle &&
  547. session.topic === DEFAULT_TOPIC &&
  548. countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
  549. refreshTitle
  550. ) {
  551. const startIndex = Math.max(
  552. 0,
  553. messages.length - modelConfig.historyMessageCount,
  554. );
  555. const topicMessages = messages
  556. .slice(
  557. startIndex < messages.length ? startIndex : messages.length - 1,
  558. messages.length,
  559. )
  560. .concat(
  561. createMessage({
  562. role: "user",
  563. content: Locale.Store.Prompt.Topic,
  564. }),
  565. );
  566. api.llm.chat({
  567. messages: topicMessages,
  568. config: {
  569. model,
  570. stream: false,
  571. providerName,
  572. },
  573. onFinish(message) {
  574. if (!isValidMessage(message)) return;
  575. get().updateCurrentSession(
  576. (session) =>
  577. (session.topic =
  578. message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
  579. );
  580. },
  581. });
  582. }
  583. const summarizeIndex = Math.max(
  584. session.lastSummarizeIndex,
  585. session.clearContextIndex ?? 0,
  586. );
  587. let toBeSummarizedMsgs = messages
  588. .filter((msg) => !msg.isError)
  589. .slice(summarizeIndex);
  590. const historyMsgLength = countMessages(toBeSummarizedMsgs);
  591. if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
  592. const n = toBeSummarizedMsgs.length;
  593. toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
  594. Math.max(0, n - modelConfig.historyMessageCount),
  595. );
  596. }
  597. const memoryPrompt = get().getMemoryPrompt();
  598. if (memoryPrompt) {
  599. // add memory prompt
  600. toBeSummarizedMsgs.unshift(memoryPrompt);
  601. }
  602. const lastSummarizeIndex = session.messages.length;
  603. console.log(
  604. "[Chat History] ",
  605. toBeSummarizedMsgs,
  606. historyMsgLength,
  607. modelConfig.compressMessageLengthThreshold,
  608. );
  609. if (
  610. historyMsgLength > modelConfig.compressMessageLengthThreshold &&
  611. modelConfig.sendMemory
  612. ) {
  613. /** Destruct max_tokens while summarizing
  614. * this param is just shit
  615. **/
  616. const { max_tokens, ...modelcfg } = modelConfig;
  617. api.llm.chat({
  618. messages: toBeSummarizedMsgs.concat(
  619. createMessage({
  620. role: "system",
  621. content: Locale.Store.Prompt.Summarize,
  622. date: "",
  623. }),
  624. ),
  625. config: {
  626. ...modelcfg,
  627. stream: true,
  628. model,
  629. providerName,
  630. },
  631. onUpdate(message) {
  632. session.memoryPrompt = message;
  633. },
  634. onFinish(message) {
  635. console.log("[Memory] ", message);
  636. get().updateCurrentSession((session) => {
  637. session.lastSummarizeIndex = lastSummarizeIndex;
  638. session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
  639. });
  640. },
  641. onError(err) {
  642. console.error("[Summarize] ", err);
  643. },
  644. });
  645. }
  646. function isValidMessage(message: any): boolean {
  647. return typeof message === "string" && !message.startsWith("```json");
  648. }
  649. },
  650. updateStat(message: ChatMessage) {
  651. get().updateCurrentSession((session) => {
  652. session.stat.charCount += message.content.length;
  653. // TODO: should update chat count and word count
  654. });
  655. },
      // Mutate the active session in place via `updater`, then re-set the
      // sessions array so subscribers and the persist layer see the change.
      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },
      // Wipe both storage backends (IndexedDB + localStorage) and reload the
      // page so every store re-initializes from defaults.
      async clearAllData() {
        await indexedDBStorage.clear();
        localStorage.clear();
        location.reload();
      },
  667. setLastInput(lastInput: string) {
  668. set({
  669. lastInput,
  670. });
  671. },
  672. };
  673. return methods;
  674. },
  {
    name: StoreKey.Chat,
    version: 3.3,
    // Upgrade persisted state from older schema versions, applying each
    // versioned step in order.
    migrate(persistedState, version) {
      const state = persistedState as any;
      // Deep-clone so migrations never mutate the persisted snapshot.
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;
      if (version < 2) {
        // v2: rebuild each session on top of the mask-based session shape.
        newState.sessions = [];
        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }
      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }
      // Enable `enableInjectSystemPrompts` attribute for old sessions.
      // Resolve issue of old sessions not automatically enabling.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // Exclude those already set by user
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // Because users may have changed this configuration,
            // the user's current configuration is used instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }
      // add default summarize model for every session
      if (version < 3.2) {
        newState.sessions.forEach((s) => {
          const config = useAppConfig.getState();
          s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
          s.mask.modelConfig.compressProviderName =
            config.modelConfig.compressProviderName;
        });
      }
      // revert default summarize model for every session
      if (version < 3.3) {
        newState.sessions.forEach((s) => {
          const config = useAppConfig.getState();
          s.mask.modelConfig.compressModel = "";
          s.mask.modelConfig.compressProviderName = "";
        });
      }
      return newState as any;
    },
  },
);