// chat.ts

import { trimTopic, getMessageTextContent } from "../utils";
import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  ModelProvider,
  StoreKey,
  SUMMARIZE_MODEL,
  GEMINI_SUMMARIZE_MODEL,
} from "../constant";
import { ClientApi, RequestMessage, MultimodalContent } from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
};
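
/** Create a ChatMessage with a fresh id, current timestamp, and user role; any field in `override` wins over the defaults. */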
export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}

export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;
  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});
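
/** Build a blank session: default topic, no messages, zeroed stats, and an empty mask. */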
function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,
    mask: createEmptyMask(),
  };
}
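
/**
 * Pick the model used for summarization: gpt-* chats are summarized with the
 * cheaper SUMMARIZE_MODEL, gemini-pro chats with GEMINI_SUMMARIZE_MODEL, and
 * any other model summarizes with itself.
 */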
function getSummarizeModel(currentModel: string) {
  // if a gpt-* model is in use, force gpt-3.5 for summarization
  if (currentModel.startsWith("gpt")) {
    return SUMMARIZE_MODEL;
  }
  if (currentModel.startsWith("gemini-pro")) {
    return GEMINI_SUMMARIZE_MODEL;
  }
  return currentModel;
}
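
/** Estimate the total token length of a list of messages. */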
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
    0,
  );
}
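
/**
 * Expand the `{{...}}` variables in the user's input template. For example
 * (illustrative values), the template "You are {{model}} from
 * {{ServiceProvider}}.\n{{input}}" with model "gpt-4" renders to
 * "You are gpt-4 from OpenAI.\nHello" for the input "Hello". If the template
 * lacks `{{input}}`, it is appended so the user's text is never dropped.
 */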
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  const cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  // find the entry in DEFAULT_MODELS that matches modelConfig.model
  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);

  let serviceProvider = "OpenAI";
  if (modelInfo) {
    // TODO: auto detect the providerName from modelConfig.model
    // for now, use the providerName recorded on the model info
    serviceProvider = modelInfo.provider.providerName;
  }

  const vars = {
    ServiceProvider: serviceProvider,
    cutoff,
    model: modelConfig.model,
    time: new Date().toLocaleString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // the template must contain {{input}}; append it if missing
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    const regex = new RegExp(`{{${name}}}`, "g");
    output = output.replace(regex, value.toString()); // ensure value is a string
  });

  return output;
}
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};
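
// The chat store persists sessions and exposes the methods below. Typical
// usage (an illustrative sketch, assuming the store is consumed as a hook in
// a React component):
//
//   const chatStore = useChatStore();
//   chatStore.newSession();
//   await chatStore.onUserInput("Hello!");
//
// Note the `get()` wrapper: it merges the raw state with `methods` so that
// method implementations can call sibling methods through it.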
export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },
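
      /**
       * Move a session from `from` to `to` while keeping the selection stable.
       * For example, with sessions [A, B, C], selection on C (index 2), and a
       * move of A (0) to 2, the array becomes [B, C, A] and the selected index
       * shifts from 2 to 1 so C stays selected.
       */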
      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // remap the selected session index
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },

      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },
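
      /**
       * Delete the session at `index`, keeping at least one session alive, and
       * show a toast that can restore the pre-delete state for five seconds.
       */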
      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // snapshot the old state for the undo action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },

      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        // clamp the index if it is out of range
        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];
        return session;
      },
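
      /**
       * Hook run after a message finishes: bump the session timestamp, update
       * stats, and kick off topic/memory summarization.
       */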
      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // concat() clones the array so persisted state gets a new reference
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        get().summarizeSession();
      },
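
      /**
       * Send one user turn: fill the input template, attach any images as
       * multimodal content, append user and bot placeholder messages, then
       * stream the model's reply into the bot message.
       */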
      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        let mContent: string | MultimodalContent[] = userContent;

        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        const userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        let api: ClientApi;
        if (modelConfig.model.startsWith("gemini")) {
          api = new ClientApi(ModelProvider.GeminiPro);
        } else {
          api = new ClientApi(ModelProvider.GPT);
        }

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onError(error) {
            const isAborted = error.message.includes("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );

            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
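
      /** Wrap the session's summarized memory as a system message; empty content if there is no memory yet. */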
      getMemoryPrompt() {
        const session = get().currentSession();

        return {
          role: "system",
          content:
            session.memoryPrompt.length > 0
              ? Locale.Store.Prompt.History(session.memoryPrompt)
              : "",
          date: "",
        } as ChatMessage;
      },
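
      /**
       * Assemble the context window to send: optional injected system prompt,
       * long-term memory summary, the mask's in-context prompts, and as many
       * recent messages as fit under the token budget.
       */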
      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          session.mask.modelConfig.model.startsWith("gpt-");

        const systemPrompts: ChatMessage[] = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];
        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        // long term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts = shouldSendLongTermMemory
          ? [get().getMemoryPrompt()]
          : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // the sent messages are concatenated from five parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if the user has cleared history messages, exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // collect recent messages, newest first, until the token budget is hit
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },
      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },
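
      /**
       * Two jobs: auto-generate a topic once the chat passes the length
       * threshold, and compress older history into `memoryPrompt` when it
       * exceeds `compressMessageLengthThreshold`.
       */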
      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        let api: ClientApi;
        if (modelConfig.model.startsWith("gemini")) {
          api = new ClientApi(ModelProvider.GeminiPro);
        } else {
          api = new ClientApi(ModelProvider.GPT);
        }

        // error messages are filtered out below before summarizing
        const messages = session.messages;

        // should summarize the topic after chatting more than 50 tokens (estimated)
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);

        // note the parentheses: without them `>` binds tighter than `??`,
        // so the 4000-token fallback would never apply
        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        // add memory prompt
        toBeSummarizedMsgs.unshift(get().getMemoryPrompt());

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelConfig,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              get().updateCurrentSession((session) => {
                session.lastSummarizeIndex = lastSummarizeIndex;
                session.memoryPrompt = message; // persist the updated memory prompt
              });
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },
      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // count the text content so multimodal messages don't add array length
          session.stat.charCount += getMessageTextContent(message).length;
          // TODO: should update chat count and word count
        });
      },
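
      /** Apply `updater` to the current session in place, then write sessions back to trigger persistence. */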
      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      clearAllData() {
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.1,
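
    /** Upgrade persisted state from older schema versions, one version step at a time. */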
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate ids to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable the `enableInjectSystemPrompts` attribute for old sessions,
      // which otherwise would never turn it on automatically.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude sessions where the user has already set it
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // the user may have changed this setting, so copy the user's
            // current configuration instead of the default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);