// chat.ts

import { trimTopic, getMessageTextContent } from "../utils";
import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  ModelProvider,
  StoreKey,
  SUMMARIZE_MODEL,
  GEMINI_SUMMARIZE_MODEL,
} from "../constant";
import { ClientApi, RequestMessage, MultimodalContent } from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";

export type ChatMessage = RequestMessage & {
  date: string;
  streaming?: boolean;
  isError?: boolean;
  id: string;
  model?: ModelType;
};

export function createMessage(override: Partial<ChatMessage>): ChatMessage {
  return {
    id: nanoid(),
    date: new Date().toLocaleString(),
    role: "user",
    content: "",
    ...override,
  };
}
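
// Example: any field not overridden keeps the defaults above, e.g.
//   const greeting = createMessage({ role: "assistant", content: "Hello!" });
// yields a message with a fresh nanoid `id` and the current local time as `date`.
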
export interface ChatStat {
  tokenCount: number;
  wordCount: number;
  charCount: number;
}

export interface ChatSession {
  id: string;
  topic: string;
  memoryPrompt: string;
  messages: ChatMessage[];
  stat: ChatStat;
  lastUpdate: number;
  lastSummarizeIndex: number;
  clearContextIndex?: number;
  mask: Mask;
}

export const DEFAULT_TOPIC = Locale.Store.DefaultTopic;
export const BOT_HELLO: ChatMessage = createMessage({
  role: "assistant",
  content: Locale.Store.BotHello,
});

function createEmptySession(): ChatSession {
  return {
    id: nanoid(),
    topic: DEFAULT_TOPIC,
    memoryPrompt: "",
    messages: [],
    stat: {
      tokenCount: 0,
      wordCount: 0,
      charCount: 0,
    },
    lastUpdate: Date.now(),
    lastSummarizeIndex: 0,
    mask: createEmptyMask(),
  };
}

function getSummarizeModel(currentModel: string) {
  // if a gpt-* model is in use, force the cheaper 3.5 model for summarizing
  if (currentModel.startsWith("gpt")) {
    return SUMMARIZE_MODEL;
  }
  if (currentModel.startsWith("gemini-pro")) {
    return GEMINI_SUMMARIZE_MODEL;
  }
  return currentModel;
}
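
// For example, given the constants in ../constant:
//   getSummarizeModel("gpt-4");      // => SUMMARIZE_MODEL
//   getSummarizeModel("gemini-pro"); // => GEMINI_SUMMARIZE_MODEL
//   getSummarizeModel("claude-3");   // => "claude-3" (used as-is)
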
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
    0,
  );
}
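
// e.g. countMessages(session.messages) yields the estimated token total,
// which gates topic auto-generation below (see SUMMARIZE_MIN_LEN).
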
function fillTemplateWith(input: string, modelConfig: ModelConfig) {
  const cutoff =
    KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
  // find the entry in DEFAULT_MODELS that matches modelConfig.model
  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);

  let serviceProvider = "OpenAI";
  if (modelInfo) {
    // TODO: auto-detect the provider name from modelConfig.model;
    // for now, use the providerName recorded on the model info
    serviceProvider = modelInfo.provider.providerName;
  }

  const vars = {
    ServiceProvider: serviceProvider,
    cutoff,
    model: modelConfig.model,
    time: new Date().toLocaleString(),
    lang: getLang(),
    input: input,
  };

  let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;

  // the template must contain {{input}}; append it if missing
  const inputVar = "{{input}}";
  if (!output.includes(inputVar)) {
    output += "\n" + inputVar;
  }

  Object.entries(vars).forEach(([name, value]) => {
    const regex = new RegExp(`{{${name}}}`, "g");
    output = output.replace(regex, value.toString()); // ensure value is a string
  });

  return output;
}
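
// Usage sketch (the template here is hypothetical; real ones come from the
// model config or DEFAULT_INPUT_TEMPLATE):
//   fillTemplateWith("Hello", { ...modelConfig, template: "[{{model}}] {{input}}" });
//   // => "[gpt-4] Hello" when modelConfig.model is "gpt-4"
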
const DEFAULT_CHAT_STATE = {
  sessions: [createEmptySession()],
  currentSessionIndex: 0,
};

export const useChatStore = createPersistStore(
  DEFAULT_CHAT_STATE,
  (set, _get) => {
    function get() {
      return {
        ..._get(),
        ...methods,
      };
    }

    const methods = {
      clearSessions() {
        set(() => ({
          sessions: [createEmptySession()],
          currentSessionIndex: 0,
        }));
      },

      selectSession(index: number) {
        set({
          currentSessionIndex: index,
        });
      },

      moveSession(from: number, to: number) {
        set((state) => {
          const { sessions, currentSessionIndex: oldIndex } = state;

          // move the session
          const newSessions = [...sessions];
          const session = newSessions[from];
          newSessions.splice(from, 1);
          newSessions.splice(to, 0, session);

          // adjust the current session index so the same session stays selected
          let newIndex = oldIndex === from ? to : oldIndex;
          if (oldIndex > from && oldIndex <= to) {
            newIndex -= 1;
          } else if (oldIndex < from && oldIndex >= to) {
            newIndex += 1;
          }

          return {
            currentSessionIndex: newIndex,
            sessions: newSessions,
          };
        });
      },
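
      // Example for moveSession above: with sessions [A, B, C] and
      // currentSessionIndex 1 (B), moveSession(0, 2) reorders to [B, C, A]
      // and the index becomes 0, so B stays selected.
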
      newSession(mask?: Mask) {
        const session = createEmptySession();

        if (mask) {
          const config = useAppConfig.getState();
          const globalModelConfig = config.modelConfig;

          session.mask = {
            ...mask,
            modelConfig: {
              ...globalModelConfig,
              ...mask.modelConfig,
            },
          };
          session.topic = mask.name;
        }

        set((state) => ({
          currentSessionIndex: 0,
          sessions: [session].concat(state.sessions),
        }));
      },

      nextSession(delta: number) {
        const n = get().sessions.length;
        // wrap the index around both ends of the session list
        const limit = (x: number) => (x + n) % n;
        const i = get().currentSessionIndex;
        get().selectSession(limit(i + delta));
      },

      deleteSession(index: number) {
        const deletingLastSession = get().sessions.length === 1;
        const deletedSession = get().sessions.at(index);

        if (!deletedSession) return;

        const sessions = get().sessions.slice();
        sessions.splice(index, 1);

        const currentIndex = get().currentSessionIndex;
        let nextIndex = Math.min(
          currentIndex - Number(index < currentIndex),
          sessions.length - 1,
        );

        if (deletingLastSession) {
          nextIndex = 0;
          sessions.push(createEmptySession());
        }

        // for undo delete action
        const restoreState = {
          currentSessionIndex: get().currentSessionIndex,
          sessions: get().sessions.slice(),
        };

        set(() => ({
          currentSessionIndex: nextIndex,
          sessions,
        }));

        showToast(
          Locale.Home.DeleteToast,
          {
            text: Locale.Home.Revert,
            onClick() {
              set(() => restoreState);
            },
          },
          5000,
        );
      },

      currentSession() {
        let index = get().currentSessionIndex;
        const sessions = get().sessions;

        // clamp an out-of-range index back into the valid range
        if (index < 0 || index >= sessions.length) {
          index = Math.min(sessions.length - 1, Math.max(0, index));
          set(() => ({ currentSessionIndex: index }));
        }

        const session = sessions[index];
        return session;
      },

      onNewMessage(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // concat() with no args clones the array so the state change is detected
          session.messages = session.messages.concat();
          session.lastUpdate = Date.now();
        });
        get().updateStat(message);
        // get().summarizeSession();
      },

      async onUserInput(content: string, attachImages?: string[]) {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        const userContent = fillTemplateWith(content, modelConfig);
        console.log("[User Input] after template: ", userContent);

        let mContent: string | MultimodalContent[] = userContent;
        if (attachImages && attachImages.length > 0) {
          mContent = [
            {
              type: "text",
              text: userContent,
            },
          ];
          mContent = mContent.concat(
            attachImages.map((url) => {
              return {
                type: "image_url",
                image_url: {
                  url: url,
                },
              };
            }),
          );
        }

        const userMessage: ChatMessage = createMessage({
          role: "user",
          content: mContent,
        });

        const botMessage: ChatMessage = createMessage({
          role: "assistant",
          streaming: true,
          model: modelConfig.model,
        });

        // get recent messages
        const recentMessages = get().getMessagesWithMemory();
        const sendMessages = recentMessages.concat(userMessage);
        const messageIndex = get().currentSession().messages.length + 1;

        // save user's and bot's message
        get().updateCurrentSession((session) => {
          const savedUserMessage = {
            ...userMessage,
            content: mContent,
          };
          session.messages = session.messages.concat([
            savedUserMessage,
            botMessage,
          ]);
        });

        let api: ClientApi;
        if (modelConfig.model.startsWith("gemini")) {
          api = new ClientApi(ModelProvider.GeminiPro);
        } else if (modelConfig.model.startsWith("claude")) {
          api = new ClientApi(ModelProvider.Claude);
        } else {
          api = new ClientApi(ModelProvider.GPT);
        }

        // make request
        api.llm.chat({
          messages: sendMessages,
          config: { ...modelConfig, stream: true },
          onUpdate(message) {
            botMessage.streaming = true;
            if (message) {
              botMessage.content = message;
            }
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
          },
          onFinish(message) {
            botMessage.streaming = false;
            if (message) {
              botMessage.content = message;
              get().onNewMessage(botMessage);
            }
            ChatControllerPool.remove(session.id, botMessage.id);
          },
          onError(error) {
            const isAborted = error.message.includes("aborted");
            botMessage.content +=
              "\n\n" +
              prettyObject({
                error: true,
                message: error.message,
              });
            botMessage.streaming = false;
            userMessage.isError = !isAborted;
            botMessage.isError = !isAborted;
            get().updateCurrentSession((session) => {
              session.messages = session.messages.concat();
            });
            ChatControllerPool.remove(
              session.id,
              botMessage.id ?? messageIndex,
            );
            console.error("[Chat] failed ", error);
          },
          onController(controller) {
            // collect controller for stop/retry
            ChatControllerPool.addController(
              session.id,
              botMessage.id ?? messageIndex,
              controller,
            );
          },
        });
      },
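
      // Example for onUserInput above (the data URL is a hypothetical
      // attachment):
      //   await useChatStore.getState().onUserInput("Describe this image", [
      //     "data:image/png;base64,....",
      //   ]);
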
      getMemoryPrompt() {
        const session = get().currentSession();

        return {
          role: "system",
          content:
            session.memoryPrompt.length > 0
              ? Locale.Store.Prompt.History(session.memoryPrompt)
              : "",
          date: "",
        } as ChatMessage;
      },

      getMessagesWithMemory() {
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;
        const clearContextIndex = session.clearContextIndex ?? 0;
        const messages = session.messages.slice();
        const totalMessageCount = session.messages.length;

        // in-context prompts
        const contextPrompts = session.mask.context.slice();

        // system prompts, to get close to OpenAI Web ChatGPT
        const shouldInjectSystemPrompts =
          modelConfig.enableInjectSystemPrompts &&
          session.mask.modelConfig.model.startsWith("gpt-");

        const systemPrompts: ChatMessage[] = shouldInjectSystemPrompts
          ? [
              createMessage({
                role: "system",
                content: fillTemplateWith("", {
                  ...modelConfig,
                  template: DEFAULT_SYSTEM_TEMPLATE,
                }),
              }),
            ]
          : [];
        if (shouldInjectSystemPrompts) {
          console.log(
            "[Global System Prompt] ",
            systemPrompts.at(0)?.content ?? "empty",
          );
        }

        // long term memory
        const shouldSendLongTermMemory =
          modelConfig.sendMemory &&
          session.memoryPrompt &&
          session.memoryPrompt.length > 0 &&
          session.lastSummarizeIndex > clearContextIndex;
        const longTermMemoryPrompts = shouldSendLongTermMemory
          ? [get().getMemoryPrompt()]
          : [];
        const longTermMemoryStartIndex = session.lastSummarizeIndex;

        // short term memory
        const shortTermMemoryStartIndex = Math.max(
          0,
          totalMessageCount - modelConfig.historyMessageCount,
        );

        // concatenate the messages to send, in five parts:
        // 0. system prompt: to get close to OpenAI Web ChatGPT
        // 1. long term memory: summarized memory messages
        // 2. pre-defined in-context prompts
        // 3. short term memory: latest n messages
        // 4. newest input message
        const memoryStartIndex = shouldSendLongTermMemory
          ? Math.min(longTermMemoryStartIndex, shortTermMemoryStartIndex)
          : shortTermMemoryStartIndex;
        // and if the user has cleared the history, exclude the memory too
        const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
        const maxTokenThreshold = modelConfig.max_tokens;

        // collect as many recent messages as fit under the token threshold
        const reversedRecentMessages = [];
        for (
          let i = totalMessageCount - 1, tokenCount = 0;
          i >= contextStartIndex && tokenCount < maxTokenThreshold;
          i -= 1
        ) {
          const msg = messages[i];
          if (!msg || msg.isError) continue;
          tokenCount += estimateTokenLength(getMessageTextContent(msg));
          reversedRecentMessages.push(msg);
        }

        // concat all messages
        const recentMessages = [
          ...systemPrompts,
          ...longTermMemoryPrompts,
          ...contextPrompts,
          ...reversedRecentMessages.reverse(),
        ];

        return recentMessages;
      },

      updateMessage(
        sessionIndex: number,
        messageIndex: number,
        updater: (message?: ChatMessage) => void,
      ) {
        const sessions = get().sessions;
        const session = sessions.at(sessionIndex);
        const messages = session?.messages;
        updater(messages?.at(messageIndex));
        set(() => ({ sessions }));
      },

      resetSession() {
        get().updateCurrentSession((session) => {
          session.messages = [];
          session.memoryPrompt = "";
        });
      },

      summarizeSession() {
        const config = useAppConfig.getState();
        const session = get().currentSession();
        const modelConfig = session.mask.modelConfig;

        let api: ClientApi;
        if (modelConfig.model.startsWith("gemini")) {
          api = new ClientApi(ModelProvider.GeminiPro);
        } else if (modelConfig.model.startsWith("claude")) {
          api = new ClientApi(ModelProvider.Claude);
        } else {
          api = new ClientApi(ModelProvider.GPT);
        }

        const messages = session.messages;

        // only auto-generate a topic after roughly 50 estimated tokens of chat
        const SUMMARIZE_MIN_LEN = 50;
        if (
          config.enableAutoGenerateTitle &&
          session.topic === DEFAULT_TOPIC &&
          countMessages(messages) >= SUMMARIZE_MIN_LEN
        ) {
          const topicMessages = messages.concat(
            createMessage({
              role: "user",
              content: Locale.Store.Prompt.Topic,
            }),
          );
          api.llm.chat({
            messages: topicMessages,
            config: {
              model: getSummarizeModel(session.mask.modelConfig.model),
              stream: false,
            },
            onFinish(message) {
              get().updateCurrentSession(
                (session) =>
                  (session.topic =
                    message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
              );
            },
          });
        }

        const summarizeIndex = Math.max(
          session.lastSummarizeIndex,
          session.clearContextIndex ?? 0,
        );
        // skip error messages when summarizing
        let toBeSummarizedMsgs = messages
          .filter((msg) => !msg.isError)
          .slice(summarizeIndex);

        const historyMsgLength = countMessages(toBeSummarizedMsgs);
        // note the parentheses: compare against max_tokens, defaulting to 4000
        if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
          const n = toBeSummarizedMsgs.length;
          toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
            Math.max(0, n - modelConfig.historyMessageCount),
          );
        }

        // add memory prompt
        toBeSummarizedMsgs.unshift(get().getMemoryPrompt());

        const lastSummarizeIndex = session.messages.length;

        console.log(
          "[Chat History] ",
          toBeSummarizedMsgs,
          historyMsgLength,
          modelConfig.compressMessageLengthThreshold,
        );

        if (
          historyMsgLength > modelConfig.compressMessageLengthThreshold &&
          modelConfig.sendMemory
        ) {
          // drop max_tokens while summarizing: it would needlessly cap the
          // length of the generated summary
          const { max_tokens, ...modelcfg } = modelConfig;
          api.llm.chat({
            messages: toBeSummarizedMsgs.concat(
              createMessage({
                role: "system",
                content: Locale.Store.Prompt.Summarize,
                date: "",
              }),
            ),
            config: {
              ...modelcfg,
              stream: true,
              model: getSummarizeModel(session.mask.modelConfig.model),
            },
            onUpdate(message) {
              session.memoryPrompt = message;
            },
            onFinish(message) {
              console.log("[Memory] ", message);
              get().updateCurrentSession((session) => {
                session.lastSummarizeIndex = lastSummarizeIndex;
                session.memoryPrompt = message; // persist the memory prompt to local storage
              });
            },
            onError(err) {
              console.error("[Summarize] ", err);
            },
          });
        }
      },

      updateStat(message: ChatMessage) {
        get().updateCurrentSession((session) => {
          // count characters of the text content; message.content may be a
          // multimodal array, so go through getMessageTextContent
          session.stat.charCount += getMessageTextContent(message).length;
          // TODO: should update chat count and word count
        });
      },

      updateCurrentSession(updater: (session: ChatSession) => void) {
        const sessions = get().sessions;
        const index = get().currentSessionIndex;
        updater(sessions[index]);
        set(() => ({ sessions }));
      },

      clearAllData() {
        localStorage.clear();
        location.reload();
      },
    };

    return methods;
  },
  {
    name: StoreKey.Chat,
    version: 3.1,
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
        JSON.stringify(state),
      ) as typeof DEFAULT_CHAT_STATE;

      if (version < 2) {
        newState.sessions = [];

        const oldSessions = state.sessions;
        for (const oldSession of oldSessions) {
          const newSession = createEmptySession();
          newSession.topic = oldSession.topic;
          newSession.messages = [...oldSession.messages];
          newSession.mask.modelConfig.sendMemory = true;
          newSession.mask.modelConfig.historyMessageCount = 4;
          newSession.mask.modelConfig.compressMessageLengthThreshold = 1000;
          newState.sessions.push(newSession);
        }
      }

      if (version < 3) {
        // migrate id to nanoid
        newState.sessions.forEach((s) => {
          s.id = nanoid();
          s.messages.forEach((m) => (m.id = nanoid()));
        });
      }

      // Enable `enableInjectSystemPrompts` for old sessions that never set it,
      // so they do not silently miss the injected system prompt.
      if (version < 3.1) {
        newState.sessions.forEach((s) => {
          if (
            // exclude sessions where the user already set this explicitly
            !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts")
          ) {
            // the user may have changed this setting globally, so copy the
            // current configuration instead of the compile-time default
            const config = useAppConfig.getState();
            s.mask.modelConfig.enableInjectSystemPrompts =
              config.modelConfig.enableInjectSystemPrompts;
          }
        });
      }

      return newState as any;
    },
  },
);
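
// Usage sketch (assumes the zustand-style hook returned by createPersistStore,
// as used elsewhere in this app):
//   const chatStore = useChatStore();
//   chatStore.newSession();
//   await chatStore.onUserInput("Hello there");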