config.ts 4.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180
  1. import { LLMModel } from "../client/api";
  2. import { getClientConfig } from "../config/client";
  3. import {
  4. DEFAULT_INPUT_TEMPLATE,
  5. DEFAULT_MODELS,
  6. DEFAULT_SIDEBAR_WIDTH,
  7. StoreKey,
  8. ServiceProvider,
  9. } from "../constant";
  10. import { createPersistStore } from "../utils/store";
// Union of the model names statically declared in DEFAULT_MODELS.
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];

// Keyboard shortcut that submits the chat input.
export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}

// UI color scheme; "Auto" presumably tracks the OS preference — confirm in the theme-applying code.
export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}

// Client/build environment info (read once at module load; used below for
// app-only defaults and the input template override).
const config = getClientConfig();
// Default application settings. Persisted state is merged with and migrated
// toward this shape (see useAppConfig below); ChatConfig/ModelConfig are
// derived from it so the types always match the defaults.
export const DEFAULT_CONFIG = {
  lastUpdate: Date.now(), // timestamp, to merge state
  submitKey: SubmitKey.Enter,
  avatar: "1f603", // presumably an emoji codepoint for the default avatar — verify against the avatar renderer
  fontSize: 14,
  fontFamily: "", // empty string = use the app's built-in font stack
  theme: Theme.Auto as Theme, // widened so persisted state may hold any Theme
  tightBorder: !!config?.isApp, // true only when the client config reports isApp
  sendPreviewBubble: true,
  enableAutoGenerateTitle: true,
  sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // dont show splash screen when create chat
  hideBuiltinMasks: false, // dont add builtin masks
  customModels: "", // comma-separated user-added model names (see 3.5 migration)
  // NOTE(review): double cast suggests DEFAULT_MODELS' literal type is not
  // assignable to LLMModel[] — consider a `satisfies`-based fix at the source.
  models: DEFAULT_MODELS as any as LLMModel[],
  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    providerName: "OpenAI" as ServiceProvider,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 4000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true, // include summarized history in requests
    historyMessageCount: 4,
    compressMessageLengthThreshold: 1000,
    enableInjectSystemPrompts: true,
    template: config?.template ?? DEFAULT_INPUT_TEMPLATE, // build-time override wins
  },
};

// Shape of the whole persisted config and of its nested model section.
export type ChatConfig = typeof DEFAULT_CONFIG;
export type ModelConfig = ChatConfig["modelConfig"];
  58. export function limitNumber(
  59. x: number,
  60. min: number,
  61. max: number,
  62. defaultValue: number,
  63. ) {
  64. if (isNaN(x)) {
  65. return defaultValue;
  66. }
  67. return Math.min(max, Math.max(min, x));
  68. }
  69. export const ModalConfigValidator = {
  70. model(x: string) {
  71. return x as ModelType;
  72. },
  73. max_tokens(x: number) {
  74. return limitNumber(x, 0, 512000, 1024);
  75. },
  76. presence_penalty(x: number) {
  77. return limitNumber(x, -2, 2, 0);
  78. },
  79. frequency_penalty(x: number) {
  80. return limitNumber(x, -2, 2, 0);
  81. },
  82. temperature(x: number) {
  83. return limitNumber(x, 0, 2, 1);
  84. },
  85. top_p(x: number) {
  86. return limitNumber(x, 0, 1, 1);
  87. },
  88. };
// Persisted app-config store: defaults, a few actions, and the migration
// chain that upgrades older persisted states to the current version.
export const useAppConfig = createPersistStore(
  { ...DEFAULT_CONFIG },
  (set, get) => ({
    // Restore every setting to its default value.
    reset() {
      set(() => ({ ...DEFAULT_CONFIG }));
    },
    // Merge a freshly fetched model list into the stored one. Models are
    // keyed by "name@providerId"; entries only in the old list are kept but
    // marked unavailable, while incoming entries overwrite and are marked
    // available.
    mergeModels(newModels: LLMModel[]) {
      if (!newModels || newModels.length === 0) {
        return;
      }
      const oldModels = get().models;
      const modelMap: Record<string, LLMModel> = {};
      // NOTE(review): mutates the model objects currently held in state
      // (available flags) before set() runs — relies on the store
      // tolerating in-place mutation; confirm against createPersistStore.
      for (const model of oldModels) {
        model.available = false;
        modelMap[`${model.name}@${model?.provider?.id}`] = model;
      }
      for (const model of newModels) {
        model.available = true;
        modelMap[`${model.name}@${model?.provider?.id}`] = model;
      }
      set(() => ({
        models: Object.values(modelMap),
      }));
    },
    // Intentionally empty placeholder action.
    allModels() {},
  }),
  {
    name: StoreKey.Config,
    version: 3.9,
    // Upgrade an older persisted state step by step; each guard applies the
    // defaults/fixes introduced at that version.
    // NOTE(review): versions compare as floats, so a future 3.10 would be
    // LESS than 3.9 — keep minor versions single-digit or move to integers.
    migrate(persistedState, version) {
      const state = persistedState as ChatConfig;
      if (version < 3.4) {
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;
        state.hideBuiltinMasks = false;
      }
      if (version < 3.5) {
        state.customModels = "claude,claude-100k";
      }
      if (version < 3.6) {
        state.modelConfig.enableInjectSystemPrompts = true;
      }
      if (version < 3.7) {
        state.enableAutoGenerateTitle = true;
      }
      if (version < 3.8) {
        state.lastUpdate = Date.now();
      }
      if (version < 3.9) {
        // Re-apply the build-time template override, but only when the user
        // has not customized the template away from the default.
        state.modelConfig.template =
          state.modelConfig.template !== DEFAULT_INPUT_TEMPLATE
            ? state.modelConfig.template
            : config?.template ?? DEFAULT_INPUT_TEMPLATE;
      }
      return state as any;
    },
  },
);