// config.ts — persisted application configuration store (defaults, validators, migrations)
  1. import { LLMModel } from "../client/api";
  2. import { DalleSize } from "../typing";
  3. import { getClientConfig } from "../config/client";
  4. import {
  5. DEFAULT_INPUT_TEMPLATE,
  6. DEFAULT_MODELS,
  7. DEFAULT_SIDEBAR_WIDTH,
  8. StoreKey,
  9. ServiceProvider,
  10. } from "../constant";
  11. import { createPersistStore } from "../utils/store";
// Union of every model name declared in DEFAULT_MODELS (derived with indexed
// access, so it stays in sync with the constant automatically).
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
// Keyboard chord that submits the chat input.
// NOTE(review): the string values are persisted via DEFAULT_CONFIG.submitKey and
// presumably also displayed in settings UI — confirm before renaming any of them.
export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}
// UI color theme. "auto" defers to the system preference; the string values are
// persisted in the config store, so treat them as part of the storage format.
export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}
// Client/build-time environment config; accessed with `config?.` below, so it
// may be undefined in some environments (e.g. during server-side evaluation).
const config = getClientConfig();
// Default persisted app configuration. This object defines both the initial
// store state and (via `typeof`) the ChatConfig type, so every field added
// here becomes part of the persisted schema — bump the store version and add
// a migration step when changing shapes or defaults.
export const DEFAULT_CONFIG = {
  lastUpdate: Date.now(), // timestamp, to merge state
  submitKey: SubmitKey.Enter,
  avatar: "1f603", // emoji codepoint used as the user's avatar
  fontSize: 14,
  fontFamily: "", // empty string = fall back to the app's default font stack
  theme: Theme.Auto as Theme,
  tightBorder: true,
  sendPreviewBubble: true,
  enableAutoGenerateTitle: true,
  sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // dont show splash screen when create chat
  hideBuiltinMasks: false, // dont add builtin masks
  customModels: "", // comma-separated extra model names (see migration for v3.5)
  models: DEFAULT_MODELS as any as LLMModel[],

  // Per-conversation model settings; see ModalConfigValidator for the accepted
  // ranges of the numeric fields.
  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    providerName: "OpenAI" as ServiceProvider,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 4000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true, // include summarized history ("memory") in requests
    historyMessageCount: 4, // number of recent messages attached as context
    compressMessageLengthThreshold: 1000, // compress history beyond this length
    enableInjectSystemPrompts: true,
    template: config?.template ?? DEFAULT_INPUT_TEMPLATE, // user-input template
    size: "1024x1024" as DalleSize, // DALL·E image size
  },
};
// Persisted config state shape, derived from the defaults so the two can never drift.
export type ChatConfig = typeof DEFAULT_CONFIG;
// The per-model settings subset of the config.
export type ModelConfig = ChatConfig["modelConfig"];
  60. export function limitNumber(
  61. x: number,
  62. min: number,
  63. max: number,
  64. defaultValue: number,
  65. ) {
  66. if (isNaN(x)) {
  67. return defaultValue;
  68. }
  69. return Math.min(max, Math.max(min, x));
  70. }
  71. export const ModalConfigValidator = {
  72. model(x: string) {
  73. return x as ModelType;
  74. },
  75. max_tokens(x: number) {
  76. return limitNumber(x, 0, 512000, 1024);
  77. },
  78. presence_penalty(x: number) {
  79. return limitNumber(x, -2, 2, 0);
  80. },
  81. frequency_penalty(x: number) {
  82. return limitNumber(x, -2, 2, 0);
  83. },
  84. temperature(x: number) {
  85. return limitNumber(x, 0, 2, 1);
  86. },
  87. top_p(x: number) {
  88. return limitNumber(x, 0, 1, 1);
  89. },
  90. };
// Persisted app-config store: default state, store methods, and versioned
// migrations for previously persisted state.
export const useAppConfig = createPersistStore(
  { ...DEFAULT_CONFIG },
  (set, get) => ({
    // Restore every setting to its default value.
    reset() {
      set(() => ({ ...DEFAULT_CONFIG }));
    },

    // Merge a freshly fetched model list into the stored one. Models are keyed
    // by "name@providerId"; models absent from `newModels` are kept but marked
    // unavailable, while entries present in both are overwritten by the new one.
    mergeModels(newModels: LLMModel[]) {
      if (!newModels || newModels.length === 0) {
        return;
      }
      const oldModels = get().models;
      const modelMap: Record<string, LLMModel> = {};
      for (const model of oldModels) {
        // Assume stale until the new list re-confirms the model below.
        model.available = false;
        modelMap[`${model.name}@${model?.provider?.id}`] = model;
      }
      for (const model of newModels) {
        model.available = true;
        // Same key as an old entry → the stale entry is replaced.
        modelMap[`${model.name}@${model?.provider?.id}`] = model;
      }
      set(() => ({
        models: Object.values(modelMap),
      }));
    },

    // Intentionally empty — NOTE(review): presumably a placeholder that is
    // overridden or filtered elsewhere; confirm before removing.
    allModels() {},
  }),
  {
    name: StoreKey.Config,
    // Current schema version; bump together with a new `if (version < …)`
    // step below whenever the persisted shape or defaults change.
    version: 3.9,
    // Upgrade persisted state from older versions. Steps are cumulative and
    // MUST stay in ascending version order so old states replay every change.
    migrate(persistedState, version) {
      const state = persistedState as ChatConfig;
      if (version < 3.4) {
        // Fields introduced/reset in 3.4: force the then-new defaults.
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;
        state.hideBuiltinMasks = false;
      }
      if (version < 3.5) {
        state.customModels = "claude,claude-100k";
      }
      if (version < 3.6) {
        state.modelConfig.enableInjectSystemPrompts = true;
      }
      if (version < 3.7) {
        state.enableAutoGenerateTitle = true;
      }
      if (version < 3.8) {
        state.lastUpdate = Date.now();
      }
      if (version < 3.9) {
        // Keep a user-customized template; otherwise adopt the (possibly
        // environment-provided) default from getClientConfig().
        state.modelConfig.template =
          state.modelConfig.template !== DEFAULT_INPUT_TEMPLATE
            ? state.modelConfig.template
            : config?.template ?? DEFAULT_INPUT_TEMPLATE;
      }
      return state as any;
    },
  },
);