// config.ts — app configuration defaults and persisted config store.
  1. import { isMacOS } from "../utils";
  2. import { getClientConfig } from "../config/client";
  3. import {
  4. DEFAULT_INPUT_TEMPLATE,
  5. DEFAULT_MODELS,
  6. DEFAULT_SIDEBAR_WIDTH,
  7. StoreKey,
  8. } from "../constant";
  9. import { createPersistStore } from "../utils/store";
  10. import { OpenAIConfig } from "../client/openai/config";
  11. import { api } from "../client";
  12. import { SubmitKey, Theme } from "../typing";
// Union of every known model name, derived from the DEFAULT_MODELS list.
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];

// Per-chat behavior defaults: memory, history compression, prompt template.
export const DEFAULT_CHAT_CONFIG = {
  enableAutoGenerateTitle: true,
  sendMemory: true,
  // How many recent messages are carried along with each request.
  historyMessageCount: 4,
  // Compress/summarize history once it grows past this threshold
  // (presumably measured in characters — TODO confirm against the chat store).
  compressMessageLengthThreshold: 1000,
  enableInjectSystemPrompts: true,
  template: DEFAULT_INPUT_TEMPLATE,
};
export type ChatConfig = typeof DEFAULT_CHAT_CONFIG;
// Connection settings per LLM provider. Only OpenAI is wired up today; the
// stubs below sketch the intended shape for future providers.
export const DEFAULT_PROVIDER_CONFIG = {
  openai: OpenAIConfig.provider,
  // azure: {
  //   endpoint: "https://api.openai.com",
  //   apiKey: "",
  //   version: "",
  //   ...COMMON_PROVIDER_CONFIG,
  // },
  // claude: {
  //   endpoint: "https://api.anthropic.com",
  //   apiKey: "",
  //   ...COMMON_PROVIDER_CONFIG,
  // },
  // google: {
  //   // NOTE(review): this endpoint looks copy-pasted from the claude stub —
  //   // confirm the real Google endpoint before enabling this block.
  //   endpoint: "https://api.anthropic.com",
  //   apiKey: "",
  //   ...COMMON_PROVIDER_CONFIG,
  // },
};
// Model/sampling parameter defaults per provider. Only OpenAI is active;
// the stubs below document the planned shape for other providers.
export const DEFAULT_MODEL_CONFIG = {
  openai: OpenAIConfig.model,
  // azure: {
  //   model: "gpt-3.5-turbo" as string,
  //   summarizeModel: "gpt-3.5-turbo",
  //
  //   temperature: 0.5,
  //   top_p: 1,
  //   max_tokens: 2000,
  //   presence_penalty: 0,
  //   frequency_penalty: 0,
  // },
  // claude: {
  //   model: "claude-2",
  //   summarizeModel: "claude-2",
  //
  //   max_tokens_to_sample: 100000,
  //   temperature: 1,
  //   top_p: 0.7,
  //   top_k: 1,
  // },
  // google: {
  //   model: "chat-bison-001",
  //   // NOTE(review): summarizeModel looks copy-pasted from the claude stub —
  //   // a Google model would be expected here; confirm before enabling.
  //   summarizeModel: "claude-2",
  //
  //   temperature: 1,
  //   topP: 0.7,
  //   topK: 1,
  // },
};
  72. export type LLMProvider = keyof typeof DEFAULT_PROVIDER_CONFIG;
  73. export const LLMProviders = Array.from(
  74. Object.entries(DEFAULT_PROVIDER_CONFIG),
  75. ).map(([k, v]) => [v.name, k]);
// Defaults applied to every new mask (persona).
// NOTE(review): these spreads are shallow — e.g. modelConfig.openai is the
// same object instance as DEFAULT_MODEL_CONFIG.openai. Confirm that callers
// deep-clone before mutating, or shared defaults will be corrupted.
export const DEFAULT_MASK_CONFIG = {
  provider: "openai" as LLMProvider,
  chatConfig: { ...DEFAULT_CHAT_CONFIG },
  modelConfig: { ...DEFAULT_MODEL_CONFIG },
};
// Application-wide settings persisted by the useAppConfig store below.
export const DEFAULT_APP_CONFIG = {
  lastUpdate: Date.now(), // timestamp, used to merge persisted state
  // Cmd+Enter on macOS, Ctrl+Enter elsewhere.
  submitKey: isMacOS() ? SubmitKey.MetaEnter : SubmitKey.CtrlEnter,
  avatar: "1f603", // default avatar, as an emoji codepoint
  fontSize: 14,
  theme: Theme.Auto as Theme,
  // Borderless window style when running as a desktop app.
  tightBorder: !!getClientConfig()?.isApp,
  sendPreviewBubble: true,
  sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
  disablePromptHint: false,
  dontShowMaskSplashScreen: false, // don't show splash screen when creating a chat
  hideBuiltinMasks: false, // don't add built-in masks
  providerConfig: { ...DEFAULT_PROVIDER_CONFIG },
  globalMaskConfig: { ...DEFAULT_MASK_CONFIG },
};

// Convenience aliases derived from the default objects above.
export type AppConfig = typeof DEFAULT_APP_CONFIG;
export type ProviderConfig = typeof DEFAULT_PROVIDER_CONFIG;
export type MaskConfig = typeof DEFAULT_MASK_CONFIG;
export type ModelConfig = typeof DEFAULT_MODEL_CONFIG;
  100. export function limitNumber(
  101. x: number,
  102. min: number,
  103. max: number,
  104. defaultValue: number,
  105. ) {
  106. if (isNaN(x)) {
  107. return defaultValue;
  108. }
  109. return Math.min(max, Math.max(min, x));
  110. }
  111. export const ModalConfigValidator = {
  112. model(x: string) {
  113. return x as ModelType;
  114. },
  115. max_tokens(x: number) {
  116. return limitNumber(x, 0, 100000, 2000);
  117. },
  118. presence_penalty(x: number) {
  119. return limitNumber(x, -2, 2, 0);
  120. },
  121. frequency_penalty(x: number) {
  122. return limitNumber(x, -2, 2, 0);
  123. },
  124. temperature(x: number) {
  125. return limitNumber(x, 0, 1, 1);
  126. },
  127. top_p(x: number) {
  128. return limitNumber(x, 0, 1, 1);
  129. },
  130. };
// Persisted global config store; persistence/merge plumbing lives in
// createPersistStore.
export const useAppConfig = createPersistStore(
  { ...DEFAULT_APP_CONFIG },
  (set, get) => ({
    // Restore every setting to its compile-time default.
    reset() {
      set(() => ({ ...DEFAULT_APP_CONFIG }));
    },
    // Build an LLM client from the current provider + global mask config.
    getDefaultClient() {
      return api.createLLMClient(get().providerConfig, get().globalMaskConfig);
    },
  }),
  {
    name: StoreKey.Config,
    version: 4,
    // Upgrade older persisted snapshots in order; each branch backfills the
    // fields introduced at that (fractional) schema version.
    migrate(persistedState, version) {
      const state = persistedState as any;
      if (version < 3.4) {
        // NOTE(review): assumes every pre-3.4 snapshot carries a modelConfig
        // object — confirm, otherwise these property writes would throw.
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;
        state.hideBuiltinMasks = false;
      }
      if (version < 3.5) {
        state.customModels = "claude,claude-100k";
      }
      if (version < 3.6) {
        state.modelConfig.enableInjectSystemPrompts = true;
      }
      if (version < 3.7) {
        state.enableAutoGenerateTitle = true;
      }
      if (version < 3.8) {
        state.lastUpdate = Date.now();
      }
      if (version < 4) {
        // todo: migrate from old versions
      }
      return state as any;
    },
  },
);