// config.ts
  1. import { isMacOS } from "../utils";
  2. import { getClientConfig } from "../config/client";
  3. import {
  4. DEFAULT_INPUT_TEMPLATE,
  5. DEFAULT_SIDEBAR_WIDTH,
  6. StoreKey,
  7. } from "../constant";
  8. import { createPersistStore } from "../utils/store";
  9. import { OpenAIConfig } from "../client/openai/config";
  10. import { api } from "../client";
  11. import { SubmitKey, Theme } from "../typing";
  12. import { AnthropicConfig } from "../client/anthropic/config";
  13. export const DEFAULT_CHAT_CONFIG = {
  14. enableAutoGenerateTitle: true,
  15. sendMemory: true,
  16. historyMessageCount: 4,
  17. compressMessageLengthThreshold: 1000,
  18. enableInjectSystemPrompts: true,
  19. template: DEFAULT_INPUT_TEMPLATE,
  20. };
  21. export type ChatConfig = typeof DEFAULT_CHAT_CONFIG;
  22. export const DEFAULT_PROVIDER_CONFIG = {
  23. openai: OpenAIConfig.provider,
  24. anthropic: AnthropicConfig.provider,
  25. // azure: {
  26. // endpoint: "https://api.openai.com",
  27. // apiKey: "",
  28. // version: "",
  29. // ...COMMON_PROVIDER_CONFIG,
  30. // },
  31. // google: {
  32. // endpoint: "https://api.anthropic.com",
  33. // apiKey: "",
  34. // ...COMMON_PROVIDER_CONFIG,
  35. // },
  36. };
  37. export const DEFAULT_MODEL_CONFIG = {
  38. openai: OpenAIConfig.model,
  39. anthropic: AnthropicConfig.model,
  40. // azure: {
  41. // model: "gpt-3.5-turbo" as string,
  42. // summarizeModel: "gpt-3.5-turbo",
  43. //
  44. // temperature: 0.5,
  45. // top_p: 1,
  46. // max_tokens: 2000,
  47. // presence_penalty: 0,
  48. // frequency_penalty: 0,
  49. // },
  50. // google: {
  51. // model: "chat-bison-001",
  52. // summarizeModel: "claude-2",
  53. //
  54. // temperature: 1,
  55. // topP: 0.7,
  56. // topK: 1,
  57. // },
  58. };
  59. export type LLMProvider = keyof typeof DEFAULT_PROVIDER_CONFIG;
  60. export const LLMProviders = Array.from(
  61. Object.entries(DEFAULT_PROVIDER_CONFIG),
  62. ).map(([k, v]) => [v.name, k]);
  63. export const DEFAULT_MASK_CONFIG = {
  64. provider: "openai" as LLMProvider,
  65. chatConfig: { ...DEFAULT_CHAT_CONFIG },
  66. modelConfig: { ...DEFAULT_MODEL_CONFIG },
  67. };
  68. export const DEFAULT_APP_CONFIG = {
  69. lastUpdate: Date.now(), // timestamp, to merge state
  70. submitKey: isMacOS() ? SubmitKey.MetaEnter : SubmitKey.CtrlEnter,
  71. avatar: "1f603",
  72. fontSize: 14,
  73. theme: Theme.Auto as Theme,
  74. tightBorder: !!getClientConfig()?.isApp,
  75. sendPreviewBubble: true,
  76. sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
  77. disablePromptHint: false,
  78. dontShowMaskSplashScreen: false, // dont show splash screen when create chat
  79. hideBuiltinMasks: false, // dont add builtin masks
  80. providerConfig: { ...DEFAULT_PROVIDER_CONFIG },
  81. globalMaskConfig: { ...DEFAULT_MASK_CONFIG },
  82. };
  83. export type AppConfig = typeof DEFAULT_APP_CONFIG;
  84. export type ProviderConfig = typeof DEFAULT_PROVIDER_CONFIG;
  85. export type MaskConfig = typeof DEFAULT_MASK_CONFIG;
  86. export type ModelConfig = typeof DEFAULT_MODEL_CONFIG;
  87. export function limitNumber(
  88. x: number,
  89. min: number,
  90. max: number,
  91. defaultValue: number,
  92. ) {
  93. if (isNaN(x)) {
  94. return defaultValue;
  95. }
  96. return Math.min(max, Math.max(min, x));
  97. }
  98. export const ModalConfigValidator = {
  99. model(x: string) {
  100. return x as string;
  101. },
  102. max_tokens(x: number) {
  103. return limitNumber(x, 0, 100000, 2000);
  104. },
  105. presence_penalty(x: number) {
  106. return limitNumber(x, -2, 2, 0);
  107. },
  108. frequency_penalty(x: number) {
  109. return limitNumber(x, -2, 2, 0);
  110. },
  111. temperature(x: number) {
  112. return limitNumber(x, 0, 1, 1);
  113. },
  114. top_p(x: number) {
  115. return limitNumber(x, 0, 1, 1);
  116. },
  117. };
  118. export const useAppConfig = createPersistStore(
  119. { ...DEFAULT_APP_CONFIG },
  120. (set, get) => ({
  121. reset() {
  122. set(() => ({ ...DEFAULT_APP_CONFIG }));
  123. },
  124. getDefaultClient() {
  125. return api.createLLMClient(get().providerConfig, get().globalMaskConfig);
  126. },
  127. }),
  128. {
  129. name: StoreKey.Config,
  130. version: 4,
  131. migrate(persistedState, version) {
  132. const state = persistedState as any;
  133. if (version < 3.4) {
  134. state.modelConfig.sendMemory = true;
  135. state.modelConfig.historyMessageCount = 4;
  136. state.modelConfig.compressMessageLengthThreshold = 1000;
  137. state.modelConfig.frequency_penalty = 0;
  138. state.modelConfig.top_p = 1;
  139. state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
  140. state.dontShowMaskSplashScreen = false;
  141. state.hideBuiltinMasks = false;
  142. }
  143. if (version < 3.5) {
  144. state.customModels = "claude,claude-100k";
  145. }
  146. if (version < 3.6) {
  147. state.modelConfig.enableInjectSystemPrompts = true;
  148. }
  149. if (version < 3.7) {
  150. state.enableAutoGenerateTitle = true;
  151. }
  152. if (version < 3.8) {
  153. state.lastUpdate = Date.now();
  154. }
  155. if (version < 4) {
  156. // todo: migarte from old versions
  157. }
  158. return state as any;
  159. },
  160. },
  161. );