Kaynağa Gözat

feat: add claude and bard

Yidadaa 2 yıl önce
ebeveyn işleme
cdf0311d27

+ 29 - 0
app/client/anthropic/config.ts

@@ -0,0 +1,29 @@
+// Default model and provider settings for the Anthropic (Claude) client.
+export const AnthropicConfig = {
+  model: {
+    // default chat model, and the model used for conversation summarization
+    model: "claude-instant-1",
+    summarizeModel: "claude-instant-1",
+
+    // sampling parameters forwarded as-is to the completion API
+    max_tokens_to_sample: 8192,
+    temperature: 0.5,
+    top_p: 0.7,
+    top_k: 5,
+  },
+  provider: {
+    name: "Anthropic" as const,
+    endpoint: "https://api.anthropic.com",
+    apiKey: "",
+    customModels: "", // comma-separated extra model names added by the user
+    version: "2023-06-01", // sent as the `anthropic-version` request header
+
+    // built-in models offered in the model picker
+    models: [
+      {
+        name: "claude-instant-1",
+        available: true,
+      },
+      {
+        name: "claude-2",
+        available: true,
+      },
+    ],
+  },
+};

+ 233 - 0
app/client/anthropic/index.ts

@@ -0,0 +1,233 @@
+import { ModelConfig, ProviderConfig } from "@/app/store";
+import { createLogger } from "@/app/utils/log";
+import { getAuthKey } from "../common/auth";
+import { API_PREFIX, AnthropicPath, ApiPath } from "@/app/constant";
+import { getApiPath } from "@/app/utils/path";
+import { trimEnd } from "@/app/utils/string";
+import { Anthropic } from "./types";
+import { ChatOptions, LLMModel, LLMUsage, RequestMessage } from "../types";
+import { omit } from "@/app/utils/object";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import Locale from "@/app/locales";
+import { AnthropicConfig } from "./config";
+
+export function createAnthropicClient(
+  providerConfigs: ProviderConfig,
+  modelConfig: ModelConfig,
+) {
+  const anthropicConfig = { ...providerConfigs.anthropic };
+  const logger = createLogger("[Anthropic]");
+  const anthropicModelConfig = { ...modelConfig.anthropic };
+
+  return {
+    headers() {
+      return {
+        "Content-Type": "application/json",
+        "x-api-key": getAuthKey(anthropicConfig.apiKey),
+        "anthropic-version": anthropicConfig.version,
+      };
+    },
+
+    path(path: AnthropicPath): string {
+      let baseUrl: string = anthropicConfig.endpoint;
+
+      // if endpoint is empty, use default endpoint
+      if (baseUrl.trim().length === 0) {
+        baseUrl = getApiPath(ApiPath.Anthropic);
+      }
+
+      if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) {
+        baseUrl = "https://" + baseUrl;
+      }
+
+      baseUrl = trimEnd(baseUrl, "/");
+
+      return `${baseUrl}/${path}`;
+    },
+
+    extractMessage(res: Anthropic.ChatResponse) {
+      return res.completion;
+    },
+
+    beforeRequest(options: ChatOptions, stream = false) {
+      const ClaudeMapper: Record<RequestMessage["role"], string> = {
+        assistant: "Assistant",
+        user: "Human",
+        system: "Human",
+      };
+
+      const prompt = options.messages
+        .map((v) => ({
+          role: ClaudeMapper[v.role] ?? "Human",
+          content: v.content,
+        }))
+        .map((v) => `\n\n${v.role}: ${v.content}`)
+        .join("");
+
+      if (options.shouldSummarize) {
+        anthropicModelConfig.model = anthropicModelConfig.summarizeModel;
+      }
+
+      const requestBody: Anthropic.ChatRequest = {
+        prompt,
+        stream,
+        ...omit(anthropicModelConfig, "summarizeModel"),
+      };
+
+      const path = this.path(AnthropicPath.Chat);
+
+      logger.log("path = ", path, requestBody);
+
+      const controller = new AbortController();
+      options.onController?.(controller);
+
+      const payload = {
+        method: "POST",
+        body: JSON.stringify(requestBody),
+        signal: controller.signal,
+        headers: this.headers(),
+        mode: "no-cors" as RequestMode,
+      };
+
+      return {
+        path,
+        payload,
+        controller,
+      };
+    },
+
+    async chat(options: ChatOptions) {
+      try {
+        const { path, payload, controller } = this.beforeRequest(
+          options,
+          false,
+        );
+
+        controller.signal.onabort = () => options.onFinish("");
+
+        const res = await fetch(path, payload);
+        const resJson = await res.json();
+
+        const message = this.extractMessage(resJson);
+        options.onFinish(message);
+      } catch (e) {
+        logger.error("failed to chat", e);
+        options.onError?.(e as Error);
+      }
+    },
+
+    async chatStream(options: ChatOptions) {
+      try {
+        const { path, payload, controller } = this.beforeRequest(options, true);
+
+        const context = {
+          text: "",
+          finished: false,
+        };
+
+        const finish = () => {
+          if (!context.finished) {
+            options.onFinish(context.text);
+            context.finished = true;
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        logger.log(payload);
+
+        fetchEventSource(path, {
+          ...payload,
+          async onopen(res) {
+            const contentType = res.headers.get("content-type");
+            logger.log("response content type: ", contentType);
+
+            if (contentType?.startsWith("text/plain")) {
+              context.text = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [context.text];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              context.text = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" || context.finished) {
+              return finish();
+            }
+            const chunk = msg.data;
+            try {
+              const chunkJson = JSON.parse(
+                chunk,
+              ) as Anthropic.ChatStreamResponse;
+              const delta = chunkJson.completion;
+              if (delta) {
+                context.text += delta;
+                options.onUpdate?.(context.text, delta);
+              }
+            } catch (e) {
+              logger.error("[Request] parse error", chunk, msg);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+          },
+          openWhenHidden: true,
+        });
+      } catch (e) {
+        logger.error("failed to chat", e);
+        options.onError?.(e as Error);
+      }
+    },
+
+    async usage() {
+      return {
+        used: 0,
+        total: 0,
+      } as LLMUsage;
+    },
+
+    async models(): Promise<LLMModel[]> {
+      const customModels = anthropicConfig.customModels
+        .split(",")
+        .map((v) => v.trim())
+        .filter((v) => !!v)
+        .map((v) => ({
+          name: v,
+          available: true,
+        }));
+
+      return [...AnthropicConfig.provider.models.slice(), ...customModels];
+    },
+  };
+}

+ 24 - 0
app/client/anthropic/types.ts

@@ -0,0 +1,24 @@
+// Request/response shapes for Anthropic's text-completion API
+// (POST v1/complete), as used by the client in ./index.ts.
+export namespace Anthropic {
+  export interface ChatRequest {
+    model: string; // The model that will complete your prompt.
+    prompt: string; // The prompt that you want Claude to complete.
+    max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
+    stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
+    temperature?: number; // Amount of randomness injected into the response.
+    top_p?: number; // Use nucleus sampling.
+    top_k?: number; // Only sample from the top K options for each subsequent token.
+    metadata?: object; // An object describing metadata about the request.
+    stream?: boolean; // Whether to incrementally stream the response using server-sent events.
+  }
+
+  export interface ChatResponse {
+    completion: string; // The generated text (a partial delta when streaming).
+    stop_reason: "stop_sequence" | "max_tokens"; // Why generation stopped.
+    model: string; // The model that handled the request.
+  }
+
+  // Streaming chunks share the response shape plus stream-only metadata.
+  export type ChatStreamResponse = ChatResponse & {
+    stop?: string;
+    log_id: string;
+  };
+}

+ 5 - 6
app/client/common/auth.ts

@@ -6,23 +6,22 @@ export function bearer(value: string) {
   return `Bearer ${value.trim()}`;
 }
 
-export function getAuthHeaders(apiKey = "") {
+// Resolves the bearer value for API requests: a caller-supplied API key wins;
+// otherwise fall back to the server access code (web builds only). Returns ""
+// when neither is available.
+export function getAuthKey(apiKey = "") {
   const accessStore = useAccessStore.getState();
   const isApp = !!getClientConfig()?.isApp;
-
-  let headers: Record<string, string> = {};
+  let authKey = "";
 
   if (apiKey) {
     // use user's api key first
-    headers.Authorization = bearer(apiKey);
+    authKey = bearer(apiKey);
   } else if (
     accessStore.enabledAccessControl() &&
     !isApp &&
     !!accessStore.accessCode
   ) {
     // or use access code
-    headers.Authorization = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
+    authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
   }
 
-  return headers;
+  return authKey;
 }

+ 0 - 5
app/client/common/config.ts

@@ -1,5 +0,0 @@
-export const COMMON_PROVIDER_CONFIG = {
-  customModels: "",
-  models: [] as string[],
-  autoFetchModels: false, // fetch available models from server or not
-};

+ 2 - 0
app/client/core.ts

@@ -2,9 +2,11 @@ import { MaskConfig, ProviderConfig } from "../store";
 import { shareToShareGPT } from "./common/share";
 import { createOpenAiClient } from "./openai";
 import { ChatControllerPool } from "./common/controller";
+import { createAnthropicClient } from "./anthropic";
 
 export const LLMClients = {
   openai: createOpenAiClient,
+  anthropic: createAnthropicClient,
 };
 
 export function createLLMClient(

+ 50 - 4
app/client/openai/config.ts

@@ -1,5 +1,3 @@
-import { COMMON_PROVIDER_CONFIG } from "../common/config";
-
 export const OpenAIConfig = {
   model: {
     model: "gpt-3.5-turbo" as string,
@@ -12,9 +10,57 @@ export const OpenAIConfig = {
     frequency_penalty: 0,
   },
   provider: {
-    name: "OpenAI",
+    name: "OpenAI" as const,
     endpoint: "https://api.openai.com",
     apiKey: "",
-    ...COMMON_PROVIDER_CONFIG,
+    customModels: "",
+    autoFetchModels: false, // fetch available models from server or not
+
+    models: [
+      {
+        name: "gpt-4",
+        available: true,
+      },
+      {
+        name: "gpt-4-0314",
+        available: true,
+      },
+      {
+        name: "gpt-4-0613",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k-0314",
+        available: true,
+      },
+      {
+        name: "gpt-4-32k-0613",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-0301",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-0613",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-16k",
+        available: true,
+      },
+      {
+        name: "gpt-3.5-turbo-16k-0613",
+        available: true,
+      },
+    ],
   },
 };

+ 9 - 62
app/client/openai/index.ts

@@ -3,12 +3,7 @@ import {
   fetchEventSource,
 } from "@fortaine/fetch-event-source";
 
-import {
-  API_PREFIX,
-  ApiPath,
-  DEFAULT_MODELS,
-  OpenaiPath,
-} from "@/app/constant";
+import { API_PREFIX, ApiPath, OpenaiPath } from "@/app/constant";
 import { ModelConfig, ProviderConfig } from "@/app/store";
 
 import { OpenAI } from "./types";
@@ -21,7 +16,8 @@ import { getApiPath } from "@/app/utils/path";
 import { trimEnd } from "@/app/utils/string";
 import { omit } from "@/app/utils/object";
 import { createLogger } from "@/app/utils/log";
-import { getAuthHeaders } from "../common/auth";
+import { getAuthKey } from "../common/auth";
+import { OpenAIConfig } from "./config";
 
 export function createOpenAiClient(
   providerConfigs: ProviderConfig,
@@ -35,12 +31,12 @@ export function createOpenAiClient(
     headers() {
       return {
         "Content-Type": "application/json",
-        ...getAuthHeaders(openaiConfig.apiKey),
+        // pass the user-configured key so it takes precedence over the shared
+        // access code; omitting it would silently ignore the user's API key
+        // (the Anthropic client passes its key the same way)
+        Authorization: getAuthKey(openaiConfig.apiKey),
       };
     },
 
     path(path: OpenaiPath): string {
-      let baseUrl = openaiConfig.endpoint;
+      let baseUrl: string = openaiConfig.endpoint;
 
       // if endpoint is empty, use default endpoint
       if (baseUrl.trim().length === 0) {
@@ -206,59 +202,9 @@ export function createOpenAiClient(
     },
 
     async usage() {
-      const formatDate = (d: Date) =>
-        `${d.getFullYear()}-${(d.getMonth() + 1)
-          .toString()
-          .padStart(2, "0")}-${d.getDate().toString().padStart(2, "0")}`;
-      const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
-      const now = new Date();
-      const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
-      const startDate = formatDate(startOfMonth);
-      const endDate = formatDate(new Date(Date.now() + ONE_DAY));
-
-      const [used, subs] = await Promise.all([
-        fetch(
-          `${this.path(
-            OpenaiPath.Usage,
-          )}?start_date=${startDate}&end_date=${endDate}`,
-          {
-            method: "GET",
-            headers: this.headers(),
-          },
-        ),
-        fetch(this.path(OpenaiPath.Subs), {
-          method: "GET",
-          headers: this.headers(),
-        }),
-      ]);
-
-      if (!used.ok || !subs.ok) {
-        throw new Error("Failed to query usage from openai");
-      }
-
-      const response = (await used.json()) as {
-        total_usage?: number;
-        error?: {
-          type: string;
-          message: string;
-        };
-      };
-
-      const total = (await subs.json()) as {
-        hard_limit_usd?: number;
-      };
-
-      if (response.error?.type) {
-        throw Error(response.error?.message);
-      }
-
-      response.total_usage = Math.round(response.total_usage ?? 0) / 100;
-      total.hard_limit_usd =
-        Math.round((total.hard_limit_usd ?? 0) * 100) / 100;
-
       return {
-        used: response.total_usage,
-        total: total.hard_limit_usd,
+        used: 0,
+        total: 0,
       } as LLMUsage;
     },
 
@@ -266,13 +212,14 @@ export function createOpenAiClient(
       const customModels = openaiConfig.customModels
         .split(",")
         .map((v) => v.trim())
+        .filter((v) => !!v)
         .map((v) => ({
           name: v,
           available: true,
         }));
 
       if (!openaiConfig.autoFetchModels) {
-        return [...DEFAULT_MODELS.slice(), ...customModels];
+        return [...OpenAIConfig.provider.models.slice(), ...customModels];
       }
 
       const res = await fetch(this.path(OpenaiPath.ListModel), {

+ 0 - 4
app/client/types.ts

@@ -1,5 +1,3 @@
-import { DEFAULT_MODELS } from "../constant";
-
 export interface LLMUsage {
   used: number;
   total: number;
@@ -14,8 +12,6 @@ export interface LLMModel {
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
 
-export type ChatModel = (typeof DEFAULT_MODELS)[number]["name"];
-
 export interface RequestMessage {
   role: MessageRole;
   content: string;

+ 79 - 0
app/components/config/anthropic/model.tsx

@@ -0,0 +1,79 @@
+import { ModelConfig } from "@/app/store";
+import { ModelConfigProps } from "../types";
+import { ListItem, Select } from "../../ui-lib";
+import Locale from "@/app/locales";
+import { InputRange } from "../../input-range";
+
+// Settings UI for Anthropic model parameters: model selection, temperature,
+// top_p and max_tokens_to_sample.
+export function AnthropicModelConfig(
+  props: ModelConfigProps<ModelConfig["anthropic"]>,
+) {
+  return (
+    <>
+      {/* model picker, fed from the provider's model list */}
+      <ListItem title={Locale.Settings.Model}>
+        <Select
+          value={props.config.model}
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.model = e.currentTarget.value),
+            );
+          }}
+        >
+          {props.models.map((v, i) => (
+            <option value={v.name} key={i} disabled={!v.available}>
+              {v.name}
+            </option>
+          ))}
+        </Select>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Temperature.Title}
+        subTitle={Locale.Settings.Temperature.SubTitle}
+      >
+        <InputRange
+          value={props.config.temperature?.toFixed(1)}
+          min="0"
+          max="1" // let's limit it to 0-1
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.temperature = e.currentTarget.valueAsNumber),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.TopP.Title}
+        subTitle={Locale.Settings.TopP.SubTitle}
+      >
+        <InputRange
+          value={(props.config.top_p ?? 1).toFixed(1)}
+          min="0"
+          max="1"
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.top_p = e.currentTarget.valueAsNumber),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
+      {/* bounds mirror the API's token limits, not a UI preference */}
+      <ListItem
+        title={Locale.Settings.MaxTokens.Title}
+        subTitle={Locale.Settings.MaxTokens.SubTitle}
+      >
+        <input
+          type="number"
+          min={100}
+          max={100000}
+          value={props.config.max_tokens_to_sample}
+          onChange={(e) =>
+            props.updateConfig(
+              (config) =>
+                (config.max_tokens_to_sample = e.currentTarget.valueAsNumber),
+            )
+          }
+        ></input>
+      </ListItem>
+    </>
+  );
+}

+ 70 - 0
app/components/config/anthropic/provider.tsx

@@ -0,0 +1,70 @@
+import { ProviderConfig } from "@/app/store";
+import { ProviderConfigProps } from "../types";
+import { ListItem, PasswordInput } from "../../ui-lib";
+import Locale from "@/app/locales";
+import { REMOTE_API_HOST } from "@/app/constant";
+
+// Settings UI for the Anthropic provider: endpoint, API key, API version and
+// user-defined custom models.
+export function AnthropicProviderConfig(
+  props: ProviderConfigProps<ProviderConfig["anthropic"]>,
+) {
+  return (
+    <>
+      <ListItem
+        title={Locale.Settings.Endpoint.Title}
+        subTitle={Locale.Settings.Endpoint.SubTitle}
+      >
+        <input
+          type="text"
+          value={props.config.endpoint}
+          placeholder={REMOTE_API_HOST}
+          onChange={(e) =>
+            props.updateConfig(
+              (config) => (config.endpoint = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Token.Title}
+        subTitle={Locale.Settings.Token.SubTitle}
+      >
+        <PasswordInput
+          value={props.config.apiKey}
+          type="text"
+          placeholder={Locale.Settings.Token.Placeholder}
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.apiKey = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+      {/* NOTE(review): hard-coded Chinese subtitle ("fill in the API version
+          number") — siblings use Locale; should move to the locale files.
+          PasswordInput also seems odd for a non-secret version string. */}
+      <ListItem title={"Anthropic Version"} subTitle={"填写 API 版本号"}>
+        <PasswordInput
+          value={props.config.version}
+          type="text"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) => (config.version = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.CustomModel.Title}
+        subTitle={Locale.Settings.CustomModel.SubTitle}
+      >
+        <input
+          type="text"
+          value={props.config.customModels}
+          placeholder="model1,model2,model3"
+          onChange={(e) =>
+            props.updateConfig(
+              (config) => (config.customModels = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+    </>
+  );
+}

+ 24 - 10
app/components/config/index.tsx

@@ -11,6 +11,10 @@ import { OpenAIProviderConfig } from "./openai/provider";
 import { ListItem, Select } from "../ui-lib";
 import Locale from "@/app/locales";
 import { InputRange } from "../input-range";
+import { OpenAIConfig } from "@/app/client/openai/config";
+import { AnthropicModelConfig } from "./anthropic/model";
+import { AnthropicConfig } from "@/app/client/anthropic/config";
+import { AnthropicProviderConfig } from "./anthropic/provider";
 
 export function ModelConfigList(props: {
   provider: LLMProvider;
@@ -24,16 +28,17 @@ export function ModelConfigList(props: {
         updateConfig={(update) => {
           props.updateConfig((config) => update(config.openai));
         }}
-        models={[
-          {
-            name: "gpt-3.5-turbo",
-            available: true,
-          },
-          {
-            name: "gpt-4",
-            available: true,
-          },
-        ]}
+        models={OpenAIConfig.provider.models}
+      />
+    );
+  } else if (props.provider === "anthropic") {
+    return (
+      <AnthropicModelConfig
+        config={props.config.anthropic}
+        updateConfig={(update) => {
+          props.updateConfig((config) => update(config.anthropic));
+        }}
+        models={AnthropicConfig.provider.models}
       />
     );
   }
@@ -55,6 +60,15 @@ export function ProviderConfigList(props: {
         }}
       />
     );
+  } else if (props.provider === "anthropic") {
+    return (
+      <AnthropicProviderConfig
+        config={props.config.anthropic}
+        updateConfig={(update) => {
+          props.updateConfig((config) => update(config.anthropic));
+        }}
+      />
+    );
   }
 
   return null;

+ 3 - 0
app/components/config/openai/provider.tsx

@@ -3,6 +3,8 @@ import { ProviderConfigProps } from "../types";
 import { ListItem, PasswordInput } from "../../ui-lib";
 import Locale from "@/app/locales";
 import { REMOTE_API_HOST } from "@/app/constant";
+import { IconButton } from "../../button";
+import ReloadIcon from "@/app/icons/reload.svg";
 
 export function OpenAIProviderConfig(
   props: ProviderConfigProps<ProviderConfig["openai"]>,
@@ -58,6 +60,7 @@ export function OpenAIProviderConfig(
       <ListItem title="自动拉取可用模型" subTitle="尝试拉取所有可用模型">
         <input
           type="checkbox"
+          style={{ marginLeft: 10 }}
           checked={props.config.autoFetchModels}
           onChange={(e) =>
             props.updateConfig(

+ 0 - 139
app/components/model-config.tsx

@@ -1,139 +0,0 @@
-import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store";
-
-import Locale from "../locales";
-import { InputRange } from "./input-range";
-import { ListItem, Select } from "./ui-lib";
-
-export function _ModelConfigList(props: {
-  modelConfig: ModelConfig;
-  updateConfig: (updater: (config: ModelConfig) => void) => void;
-}) {
-  return null;
-  /*
-  const config = useAppConfig();
-
-  return (
-    <>
-      <ListItem title={Locale.Settings.Model}>
-        <Select
-          value={props.modelConfig.model}
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.model = ModalConfigValidator.model(
-                  e.currentTarget.value,
-                )),
-            );
-          }}
-        >
-          {config.allModels().map((v, i) => (
-            <option value={v.name} key={i} disabled={!v.available}>
-              {v.name}
-            </option>
-          ))}
-        </Select>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.Temperature.Title}
-        subTitle={Locale.Settings.Temperature.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.temperature?.toFixed(1)}
-          min="0"
-          max="1" // lets limit it to 0-1
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.temperature = ModalConfigValidator.temperature(
-                  e.currentTarget.valueAsNumber,
-                )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.TopP.Title}
-        subTitle={Locale.Settings.TopP.SubTitle}
-      >
-        <InputRange
-          value={(props.modelConfig.top_p ?? 1).toFixed(1)}
-          min="0"
-          max="1"
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.top_p = ModalConfigValidator.top_p(
-                  e.currentTarget.valueAsNumber,
-                )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.MaxTokens.Title}
-        subTitle={Locale.Settings.MaxTokens.SubTitle}
-      >
-        <input
-          type="number"
-          min={100}
-          max={100000}
-          value={props.modelConfig.max_tokens}
-          onChange={(e) =>
-            props.updateConfig(
-              (config) =>
-                (config.max_tokens = ModalConfigValidator.max_tokens(
-                  e.currentTarget.valueAsNumber,
-                )),
-            )
-          }
-        ></input>
-      </ListItem>
-      <ListItem
-        title={Locale.Settings.PresencePenalty.Title}
-        subTitle={Locale.Settings.PresencePenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.presence_penalty?.toFixed(1)}
-          min="-2"
-          max="2"
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.presence_penalty =
-                  ModalConfigValidator.presence_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-
-      <ListItem
-        title={Locale.Settings.FrequencyPenalty.Title}
-        subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.frequency_penalty?.toFixed(1)}
-          min="-2"
-          max="2"
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.frequency_penalty =
-                  ModalConfigValidator.frequency_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-
-      
-    </>
-  );
-  */
-}

+ 22 - 69
app/components/settings.tsx

@@ -37,8 +37,6 @@ import {
   useUpdateStore,
   useAccessStore,
   useAppConfig,
-  LLMProvider,
-  LLMProviders,
 } from "../store";
 
 import Locale, {
@@ -578,22 +576,6 @@ export function Settings() {
     console.log("[Update] remote version ", updateStore.remoteVersion);
   }
 
-  const usage = {
-    used: updateStore.used,
-    subscription: updateStore.subscription,
-  };
-  const [loadingUsage, setLoadingUsage] = useState(false);
-  function checkUsage(force = false) {
-    if (accessStore.hideBalanceQuery) {
-      return;
-    }
-
-    setLoadingUsage(true);
-    updateStore.updateUsage(force).finally(() => {
-      setLoadingUsage(false);
-    });
-  }
-
   const accessStore = useAccessStore();
   const enabledAccessControl = useMemo(
     () => accessStore.enabledAccessControl(),
@@ -610,7 +592,6 @@ export function Settings() {
   useEffect(() => {
     // checks per minutes
     checkUpdate();
-    showUsage && checkUsage();
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
 
@@ -806,6 +787,28 @@ export function Settings() {
           </ListItem>
         </List>
 
+        <List>
+          {showAccessCode ? (
+            <ListItem
+              title={Locale.Settings.AccessCode.Title}
+              subTitle={Locale.Settings.AccessCode.SubTitle}
+            >
+              <PasswordInput
+                value={accessStore.accessCode}
+                type="text"
+                placeholder={Locale.Settings.AccessCode.Placeholder}
+                onChange={(e) => {
+                  accessStore.update(
+                    (config) => (config.accessCode = e.currentTarget.value),
+                  );
+                }}
+              />
+            </ListItem>
+          ) : (
+            <></>
+          )}
+        </List>
+
         <SyncItems />
 
         <List>
@@ -875,56 +878,6 @@ export function Settings() {
           </ListItem>
         </List>
 
-        <List>
-          {showAccessCode ? (
-            <ListItem
-              title={Locale.Settings.AccessCode.Title}
-              subTitle={Locale.Settings.AccessCode.SubTitle}
-            >
-              <PasswordInput
-                value={accessStore.accessCode}
-                type="text"
-                placeholder={Locale.Settings.AccessCode.Placeholder}
-                onChange={(e) => {
-                  accessStore.update(
-                    (config) => (config.accessCode = e.currentTarget.value),
-                  );
-                }}
-              />
-            </ListItem>
-          ) : (
-            <></>
-          )}
-
-          {!accessStore.hideUserApiKey ? <></> : null}
-
-          {!accessStore.hideBalanceQuery ? (
-            <ListItem
-              title={Locale.Settings.Usage.Title}
-              subTitle={
-                showUsage
-                  ? loadingUsage
-                    ? Locale.Settings.Usage.IsChecking
-                    : Locale.Settings.Usage.SubTitle(
-                        usage?.used ?? "[?]",
-                        usage?.subscription ?? "[?]",
-                      )
-                  : Locale.Settings.Usage.NoAccess
-              }
-            >
-              {!showUsage || loadingUsage ? (
-                <div />
-              ) : (
-                <IconButton
-                  icon={<ResetIcon></ResetIcon>}
-                  text={Locale.Settings.Usage.Check}
-                  onClick={() => checkUsage(true)}
-                />
-              )}
-            </ListItem>
-          ) : null}
-        </List>
-
         <List>
           <ProviderSelectItem
             value={config.globalMaskConfig.provider}

+ 5 - 49
app/constant.ts

@@ -24,6 +24,7 @@ export const API_PREFIX = "/api";
 
 export enum ApiPath {
   OpenAI = "/api/openai",
+  Anthropic = "/api/anthropic",
   Cors = "/api/cors",
   Config = "/api/config",
 }
@@ -70,6 +71,10 @@ export enum OpenaiPath {
   ListModel = "v1/models",
 }
 
+export enum AnthropicPath {
+  Chat = "v1/complete",
+}
+
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
 export const DEFAULT_SYSTEM_TEMPLATE = `
 You are ChatGPT, a large language model trained by OpenAI.
@@ -77,54 +82,5 @@ Knowledge cutoff: 2021-09
 Current model: {{model}}
 Current time: {{time}}`;
 
-export const SUMMARIZE_MODEL = "gpt-3.5-turbo";
-
-export const DEFAULT_MODELS = [
-  {
-    name: "gpt-4",
-    available: true,
-  },
-  {
-    name: "gpt-4-0314",
-    available: true,
-  },
-  {
-    name: "gpt-4-0613",
-    available: true,
-  },
-  {
-    name: "gpt-4-32k",
-    available: true,
-  },
-  {
-    name: "gpt-4-32k-0314",
-    available: true,
-  },
-  {
-    name: "gpt-4-32k-0613",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-0301",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-0613",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-16k",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-16k-0613",
-    available: true,
-  },
-] as const;
-
 export const CHAT_PAGE_SIZE = 15;
 export const MAX_RENDER_MSG_COUNT = 45;

+ 4 - 6
app/store/access.ts

@@ -1,7 +1,7 @@
-import { REMOTE_API_HOST, DEFAULT_MODELS, StoreKey } from "../constant";
+import { REMOTE_API_HOST, StoreKey } from "../constant";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
-import { getAuthHeaders } from "../client/common/auth";
+import { getAuthKey } from "../client/common/auth";
 
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
 
@@ -39,7 +39,7 @@ export const useAccessStore = createPersistStore(
         method: "post",
         body: null,
         headers: {
-          ...getAuthHeaders(),
+          Authorization: getAuthKey(),
         },
       })
         .then((res) => res.json())
@@ -48,9 +48,7 @@ export const useAccessStore = createPersistStore(
           set(() => ({ ...res }));
 
           if (res.disableGPT4) {
-            DEFAULT_MODELS.forEach(
-              (m: any) => (m.available = !m.name.startsWith("gpt-4")),
-            );
+            // disable model
           }
         })
         .catch(() => {

+ 16 - 21
app/store/chat.ts

@@ -2,20 +2,9 @@ import { trimTopic } from "../utils";
 
 import Locale, { getLang } from "../locales";
 import { showToast } from "../components/ui-lib";
-import {
-  LLMProvider,
-  MaskConfig,
-  ModelConfig,
-  ModelType,
-  useAppConfig,
-} from "./config";
+import { MaskConfig, useAppConfig } from "./config";
 import { createEmptyMask, Mask } from "./mask";
-import {
-  DEFAULT_INPUT_TEMPLATE,
-  DEFAULT_SYSTEM_TEMPLATE,
-  StoreKey,
-  SUMMARIZE_MODEL,
-} from "../constant";
+import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";
 import { ChatControllerPool } from "../client/common/controller";
 import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
@@ -85,11 +74,6 @@ function createEmptySession(): ChatSession {
   };
 }
 
-function getSummarizeModel(currentModel: string) {
-  // if it is using gpt-* models, force to use 3.5 to summarize
-  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
-}
-
 function countMessages(msgs: ChatMessage[]) {
   return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
 }
@@ -291,6 +275,18 @@ export const useChatStore = createPersistStore(
         return this.extractModelConfig(maskConfig);
       },
 
+      getMaxTokens() {
+        const maskConfig = this.getCurrentMaskConfig();
+
+        if (maskConfig.provider === "openai") {
+          return maskConfig.modelConfig.openai.max_tokens;
+        } else if (maskConfig.provider === "anthropic") {
+          return maskConfig.modelConfig.anthropic.max_tokens_to_sample;
+        }
+
+        return 8192;
+      },
+
       getClient() {
         const appConfig = useAppConfig.getState();
         const currentMaskConfig = get().getCurrentMaskConfig();
@@ -463,7 +459,7 @@ export const useChatStore = createPersistStore(
           : shortTermMemoryStartIndex;
         // and if user has cleared history messages, we should exclude the memory too.
         const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
-        const maxTokenThreshold = modelConfig.max_tokens;
+        const maxTokenThreshold = this.getMaxTokens();
 
         // get recent messages as much as possible
         const reversedRecentMessages = [];
@@ -546,7 +542,6 @@ export const useChatStore = createPersistStore(
           });
         }
 
-        const modelConfig = this.getCurrentModelConfig();
         const summarizeIndex = Math.max(
           session.lastSummarizeIndex,
           session.clearContextIndex ?? 0,
@@ -557,7 +552,7 @@ export const useChatStore = createPersistStore(
 
         const historyMsgLength = countMessages(toBeSummarizedMsgs);
 
-        if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
+        if (historyMsgLength > this.getMaxTokens()) {
           const n = toBeSummarizedMsgs.length;
           toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
             Math.max(0, n - chatConfig.historyMessageCount),

+ 4 - 18
app/store/config.ts

@@ -2,7 +2,6 @@ import { isMacOS } from "../utils";
 import { getClientConfig } from "../config/client";
 import {
   DEFAULT_INPUT_TEMPLATE,
-  DEFAULT_MODELS,
   DEFAULT_SIDEBAR_WIDTH,
   StoreKey,
 } from "../constant";
@@ -10,8 +9,7 @@ import { createPersistStore } from "../utils/store";
 import { OpenAIConfig } from "../client/openai/config";
 import { api } from "../client";
 import { SubmitKey, Theme } from "../typing";
-
-export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
+import { AnthropicConfig } from "../client/anthropic/config";
 
 export const DEFAULT_CHAT_CONFIG = {
   enableAutoGenerateTitle: true,
@@ -25,17 +23,13 @@ export type ChatConfig = typeof DEFAULT_CHAT_CONFIG;
 
 export const DEFAULT_PROVIDER_CONFIG = {
   openai: OpenAIConfig.provider,
+  anthropic: AnthropicConfig.provider,
   // azure: {
   //   endpoint: "https://api.openai.com",
   //   apiKey: "",
   //   version: "",
   //   ...COMMON_PROVIDER_CONFIG,
   // },
-  // claude: {
-  //   endpoint: "https://api.anthropic.com",
-  //   apiKey: "",
-  //   ...COMMON_PROVIDER_CONFIG,
-  // },
   // google: {
   //   endpoint: "https://api.anthropic.com",
   //   apiKey: "",
@@ -45,6 +39,7 @@ export const DEFAULT_PROVIDER_CONFIG = {
 
 export const DEFAULT_MODEL_CONFIG = {
   openai: OpenAIConfig.model,
+  anthropic: AnthropicConfig.model,
   // azure: {
   //   model: "gpt-3.5-turbo" as string,
   //   summarizeModel: "gpt-3.5-turbo",
@@ -55,15 +50,6 @@ export const DEFAULT_MODEL_CONFIG = {
   //   presence_penalty: 0,
   //   frequency_penalty: 0,
   // },
-  // claude: {
-  //   model: "claude-2",
-  //   summarizeModel: "claude-2",
-  //
-  //   max_tokens_to_sample: 100000,
-  //   temperature: 1,
-  //   top_p: 0.7,
-  //   top_k: 1,
-  // },
   // google: {
   //   model: "chat-bison-001",
   //   summarizeModel: "claude-2",
@@ -125,7 +111,7 @@ export function limitNumber(
 
 export const ModalConfigValidator = {
   model(x: string) {
-    return x as ModelType;
+    return x as string;
   },
   max_tokens(x: number) {
     return limitNumber(x, 0, 100000, 2000);

+ 1 - 1
src-tauri/tauri.conf.json

@@ -9,7 +9,7 @@
   },
   "package": {
     "productName": "ChatGPT Next Web",
-    "version": "2.9.9"
+    "version": "3.0.0"
   },
   "tauri": {
     "allowlist": {