api.ts

import { getClientConfig } from "../config/client";
import {
  ACCESS_CODE_PREFIX,
  Azure,
  ModelProvider,
  ServiceProvider,
} from "../constant";
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
import { GeminiProApi } from "./platforms/google";
import { ClaudeApi } from "./platforms/anthropic";
import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";

export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];

export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export type ChatModel = ModelType;
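
// A message body is either a plain string or a list of multimodal parts
// (text and image_url entries).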
export interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: {
    url: string;
  };
}

export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];
}

export interface LLMConfig {
  model: string;
  providerName?: string;
  temperature?: number;
  top_p?: number;
  stream?: boolean;
  presence_penalty?: number;
  frequency_penalty?: number;
}
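
// Options for LLMApi.chat(): the message history, the per-request model
// config, and streaming callbacks. onUpdate fires for each streamed chunk,
// onFinish receives the complete reply, and onController exposes the
// AbortController so the request can be cancelled.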
export interface ChatOptions {
  messages: RequestMessage[];
  config: LLMConfig;

  onUpdate?: (message: string, chunk: string) => void;
  onFinish: (message: string) => void;
  onError?: (err: Error) => void;
  onController?: (controller: AbortController) => void;
}

export interface LLMUsage {
  used: number;
  total: number;
}

export interface LLMModel {
  name: string;
  displayName?: string;
  available: boolean;
  provider: LLMModelProvider;
}

export interface LLMModelProvider {
  id: string;
  providerName: string;
  providerType: string;
}
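
// Interface implemented by every platform client (OpenAI, Gemini, Claude,
// Ernie, Doubao, Qwen): streaming chat, usage/quota lookup, and model listing.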
export abstract class LLMApi {
  abstract chat(options: ChatOptions): Promise<void>;
  abstract usage(): Promise<LLMUsage>;
  abstract models(): Promise<LLMModel[]>;
}

type ProviderName = "openai" | "azure" | "claude" | "palm";

interface Model {
  name: string;
  provider: ProviderName;
  ctxlen: number;
}

interface ChatProvider {
  name: ProviderName;
  apiConfig: {
    baseUrl: string;
    apiKey: string;
    summaryModel: Model;
  };
  models: Model[];

  chat: () => void;
  usage: () => void;
}
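
// Thin facade over the platform-specific clients. The constructor selects the
// concrete LLMApi implementation for the given ModelProvider; share() uploads
// a conversation to ShareGPT and returns the resulting share link.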
export class ClientApi {
  public llm: LLMApi;

  constructor(provider: ModelProvider = ModelProvider.GPT) {
    switch (provider) {
      case ModelProvider.GeminiPro:
        this.llm = new GeminiProApi();
        break;
      case ModelProvider.Claude:
        this.llm = new ClaudeApi();
        break;
      case ModelProvider.Ernie:
        this.llm = new ErnieApi();
        break;
      case ModelProvider.Doubao:
        this.llm = new DoubaoApi();
        break;
      case ModelProvider.Qwen:
        this.llm = new QwenApi();
        break;
      default:
        this.llm = new ChatGPTApi();
    }
  }

  config() {}

  prompts() {}

  masks() {}

  async share(messages: ChatMessage[], avatarUrl: string | null = null) {
    const msgs = messages
      .map((m) => ({
        from: m.role === "user" ? "human" : "gpt",
        value: m.content,
      }))
      .concat([
        {
          from: "human",
          value:
            "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
        },
      ]);
    // Note to developers of forks: for the sake of open-source LLM development,
    // please do not modify the message above; it is used for later data cleaning.
    console.log("[Share]", messages, msgs);
    const clientConfig = getClientConfig();
    const proxyUrl = "/sharegpt";
    const rawUrl = "https://sharegpt.com/api/conversations";
    const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl;
    const res = await fetch(shareUrl, {
      body: JSON.stringify({
        avatarUrl,
        items: msgs,
      }),
      headers: {
        "Content-Type": "application/json",
      },
      method: "POST",
    });

    const resJson = await res.json();
    console.log("[Share]", resJson);
    if (resJson.id) {
      return `https://shareg.pt/${resJson.id}`;
    }
  }
}
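
// Builds the HTTP headers for an API request based on the currently selected
// provider: JSON content/accept headers, plus either the provider's API key
// under the provider-specific auth header, or the access code as a bearer
// token when server-side access control is enabled.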
export function getHeaders() {
  const accessStore = useAccessStore.getState();
  const chatStore = useChatStore.getState();
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    Accept: "application/json",
  };

  const clientConfig = getClientConfig();

  function getConfig() {
    const modelConfig = chatStore.currentSession().mask.modelConfig;
    const isGoogle = modelConfig.providerName == ServiceProvider.Google;
    const isAzure = modelConfig.providerName === ServiceProvider.Azure;
    const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
    const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
    const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
    const isEnabledAccessControl = accessStore.enabledAccessControl();
    const apiKey = isGoogle
      ? accessStore.googleApiKey
      : isAzure
        ? accessStore.azureApiKey
        : isAnthropic
          ? accessStore.anthropicApiKey
          : isByteDance
            ? accessStore.bytedanceApiKey
            : accessStore.openaiApiKey;

    return {
      isGoogle,
      isAzure,
      isAnthropic,
      isBaidu,
      isByteDance,
      apiKey,
      isEnabledAccessControl,
    };
  }

  function getAuthHeader(): string {
    return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization";
  }

  function getBearerToken(apiKey: string, noBearer: boolean = false): string {
    return validString(apiKey)
      ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}`
      : "";
  }

  function validString(x: string): boolean {
    return x?.length > 0;
  }

  const {
    isGoogle,
    isAzure,
    isAnthropic,
    isBaidu,
    apiKey,
    isEnabledAccessControl,
  } = getConfig();

  // When using the Google API in the app, do not set the auth header.
  if (isGoogle && clientConfig?.isApp) return headers;
  // When using the Baidu API in the app, do not set the auth header.
  if (isBaidu && clientConfig?.isApp) return headers;

  const authHeader = getAuthHeader();
  const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic);

  if (bearerToken) {
    headers[authHeader] = bearerToken;
  } else if (isEnabledAccessControl && validString(accessStore.accessCode)) {
    headers["Authorization"] = getBearerToken(
      ACCESS_CODE_PREFIX + accessStore.accessCode,
    );
  }

  return headers;
}
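
// Maps a ServiceProvider value (as stored in the model config) to a ClientApi
// constructed with the matching ModelProvider; unknown providers fall back to
// the OpenAI (GPT) client.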
export function getClientApi(provider: ServiceProvider): ClientApi {
  switch (provider) {
    case ServiceProvider.Google:
      return new ClientApi(ModelProvider.GeminiPro);
    case ServiceProvider.Anthropic:
      return new ClientApi(ModelProvider.Claude);
    case ServiceProvider.Baidu:
      return new ClientApi(ModelProvider.Ernie);
    case ServiceProvider.ByteDance:
      return new ClientApi(ModelProvider.Doubao);
    case ServiceProvider.Alibaba:
      return new ClientApi(ModelProvider.Qwen);
    default:
      return new ClientApi(ModelProvider.GPT);
  }
}
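
// A minimal usage sketch (assuming the OpenAI provider is configured and that
// ServiceProvider.OpenAI / ModelProvider.GPT resolve as in "../constant"):
//
//   const api = getClientApi(ServiceProvider.OpenAI);
//   await api.llm.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gpt-3.5-turbo", stream: true },
//     onUpdate: (message, chunk) => console.log(chunk),
//     onFinish: (message) => console.log("[done]", message),
//     onError: (err) => console.error(err),
//   });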