api.ts 5.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195
  1. import { getClientConfig } from "../config/client";
  2. import {
  3. ACCESS_CODE_PREFIX,
  4. Azure,
  5. ModelProvider,
  6. ServiceProvider,
  7. } from "../constant";
  8. import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
  9. import { ChatGPTApi } from "./platforms/openai";
  10. import { GeminiProApi } from "./platforms/google";
  11. import { ClaudeApi } from "./platforms/anthropic";
/** Chat roles accepted in a transcript. `as const` keeps the literal types. */
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];

/** Built-in model name literals; the effective model type is taken from the store. */
export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export type ChatModel = ModelType;
/**
 * One piece of a multimodal message: either plain text or an image reference.
 * Exactly one of `text` / `image_url` is expected to be set, matching `type`
 * — NOTE(review): not enforced by the type; confirm against platform impls.
 */
export interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: {
    url: string;
  };
}
/** A single message sent to a provider: a role plus text or multimodal parts. */
export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];
}
/** Sampling/request options forwarded to the model provider. */
export interface LLMConfig {
  model: string;
  temperature?: number;
  top_p?: number;
  /** When true, responses are streamed chunk by chunk. */
  stream?: boolean;
  presence_penalty?: number;
  frequency_penalty?: number;
}
/** Arguments for LLMApi.chat(); progress is reported through the callbacks. */
export interface ChatOptions {
  messages: RequestMessage[];
  config: LLMConfig;
  /** Streaming progress — presumably (text so far, newest chunk); confirm against platform impls. */
  onUpdate?: (message: string, chunk: string) => void;
  /** Called with the final message text when the request completes. */
  onFinish: (message: string) => void;
  onError?: (err: Error) => void;
  /** Receives the request's AbortController so callers can cancel. */
  onController?: (controller: AbortController) => void;
}
/** Quota usage as reported by the provider (units provider-specific — likely USD or tokens; verify). */
export interface LLMUsage {
  used: number;
  total: number;
}
/** A model the client can offer, with its availability and owning provider. */
export interface LLMModel {
  name: string;
  available: boolean;
  provider: LLMModelProvider;
}
/** Identifies which backend a model belongs to (e.g. display name + type key). */
export interface LLMModelProvider {
  id: string;
  providerName: string;
  providerType: string;
}
/**
 * Contract every provider backend (OpenAI, Gemini, Claude, …) implements.
 * ClientApi selects a concrete subclass per ModelProvider.
 */
export abstract class LLMApi {
  /** Runs a chat request; results are delivered via the ChatOptions callbacks. */
  abstract chat(options: ChatOptions): Promise<void>;
  /** Fetches quota usage for the configured account. */
  abstract usage(): Promise<LLMUsage>;
  /** Lists the models this provider exposes. */
  abstract models(): Promise<LLMModel[]>;
}
// Internal provider tags — NOTE(review): these don't reference ServiceProvider
// and appear unused within this view; possibly legacy.
type ProviderName = "openai" | "azure" | "claude" | "palm";

/** A provider-tagged model with its context window length. */
interface Model {
  name: string;
  provider: ProviderName;
  ctxlen: number;
}
/**
 * Per-provider configuration bundle (endpoint, key, model list).
 * NOTE(review): not referenced anywhere in this view — possibly legacy; verify
 * against the rest of the codebase before extending it.
 */
interface ChatProvider {
  name: ProviderName;
  apiConfig: {
    baseUrl: string;
    apiKey: string;
    /** Model used for conversation summarization. */
    summaryModel: Model;
  };
  models: Model[];
  chat: () => void;
  usage: () => void;
}
  79. export class ClientApi {
  80. public llm: LLMApi;
  81. constructor(provider: ModelProvider = ModelProvider.GPT) {
  82. switch (provider) {
  83. case ModelProvider.GeminiPro:
  84. this.llm = new GeminiProApi();
  85. break;
  86. case ModelProvider.Claude:
  87. this.llm = new ClaudeApi();
  88. break;
  89. default:
  90. this.llm = new ChatGPTApi();
  91. }
  92. }
  93. config() {}
  94. prompts() {}
  95. masks() {}
  96. async share(messages: ChatMessage[], avatarUrl: string | null = null) {
  97. const msgs = messages
  98. .map((m) => ({
  99. from: m.role === "user" ? "human" : "gpt",
  100. value: m.content,
  101. }))
  102. .concat([
  103. {
  104. from: "human",
  105. value:
  106. "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
  107. },
  108. ]);
  109. // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
  110. // Please do not modify this message
  111. console.log("[Share]", messages, msgs);
  112. const clientConfig = getClientConfig();
  113. const proxyUrl = "/sharegpt";
  114. const rawUrl = "https://sharegpt.com/api/conversations";
  115. const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl;
  116. const res = await fetch(shareUrl, {
  117. body: JSON.stringify({
  118. avatarUrl,
  119. items: msgs,
  120. }),
  121. headers: {
  122. "Content-Type": "application/json",
  123. },
  124. method: "POST",
  125. });
  126. const resJson = await res.json();
  127. console.log("[Share]", resJson);
  128. if (resJson.id) {
  129. return `https://shareg.pt/${resJson.id}`;
  130. }
  131. }
  132. }
  133. export function getHeaders() {
  134. const accessStore = useAccessStore.getState();
  135. const headers: Record<string, string> = {
  136. "Content-Type": "application/json",
  137. Accept: "application/json",
  138. };
  139. const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
  140. const isGoogle = modelConfig.model.startsWith("gemini");
  141. const isAzure = accessStore.provider === ServiceProvider.Azure;
  142. const isAnthropic = accessStore.provider === ServiceProvider.Anthropic;
  143. const authHeader = isAzure ? "api-key" : isAnthropic ? 'x-api-key' : "Authorization";
  144. const apiKey = isGoogle
  145. ? accessStore.googleApiKey
  146. : isAzure
  147. ? accessStore.azureApiKey
  148. : isAnthropic
  149. ? accessStore.anthropicApiKey
  150. : accessStore.openaiApiKey;
  151. const clientConfig = getClientConfig();
  152. const makeBearer = (s: string) => `${isAzure || isAnthropic ? "" : "Bearer "}${s.trim()}`;
  153. const validString = (x: string) => x && x.length > 0;
  154. // when using google api in app, not set auth header
  155. if (!(isGoogle && clientConfig?.isApp)) {
  156. // use user's api key first
  157. if (validString(apiKey)) {
  158. headers[authHeader] = makeBearer(apiKey);
  159. } else if (
  160. accessStore.enabledAccessControl() &&
  161. validString(accessStore.accessCode)
  162. ) {
  163. // access_code must send with header named `Authorization`, will using in auth middleware.
  164. headers['Authorization'] = makeBearer(
  165. ACCESS_CODE_PREFIX + accessStore.accessCode,
  166. );
  167. }
  168. }
  169. return headers;
  170. }