// api.ts
  1. import { getClientConfig } from "../config/client";
  2. import {
  3. ACCESS_CODE_PREFIX,
  4. Azure,
  5. ModelProvider,
  6. ServiceProvider,
  7. } from "../constant";
  8. import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
  9. import { ChatGPTApi } from "./platforms/openai";
  10. import { GeminiProApi } from "./platforms/google";
  11. export const ROLES = ["system", "user", "assistant"] as const;
  12. export type MessageRole = (typeof ROLES)[number];
  13. export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
  14. export type ChatModel = ModelType;
  15. export interface MultimodalContent {
  16. type: "text" | "image_url";
  17. text?: string;
  18. image_url?: {
  19. url: string;
  20. };
  21. }
  22. export interface RequestMessage {
  23. role: MessageRole;
  24. content: string | MultimodalContent[];
  25. }
  26. export interface LLMConfig {
  27. model: string;
  28. temperature?: number;
  29. top_p?: number;
  30. stream?: boolean;
  31. presence_penalty?: number;
  32. frequency_penalty?: number;
  33. }
  34. export interface ChatOptions {
  35. messages: RequestMessage[];
  36. config: LLMConfig;
  37. onUpdate?: (message: string, chunk: string) => void;
  38. onFinish: (message: string) => void;
  39. onError?: (err: Error) => void;
  40. onController?: (controller: AbortController) => void;
  41. }
  42. export interface LLMUsage {
  43. used: number;
  44. total: number;
  45. }
  46. export interface LLMModel {
  47. name: string;
  48. available: boolean;
  49. provider: LLMModelProvider;
  50. }
  51. export interface LLMModelProvider {
  52. id: string;
  53. providerName: string;
  54. providerType: string;
  55. }
  56. export abstract class LLMApi {
  57. abstract chat(options: ChatOptions): Promise<void>;
  58. abstract usage(): Promise<LLMUsage>;
  59. abstract models(): Promise<LLMModel[]>;
  60. }
  61. type ProviderName = "openai" | "azure" | "claude" | "palm";
  62. interface Model {
  63. name: string;
  64. provider: ProviderName;
  65. ctxlen: number;
  66. }
  67. interface ChatProvider {
  68. name: ProviderName;
  69. apiConfig: {
  70. baseUrl: string;
  71. apiKey: string;
  72. summaryModel: Model;
  73. };
  74. models: Model[];
  75. chat: () => void;
  76. usage: () => void;
  77. }
  78. export class ClientApi {
  79. public llm: LLMApi;
  80. constructor(provider: ModelProvider = ModelProvider.GPT) {
  81. if (provider === ModelProvider.GeminiPro) {
  82. this.llm = new GeminiProApi();
  83. return;
  84. }
  85. this.llm = new ChatGPTApi();
  86. }
  87. config() {}
  88. prompts() {}
  89. masks() {}
  90. async share(messages: ChatMessage[], avatarUrl: string | null = null) {
  91. const msgs = messages
  92. .map((m) => ({
  93. from: m.role === "user" ? "human" : "gpt",
  94. value: m.content,
  95. }))
  96. .concat([
  97. {
  98. from: "human",
  99. value:
  100. "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
  101. },
  102. ]);
  103. // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
  104. // Please do not modify this message
  105. console.log("[Share]", messages, msgs);
  106. const clientConfig = getClientConfig();
  107. const proxyUrl = "/sharegpt";
  108. const rawUrl = "https://sharegpt.com/api/conversations";
  109. const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl;
  110. const res = await fetch(shareUrl, {
  111. body: JSON.stringify({
  112. avatarUrl,
  113. items: msgs,
  114. }),
  115. headers: {
  116. "Content-Type": "application/json",
  117. },
  118. method: "POST",
  119. });
  120. const resJson = await res.json();
  121. console.log("[Share]", resJson);
  122. if (resJson.id) {
  123. return `https://shareg.pt/${resJson.id}`;
  124. }
  125. }
  126. }
  127. export function getHeaders() {
  128. const accessStore = useAccessStore.getState();
  129. const headers: Record<string, string> = {
  130. "Content-Type": "application/json",
  131. Accept: "application/json",
  132. };
  133. const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
  134. const isGoogle = modelConfig.model.startsWith("gemini");
  135. const isAzure = accessStore.provider === ServiceProvider.Azure;
  136. const authHeader = isAzure ? "api-key" : "Authorization";
  137. const apiKey = isGoogle
  138. ? accessStore.googleApiKey
  139. : isAzure
  140. ? accessStore.azureApiKey
  141. : accessStore.openaiApiKey;
  142. const clientConfig = getClientConfig();
  143. const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
  144. const validString = (x: string) => x && x.length > 0;
  145. // when using google api in app, not set auth header
  146. if (!(isGoogle && clientConfig?.isApp)) {
  147. // use user's api key first
  148. if (validString(apiKey)) {
  149. headers[authHeader] = makeBearer(apiKey);
  150. } else if (
  151. accessStore.enabledAccessControl() &&
  152. validString(accessStore.accessCode)
  153. ) {
  154. headers[authHeader] = makeBearer(
  155. ACCESS_CODE_PREFIX + accessStore.accessCode,
  156. );
  157. }
  158. }
  159. return headers;
  160. }