openai.ts
  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import {
  4. ApiPath,
  5. OPENAI_BASE_URL,
  6. DEFAULT_MODELS,
  7. OpenaiPath,
  8. Azure,
  9. REQUEST_TIMEOUT_MS,
  10. ServiceProvider,
  11. } from "@/app/constant";
  12. import {
  13. ChatMessageTool,
  14. useAccessStore,
  15. useAppConfig,
  16. useChatStore,
  17. usePluginStore,
  18. } from "@/app/store";
  19. import { collectModelsWithDefaultModel } from "@/app/utils/model";
  20. import {
  21. preProcessImageContent,
  22. uploadImage,
  23. base64Image2Blob,
  24. streamWithThink,
  25. } from "@/app/utils/chat";
  26. import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
  27. import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
  28. import {
  29. ChatOptions,
  30. getHeaders,
  31. LLMApi,
  32. LLMModel,
  33. LLMUsage,
  34. MultimodalContent,
  35. SpeechOptions,
  36. } from "../api";
  37. import Locale from "../../locales";
  38. import { getClientConfig } from "@/app/config/client";
  39. import {
  40. getMessageTextContent,
  41. isVisionModel,
  42. isDalle3 as _isDalle3,
  43. getTimeoutMSByModel,
  44. } from "@/app/utils";
  45. import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

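// Chat-completions request body. Note the "developer" role (used in place of
// "system" for the o-series reasoning models, see chat() below) and the two
// token limits: max_tokens for regular/vision models, max_completion_tokens
// for o1/o3/o4-mini.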
export interface RequestPayload {
  messages: {
    role: "developer" | "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
  max_completion_tokens?: number;
}

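// Image-generation request body for DALL·E 3. chat() forces response_format
// to "b64_json" so the image can be saved to CacheStorage instead of relying
// on OpenAI's short-lived image URLs.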
export interface DalleRequestPayload {
  model: string;
  prompt: string;
  response_format: "url" | "b64_json";
  n: number;
  size: ModelSize;
  quality: DalleQuality;
  style: DalleStyle;
}

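// A minimal usage sketch (a hypothetical call site; the real app constructs
// the client through its provider factory and fills in the full ChatOptions):
//
//   const api = new ChatGPTApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gpt-4o-mini", stream: true },
//     onUpdate(message) { /* streamed partial text */ },
//     onFinish(message, res) { console.log(message); },
//     onError(e) { console.error(e); },
//   } as ChatOptions);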
export class ChatGPTApi implements LLMApi {
  private disableListModels = true;

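  // Resolves the full request URL for a given API path. Azure requests are
  // detected by the "deployments" path segment; custom base URLs come from the
  // access store, otherwise the app falls back to OPENAI_BASE_URL (app build)
  // or a same-origin proxy path. For example (assuming the default base URL):
  //   path(OpenaiPath.ChatPath) -> "https://api.openai.com/v1/chat/completions"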
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    const isAzure = path.includes("deployments");
    if (accessStore.useCustomConfig) {
      if (isAzure && !accessStore.isValidAzure()) {
        throw Error(
          "incomplete azure config, please check it in your settings page",
        );
      }

      baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
      baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !isAzure &&
      !baseUrl.startsWith(ApiPath.OpenAI)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    // Rebuild the URL when the client is configured to use the Cloudflare AI Gateway.
    return cloudflareAIGatewayUrl([baseUrl, path].join("/"));
  }

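  // Extracts a displayable message from a response body: errors are wrapped in
  // a fenced code block, image responses become image_url content parts, and
  // plain chat responses return choices[0].message.content.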
  async extractMessage(res: any) {
    if (res.error) {
      return "```\n" + JSON.stringify(res, null, 4) + "\n```";
    }
    // DALL·E 3 returns a URL (or base64 data); build an image message from it.
    if (res.data) {
      let url = res.data?.at(0)?.url ?? "";
      const b64_json = res.data?.at(0)?.b64_json ?? "";
      if (!url && b64_json) {
        // Upload the base64 image to CacheStorage and use the resulting URL.
        url = await uploadImage(base64Image2Blob(b64_json, "image/png"));
      }
      return [
        {
          type: "image_url",
          image_url: {
            url,
          },
        },
      ];
    }
    return res.choices?.at(0)?.message?.content ?? res;
  }

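  // Text-to-speech: posts the input to the speech endpoint and returns the raw
  // audio as an ArrayBuffer. The request is aborted after REQUEST_TIMEOUT_MS.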
  async speech(options: SpeechOptions): Promise<ArrayBuffer> {
    const requestPayload = {
      model: options.model,
      input: options.input,
      voice: options.voice,
      response_format: options.response_format,
      speed: options.speed,
    };

    console.log("[Request] openai speech payload: ", requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const speechPath = this.path(OpenaiPath.SpeechPath);
      const speechPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it runs past the timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      const res = await fetch(speechPath, speechPayload);
      clearTimeout(requestTimeoutId);
      return await res.arrayBuffer();
    } catch (e) {
      console.log("[Request] failed to make a speech request", e);
      throw e;
    }
  }

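  // Sends a chat (or DALL·E 3 image) request. The payload depends on the model
  // family: DALL·E 3 gets a DalleRequestPayload, o1/o3/o4-mini get fixed
  // sampling parameters plus max_completion_tokens, and everything else gets a
  // regular chat-completions payload. Streams via streamWithThink when
  // options.config.stream is set.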
  async chat(options: ChatOptions) {
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    let requestPayload: RequestPayload | DalleRequestPayload;

    const isDalle3 = _isDalle3(options.config.model);
    const isO1OrO3 =
      options.config.model.startsWith("o1") ||
      options.config.model.startsWith("o3") ||
      options.config.model.startsWith("o4-mini");
    if (isDalle3) {
      const prompt = getMessageTextContent(
        options.messages.slice(-1)?.pop() as any,
      );
      requestPayload = {
        model: options.config.model,
        prompt,
        // URLs are only valid for 60 minutes after the image has been generated.
        response_format: "b64_json", // use b64_json and save the image in CacheStorage
        n: 1,
        size: options.config?.size ?? "1024x1024",
        quality: options.config?.quality ?? "standard",
        style: options.config?.style ?? "vivid",
      };
    } else {
      const visionModel = isVisionModel(options.config.model);
      const messages: ChatOptions["messages"] = [];
      for (const v of options.messages) {
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        if (!(isO1OrO3 && v.role === "system"))
          messages.push({ role: v.role, content });
      }

      // o1 does not yet support images, tools (plugins in ChatGPTNextWeb),
      // system messages, stream, logprobs, temperature, top_p, n,
      // presence_penalty, or frequency_penalty.
      requestPayload = {
        messages,
        stream: options.config.stream,
        model: modelConfig.model,
        temperature: !isO1OrO3 ? modelConfig.temperature : 1,
        presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
        frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
        top_p: !isO1OrO3 ? modelConfig.top_p : 1,
        // max_tokens: Math.max(modelConfig.max_tokens, 1024),
        // max_tokens is intentionally not sent for regular chat models; it is
        // only set for vision models (below) and, as max_completion_tokens,
        // for the o-series models.
      };

      if (isO1OrO3) {
        // By default the o1/o3 models will not attempt to produce output that includes markdown formatting;
        // manually add a "Formatting re-enabled" developer message to encourage markdown in model responses.
        // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
        requestPayload["messages"].unshift({
          role: "developer",
          content: "Formatting re-enabled",
        });

        // o1/o3 use max_completion_tokens to control the number of output tokens
        // (https://platform.openai.com/docs/guides/reasoning#controlling-costs).
        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
      }

      // Add max_tokens for vision models.
      if (visionModel && !isO1OrO3) {
        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
      }
    }

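    // For reference, a streaming request for a regular chat model ends up
    // shaped roughly like this (values are illustrative, not defaults):
    //   {
    //     "messages": [{ "role": "user", "content": "Hello" }],
    //     "stream": true,
    //     "model": "gpt-4o-mini",
    //     "temperature": 0.5,
    //     "presence_penalty": 0,
    //     "frequency_penalty": 0,
    //     "top_p": 1
    //   }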
    console.log("[Request] openai payload: ", requestPayload);

    // DALL·E 3 requests are never streamed.
    const shouldStream = !isDalle3 && !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = "";
      if (modelConfig.providerName === ServiceProvider.Azure) {
        // Find the model and use its displayName as the Azure deployment name.
        const { models: configModels, customModels: configCustomModels } =
          useAppConfig.getState();
        const {
          defaultModel,
          customModels: accessCustomModels,
          useCustomConfig,
        } = useAccessStore.getState();
        const models = collectModelsWithDefaultModel(
          configModels,
          [configCustomModels, accessCustomModels].join(","),
          defaultModel,
        );
        const model = models.find(
          (model) =>
            model.name === modelConfig.model &&
            model?.provider?.providerName === ServiceProvider.Azure,
        );
        chatPath = this.path(
          (isDalle3 ? Azure.ImagePath : Azure.ChatPath)(
            (model?.displayName ?? model?.name) as string,
            useCustomConfig ? useAccessStore.getState().azureApiVersion : "",
          ),
        );
      } else {
        chatPath = this.path(
          isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
        );
      }

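      // streamWithThink consumes the SSE stream and separates the model's
      // reasoning_content ("thinking") from regular content. Each SSE chunk
      // parsed below looks roughly like this (an illustrative delta, not a
      // full chat.completion.chunk object):
      //   { "choices": [{ "delta": { "content": "Hi", "reasoning_content": null } }] }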
      if (shouldStream) {
        let index = -1;
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        // console.log("getAsTools", tools, funcs);
        streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // A new tool call starts: record it and advance the index.
                index += 1;
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent chunks append to the current call's arguments.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null.
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the tool_calls message and the tool call results.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Reset the tool-call index for the follow-up request.
            index = -1;
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const chatPayload = {
          method: "POST",
          body: JSON.stringify(requestPayload),
          signal: controller.signal,
          headers: getHeaders(),
        };

        // Abort the request if it runs past the model-specific timeout.
        const requestTimeoutId = setTimeout(
          () => controller.abort(),
          getTimeoutMSByModel(options.config.model),
        );

        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = await this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

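  // Queries OpenAI's legacy billing endpoints for usage from the start of the
  // current month through tomorrow, plus the subscription's hard limit in USD.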
  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }

    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from openai");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };

    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }

    if (response.total_usage) {
      // total_usage is reported in cents; convert to dollars.
      response.total_usage = Math.round(response.total_usage) / 100;
    }

    if (total.hard_limit_usd) {
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }

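  // Lists available chat models. Disabled by default (disableListModels), in
  // which case the static DEFAULT_MODELS list is returned instead.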
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter(
      (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
    );
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    // Since disableListModels currently defaults to true for OpenAI, this code
    // path is never actually reached.
    let seq = 1000; // Keep the ordering consistent with Constant.ts.
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
        sorted: 1,
      },
    }));
  }
}

export { OpenaiPath };