// openai.ts
  1. "use client";
  2. import {
  3. ApiPath,
  4. DEFAULT_API_HOST,
  5. DEFAULT_MODELS,
  6. OpenaiPath,
  7. REQUEST_TIMEOUT_MS,
  8. ServiceProvider,
  9. } from "@/app/constant";
  10. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  11. import {
  12. ChatOptions,
  13. getHeaders,
  14. LLMApi,
  15. LLMModel,
  16. LLMUsage,
  17. MultimodalContent,
  18. } from "../api";
  19. import Locale from "../../locales";
  20. import {
  21. EventStreamContentType,
  22. fetchEventSource,
  23. } from "@fortaine/fetch-event-source";
  24. import { prettyObject } from "@/app/utils/format";
  25. import { getClientConfig } from "@/app/config/client";
  26. import { makeAzurePath } from "@/app/azure";
  27. import {
  28. getMessageTextContent,
  29. getMessageImages,
  30. isVisionModel,
  31. } from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}
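
// A minimal sketch of what goes over the wire for a request built from this
// interface (the field values below are illustrative, not defaults taken from
// this codebase):
//
//   const payload: RequestPayload = {
//     messages: [{ role: "user", content: "Hello" }],
//     stream: true,
//     model: "gpt-4-turbo",
//     temperature: 0.7,
//     presence_penalty: 0,
//     frequency_penalty: 0,
//     top_p: 1,
//   };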

export class ChatGPTApi implements LLMApi {
  private disableListModels = true;
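
  // Resolves the final request URL. Azure requests get an api-version suffix
  // via makeAzurePath; OpenAI requests fall back to the built-in proxy route
  // when no base URL is configured. A sketch of the resolution (URLs are
  // illustrative):
  //
  //   path("v1/chat/completions")
  //   //   custom base URL  -> "https://my-host/v1/chat/completions"
  //   //   web build, proxy -> "<ApiPath.OpenAI>/v1/chat/completions"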
  path(path: string): string {
    const accessStore = useAccessStore.getState();
    const isAzure = accessStore.provider === ServiceProvider.Azure;

    if (isAzure && !accessStore.isValidAzure()) {
      throw Error(
        "Incomplete Azure config, please check it in your settings page",
      );
    }

    let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp
        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
        : ApiPath.OpenAI;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) {
      baseUrl = "https://" + baseUrl;
    }

    if (isAzure) {
      path = makeAzurePath(path, accessStore.azureApiVersion);
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
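
  // Issues a chat completion request. With stream enabled, the response is
  // consumed as server-sent events and surfaced incrementally through
  // onUpdate; otherwise a single onFinish call delivers the whole message.
  // A hedged usage sketch (field values are made up; the callbacks are the
  // ChatOptions contract from ../api):
  //
  //   api.chat({
  //     messages: [{ role: "user", content: "Hi" }],
  //     config: { model: "gpt-4-turbo", stream: true },
  //     onUpdate: (full, delta) => console.log(delta),
  //     onFinish: (full) => console.log("done:", full.length),
  //     onError: (e) => console.error(e),
  //   });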
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is deliberately not sent for text models; the parameter
      // has been more trouble than it is worth here.
    };

    // add max_tokens for vision models
    if (visionModel) {
      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
    }

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(OpenaiPath.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
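
        // Pacing: incoming SSE deltas accumulate in remainText, and a
        // requestAnimationFrame loop drains about 1/60th of the backlog per
        // frame into responseText, so long chunks render as a smooth typing
        // effect instead of appearing all at once.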
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              const textmoderation = json?.prompt_filter_results;

              if (delta) {
                remainText += delta;
              }

              // prompt_filter_results is Azure-specific, so only log the
              // moderation verdict when Azure is the active provider
              if (
                textmoderation &&
                textmoderation.length > 0 &&
                useAccessStore.getState().provider === ServiceProvider.Azure
              ) {
                const contentFilterResults =
                  textmoderation[0]?.content_filter_results;
                console.log(
                  `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
                  contentFilterResults,
                );
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
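
  // Queries billing usage for the current calendar month: start_date is the
  // first of the month and end_date is padded one day ahead (the endpoint
  // appears to treat end_date as exclusive). total_usage comes back in
  // cents, hence the division by 100 below.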
  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }

    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from OpenAI");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };

    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }

    if (response.total_usage) {
      response.total_usage = Math.round(response.total_usage) / 100;
    }

    if (total.hard_limit_usd) {
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }
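
  // Model listing is disabled by default, so the static DEFAULT_MODELS list
  // is returned. When enabled, only chat-capable "gpt-*" ids from the list
  // endpoint are kept.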
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
      },
    }));
  }
}
export { OpenaiPath };