// openai.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
  3. import {
  4. ApiPath,
  5. DEFAULT_API_HOST,
  6. DEFAULT_MODELS,
  7. OpenaiPath,
  8. Azure,
  9. REQUEST_TIMEOUT_MS,
  10. ServiceProvider,
  11. } from "@/app/constant";
  12. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  13. import { collectModelsWithDefaultModel } from "@/app/utils/model";
  14. import {
  15. ChatOptions,
  16. getHeaders,
  17. LLMApi,
  18. LLMModel,
  19. LLMUsage,
  20. MultimodalContent,
  21. } from "../api";
  22. import Locale from "../../locales";
  23. import {
  24. EventStreamContentType,
  25. fetchEventSource,
  26. } from "@fortaine/fetch-event-source";
  27. import { prettyObject } from "@/app/utils/format";
  28. import { getClientConfig } from "@/app/config/client";
  29. import {
  30. getMessageTextContent,
  31. getMessageImages,
  32. isVisionModel,
  33. } from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}
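
// For illustration, a minimal RequestPayload as it would be serialized for the
// chat completions endpoint; the model name and sampling values below are
// made-up examples, not defaults taken from this codebase:
//
// const examplePayload: RequestPayload = {
//   messages: [{ role: "user", content: "Hello!" }],
//   stream: true,
//   model: "gpt-3.5-turbo",
//   temperature: 0.7,
//   presence_penalty: 0,
//   frequency_penalty: 0,
//   top_p: 1,
// };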

export class ChatGPTApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    const isAzure = path.includes("deployments");
    if (accessStore.useCustomConfig) {
      if (isAzure && !accessStore.isValidAzure()) {
        throw Error(
          "incomplete azure config, please check it in your settings page",
        );
      }

      baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !isAzure &&
      !baseUrl.startsWith(ApiPath.OpenAI)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
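
  // Sketch of how path() resolves, assuming ApiPath.OpenAI is "/api/openai"
  // and `path` is OpenaiPath.ChatPath ("v1/chat/completions"); both values are
  // defined in @/app/constant and are assumptions here, not verified:
  //
  //   web build, no custom config:  "/api/openai/v1/chat/completions"
  //   app build, no custom config:  DEFAULT_API_HOST + "/proxy/api/openai/v1/chat/completions"
  //   custom URL "api.example.com": "https://api.example.com/v1/chat/completions"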

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is deliberately not sent for regular chat requests;
      // forwarding the configured value has caused more problems than
      // letting the API apply its own default.
    };

    // vision preview models default to a very small completion limit,
    // so send an explicit max_tokens with a sensible floor
    if (visionModel && modelConfig.model.includes("preview")) {
      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
    }

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = "";
      if (modelConfig.providerName === ServiceProvider.Azure) {
        // find the model entry and use its displayName as the Azure deployment name
        const { models: configModels, customModels: configCustomModels } =
          useAppConfig.getState();
        const {
          defaultModel,
          customModels: accessCustomModels,
          useCustomConfig,
        } = useAccessStore.getState();
        const models = collectModelsWithDefaultModel(
          configModels,
          [configCustomModels, accessCustomModels].join(","),
          defaultModel,
        );
        const model = models.find(
          (model) =>
            model.name === modelConfig.model &&
            model?.provider?.providerName === ServiceProvider.Azure,
        );
        chatPath = this.path(
          Azure.ChatPath(
            (model?.displayName ?? model?.name) as string,
            useCustomConfig ? useAccessStore.getState().azureApiVersion : "",
          ),
        );
      } else {
        chatPath = this.path(OpenaiPath.ChatPath);
      }
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it runs past the timeout
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response so streaming looks smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation loop
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
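
        // Pacing note for animateResponseText() above: each frame emits about
        // 1/60 of the current backlog, so the visible text converges on the
        // buffered text exponentially (roughly 63% of any backlog is shown
        // within one second at a 60 Hz refresh rate), and the Math.max(1, ...)
        // floor guarantees the tail always drains.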

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !contentType?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              const textmoderation = json?.prompt_filter_results;

              if (delta) {
                remainText += delta;
              }

              // only Azure responses carry prompt_filter_results, so guard on
              // the active provider (the original bare ServiceProvider.Azure
              // check was always truthy)
              if (
                textmoderation &&
                textmoderation.length > 0 &&
                modelConfig.providerName === ServiceProvider.Azure
              ) {
                const contentFilterResults =
                  textmoderation[0]?.content_filter_results;
                console.log(
                  `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
                  contentFilterResults,
                );
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
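
  // For reference, the stream handled above arrives as server-sent events in
  // the standard OpenAI chunk format, roughly:
  //
  //   data: {"choices":[{"delta":{"content":"Hel"}}]}
  //   data: {"choices":[{"delta":{"content":"lo"}}]}
  //   data: [DONE]
  //
  // onmessage() parses each JSON chunk, appends delta.content to remainText,
  // and finish() fires on the [DONE] sentinel or when the stream closes.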

  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }

    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from openai");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };

    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }

    if (response.total_usage) {
      // the usage endpoint reports total_usage in cents; convert to dollars
      response.total_usage = Math.round(response.total_usage) / 100;
    }

    if (total.hard_limit_usd) {
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }
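
  // Sketch of the LLMUsage value usage() resolves to, given the conversions
  // above (the figures are illustrative, not real billing data):
  //
  //   { used: 12.34, total: 120 }  // dollars spent this month / hard limit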

  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
      },
    }));
  }
}
export { OpenaiPath };
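
// A minimal, hypothetical call site for this client; the exact ChatOptions
// fields come from the "../api" types imported above, and the model name is
// only an example:
//
// const api = new ChatGPTApi();
// await api.chat({
//   messages: [{ role: "user", content: "Hello!" }],
//   config: { model: "gpt-3.5-turbo", stream: true },
//   onUpdate: (fullText, delta) => console.log(delta),
//   onFinish: (fullText) => console.log("done:", fullText),
//   onError: (err) => console.error(err),
// });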