openai.ts

  1. "use client";
  2. // azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  OpenaiPath,
  Azure,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}
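
// Request body for a chat completion. OpenAI and Azure accept the same
// schema; only the endpoint URL and auth headers differ.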
interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}

export class ChatGPTApi implements LLMApi {
  private disableListModels = true;
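
  // Resolves the full request URL. A user-supplied custom endpoint takes
  // priority; otherwise app builds go through the built-in proxy and web
  // builds use the relative API path.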
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    const isAzure = path.includes("deployments");

    if (accessStore.useCustomConfig) {
      if (isAzure && !accessStore.isValidAzure()) {
        throw Error(
          "incomplete azure config, please check it in your settings page",
        );
      }

      baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !isAzure &&
      !baseUrl.startsWith(ApiPath.OpenAI)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
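
  // Pulls the assistant message text out of a non-streaming completion response.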
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
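
  // Sends a chat completion request. Streaming responses are consumed as
  // server-sent events and surfaced incrementally through options.onUpdate;
  // non-streaming responses resolve once through options.onFinish.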
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately omitted for regular chat models so the
      // API falls back to each model's own default completion limit.
    };

    // Vision preview models need an explicit max_tokens, otherwise the API
    // defaults to a very small completion budget.
    if (visionModel && modelConfig.model.includes("preview")) {
      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
    }

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = "";
      if (modelConfig.providerName == ServiceProvider.Azure) {
        // For Azure, the deployment name is the model's displayName (falling
        // back to its plain name), so look the model up in the merged model list.
        const { models: configModels, customModels: configCustomModels } =
          useAppConfig.getState();
        const { defaultModel, customModels: accessCustomModels } =
          useAccessStore.getState();
        const models = collectModelsWithDefaultModel(
          configModels,
          [configCustomModels, accessCustomModels].join(","),
          defaultModel,
        );
        const model = models.find(
          (model) =>
            model.name == modelConfig.model &&
            model?.provider?.providerName == ServiceProvider.Azure,
        );

        chatPath = this.path(
          Azure.ChatPath(
            (model?.displayName ?? model?.name) as string,
            useAccessStore.getState().azureApiVersion,
          ),
        );
      } else {
        chatPath = this.path(OpenaiPath.ChatPath);
      }

      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it takes longer than REQUEST_TIMEOUT_MS.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // Animate the streamed response so it renders smoothly: each frame
        // moves roughly 1/60 of the remaining buffered text from remainText
        // into the visible responseText.
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation loop
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
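
        // Consume the completion as a server-sent event stream: onopen
        // validates the response, onmessage feeds each delta into remainText
        // for the animation loop above, and onclose/onerror end the turn.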
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              const textmoderation = json?.prompt_filter_results;

              if (delta) {
                remainText += delta;
              }

              // Azure attaches content-moderation verdicts to the stream;
              // log them when the active provider is Azure.
              if (
                textmoderation &&
                textmoderation.length > 0 &&
                modelConfig.providerName === ServiceProvider.Azure
              ) {
                const contentFilterResults =
                  textmoderation[0]?.content_filter_results;
                console.log(
                  `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
                  contentFilterResults,
                );
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
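
  // Queries OpenAI's dashboard billing endpoints for this month's usage and
  // the account's hard spending limit, both reported in USD.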
  async usage() {
    const formatDate = (d: Date) =>
      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
        .getDate()
        .toString()
        .padStart(2, "0")}`;
    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
    const now = new Date();
    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
    const startDate = formatDate(startOfMonth);
    const endDate = formatDate(new Date(Date.now() + ONE_DAY));

    const [used, subs] = await Promise.all([
      fetch(
        this.path(
          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
        ),
        {
          method: "GET",
          headers: getHeaders(),
        },
      ),
      fetch(this.path(OpenaiPath.SubsPath), {
        method: "GET",
        headers: getHeaders(),
      }),
    ]);

    if (used.status === 401) {
      throw new Error(Locale.Error.Unauthorized);
    }

    if (!used.ok || !subs.ok) {
      throw new Error("Failed to query usage from openai");
    }

    const response = (await used.json()) as {
      total_usage?: number;
      error?: {
        type: string;
        message: string;
      };
    };

    const total = (await subs.json()) as {
      hard_limit_usd?: number;
    };

    if (response.error && response.error.type) {
      throw Error(response.error.message);
    }

    if (response.total_usage) {
      // total_usage is reported in cents; convert to dollars.
      response.total_usage = Math.round(response.total_usage) / 100;
    }

    if (total.hard_limit_usd) {
      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
    }

    return {
      used: response.total_usage,
      total: total.hard_limit_usd,
    } as LLMUsage;
  }
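
  // Lists available chat models. Listing is disabled by default, in which
  // case the built-in DEFAULT_MODELS are returned instead of hitting the API.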
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
      },
    }));
  }
}

export { OpenaiPath };
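
// A minimal usage sketch (illustrative only, not part of this module). The
// message shape and config values are assumptions based on the ChatOptions
// fields this class actually reads:
//
//   const api = new ChatGPTApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello!" }],
//     config: { model: "gpt-3.5-turbo", providerName: "OpenAI", stream: true },
//     onUpdate: (full, delta) => console.log(delta),
//     onFinish: (full) => console.log("done:", full),
//     onError: (e) => console.error(e),
//   });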