index.ts

import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import {
  API_PREFIX,
  ApiPath,
  DEFAULT_MODELS,
  OpenaiPath,
} from "@/app/constant";
import { ModelConfig, ProviderConfig } from "@/app/store";
import { OpenAI } from "./types";
import { ChatOptions, LLMModel, LLMUsage } from "../types";
import Locale from "@/app/locales";
import { prettyObject } from "@/app/utils/format";
import { getApiPath } from "@/app/utils/path";
import { trimEnd } from "@/app/utils/string";
import { omit } from "@/app/utils/object";
import { createLogger } from "@/app/utils/log";
import { getAuthHeaders } from "../common/auth";
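
// Creates an OpenAI chat client bound to the given provider and model
// config. Both configs are shallow-copied so the client works on its own
// snapshot rather than on the shared store objects.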
export function createOpenAiClient(
  providerConfigs: ProviderConfig,
  modelConfig: ModelConfig,
) {
  const openaiConfig = { ...providerConfigs.openai };
  const logger = createLogger("[OpenAI Client]");
  const openaiModelConfig = { ...modelConfig.openai };

  return {
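    // JSON content type plus the Authorization header derived from the
    // configured API key.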
    headers() {
      return {
        "Content-Type": "application/json",
        ...getAuthHeaders(openaiConfig.apiKey),
      };
    },
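
    // Resolves a request URL: prefer the user-configured endpoint, fall
    // back to the app's default OpenAI proxy path, and normalize the
    // scheme and trailing slash.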
    path(path: OpenaiPath): string {
      let baseUrl = openaiConfig.endpoint;

      // If the endpoint is empty, use the default endpoint.
      if (baseUrl.trim().length === 0) {
        baseUrl = getApiPath(ApiPath.OpenAI);
      }

      if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) {
        baseUrl = "https://" + baseUrl;
      }

      baseUrl = trimEnd(baseUrl, "/");

      return `${baseUrl}/${path}`;
    },
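
    // Pulls the assistant text out of a non-streaming completion response.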
    extractMessage(res: OpenAI.ChatCompletionResponse) {
      return res.choices[0]?.message?.content ?? "";
    },
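
    // Builds the request path, fetch payload, and abort controller shared
    // by chat() and chatStream().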
    beforeRequest(options: ChatOptions, stream = false) {
      const messages = options.messages.map((v) => ({
        role: v.role,
        content: v.content,
      }));

      // Pick the summarize model for summarization requests without
      // mutating openaiModelConfig, so later chats keep the user's model.
      const model = options.shouldSummarize
        ? openaiModelConfig.summarizeModel
        : openaiModelConfig.model;

      const requestBody: OpenAI.ChatCompletionRequest = {
        messages,
        stream,
        ...omit(openaiModelConfig, "summarizeModel"),
        model,
      };

      const path = this.path(OpenaiPath.Chat);
      logger.log("path = ", path, requestBody);

      const controller = new AbortController();
      options.onController?.(controller);

      const payload = {
        method: "POST",
        body: JSON.stringify(requestBody),
        signal: controller.signal,
        headers: this.headers(),
      };

      return { path, payload, controller };
    },
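
    // Non-streaming chat: one POST, then hand the full message to onFinish.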
    async chat(options: ChatOptions) {
      try {
        const { path, payload, controller } = this.beforeRequest(
          options,
          false,
        );

        // An aborted request still resolves, with an empty message.
        controller.signal.onabort = () => options.onFinish("");

        const res = await fetch(path, payload);
        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      } catch (e) {
        logger.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    },
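
    // Streaming chat over server-sent events: deltas are accumulated into
    // context.text and surfaced through onUpdate, and finish() guarantees
    // onFinish fires exactly once.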
    async chatStream(options: ChatOptions) {
      try {
        const { path, payload, controller } = this.beforeRequest(options, true);

        const context = {
          text: "",
          finished: false,
        };

        const finish = () => {
          if (!context.finished) {
            options.onFinish(context.text);
            context.finished = true;
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(path, {
          ...payload,
          async onopen(res) {
            const contentType = res.headers.get("content-type");
            logger.log("response content type: ", contentType);

            // Some servers answer with plain text instead of an event
            // stream; treat the whole body as the final message.
            if (contentType?.startsWith("text/plain")) {
              context.text = await res.clone().text();
              return finish();
            }

            // Anything other than a 200 event stream is an error; collect
            // whatever diagnostics the body offers. (status !== 200 already
            // covers the !res.ok case.)
            if (
              res.status !== 200 ||
              !contentType?.startsWith(EventStreamContentType)
            ) {
              const responseTexts = [context.text];
              let extraInfo = await res.clone().text();

              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              context.text = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || context.finished) {
              return finish();
            }

            const chunk = msg.data;
            try {
              const chunkJson = JSON.parse(
                chunk,
              ) as OpenAI.ChatCompletionStreamResponse;
              const delta = chunkJson.choices[0]?.delta?.content;
              if (delta) {
                context.text += delta;
                options.onUpdate?.(context.text, delta);
              }
            } catch (e) {
              logger.error("[Request] parse error", chunk, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
          },
          openWhenHidden: true,
        });
      } catch (e) {
        logger.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    },
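
    // Queries this month's spend and the account's hard limit from the
    // OpenAI billing endpoints.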
    async usage() {
      // Format a date as YYYY-MM-DD, as the billing API expects.
      const formatDate = (d: Date) =>
        `${d.getFullYear()}-${(d.getMonth() + 1)
          .toString()
          .padStart(2, "0")}-${d.getDate().toString().padStart(2, "0")}`;

      const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
      const now = new Date();
      const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
      const startDate = formatDate(startOfMonth);
      const endDate = formatDate(new Date(Date.now() + ONE_DAY));

      const [used, subs] = await Promise.all([
        fetch(
          `${this.path(
            OpenaiPath.Usage,
          )}?start_date=${startDate}&end_date=${endDate}`,
          {
            method: "GET",
            headers: this.headers(),
          },
        ),
        fetch(this.path(OpenaiPath.Subs), {
          method: "GET",
          headers: this.headers(),
        }),
      ]);

      if (!used.ok || !subs.ok) {
        throw new Error("Failed to query usage from openai");
      }

      const response = (await used.json()) as {
        total_usage?: number;
        error?: {
          type: string;
          message: string;
        };
      };

      const total = (await subs.json()) as {
        hard_limit_usd?: number;
      };

      if (response.error?.type) {
        throw new Error(response.error.message);
      }

      // total_usage is reported in cents; convert it to dollars. The hard
      // limit is already in dollars; just round it to two decimals.
      response.total_usage = Math.round(response.total_usage ?? 0) / 100;
      total.hard_limit_usd =
        Math.round((total.hard_limit_usd ?? 0) * 100) / 100;

      return {
        used: response.total_usage,
        total: total.hard_limit_usd,
      } as LLMUsage;
    },
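
    // Lists available chat models: user-defined custom models plus either
    // the built-in defaults or, when autoFetchModels is on, the "gpt-*"
    // models reported by the API.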
    async models(): Promise<LLMModel[]> {
      // Drop empty entries so a trailing comma does not produce a model
      // with an empty name.
      const customModels = openaiConfig.customModels
        .split(",")
        .map((v) => v.trim())
        .filter((v) => v.length > 0)
        .map((v) => ({
          name: v,
          available: true,
        }));

      if (!openaiConfig.autoFetchModels) {
        return [...DEFAULT_MODELS, ...customModels];
      }

      const res = await fetch(this.path(OpenaiPath.ListModel), {
        method: "GET",
        headers: this.headers(),
      });

      const resJson = (await res.json()) as OpenAI.ListModelResponse;
      const chatModels =
        resJson.data?.filter((m) => m.id.startsWith("gpt-")) ?? [];

      return chatModels
        .map((m) => ({
          name: m.id,
          available: true,
        }))
        .concat(customModels);
    },
  };
}
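
// A minimal usage sketch (the concrete providerConfigs/modelConfig values
// are assumptions; real ones come from the app store):
//
//   const client = createOpenAiClient(providerConfigs, modelConfig);
//   await client.chat({
//     messages: [{ role: "user", content: "Hello!" }],
//     onFinish: (message) => console.log(message),
//     onError: (e) => console.error(e),
//   });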