index.ts

import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { API_PREFIX, ApiPath, OpenaiPath } from "@/app/constant";
import { ModelConfig, ProviderConfig } from "@/app/store";
import { OpenAI } from "./types";
import { ChatOptions, LLMModel, LLMUsage } from "../types";
import Locale from "@/app/locales";
import { prettyObject } from "@/app/utils/format";
import { getApiPath } from "@/app/utils/path";
import { trimEnd } from "@/app/utils/string";
import { omit } from "@/app/utils/object";
import { createLogger } from "@/app/utils/log";
import { getAuthKey } from "../common/auth";
import { OpenAIConfig } from "./config";
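
/**
 * Builds an OpenAI chat client bound to the given provider and model
 * configuration. The returned object exposes request helpers plus
 * chat, chatStream, usage, and models.
 */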
export function createOpenAiClient(
  providerConfigs: ProviderConfig,
  modelConfig: ModelConfig,
) {
  const openaiConfig = { ...providerConfigs.openai };
  const openaiModelConfig = { ...modelConfig.openai };
  const logger = createLogger("[OpenAI Client]");

  return {
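    // Common request headers; the Authorization value comes from the shared
    // auth helper.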
    headers() {
      return {
        "Content-Type": "application/json",
        Authorization: getAuthKey(),
      };
    },
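
    // Builds the full request URL for a given API path.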
    path(path: OpenaiPath): string {
      let baseUrl: string = openaiConfig.endpoint;

      // if endpoint is empty, use default endpoint
      if (baseUrl.trim().length === 0) {
        baseUrl = getApiPath(ApiPath.OpenAI);
      }

      if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) {
        baseUrl = "https://" + baseUrl;
      }

      baseUrl = trimEnd(baseUrl, "/");

      return `${baseUrl}/${path}`;
    },
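
    // Pulls the assistant message out of a non-streaming completion response.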
    extractMessage(res: OpenAI.ChatCompletionResponse) {
      return res.choices[0]?.message?.content ?? "";
    },
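
    // Assembles the request path, payload, and abort controller shared by
    // chat() and chatStream().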
    beforeRequest(options: ChatOptions, stream = false) {
      const messages = options.messages.map((v) => ({
        role: v.role,
        content: v.content,
      }));

      // Use a per-request copy so switching to the summarize model does not
      // permanently overwrite the configured chat model.
      const requestModelConfig = { ...openaiModelConfig };
      if (options.shouldSummarize) {
        requestModelConfig.model = requestModelConfig.summarizeModel;
      }

      const requestBody: OpenAI.ChatCompletionRequest = {
        messages,
        stream,
        ...omit(requestModelConfig, "summarizeModel"),
      };

      const path = this.path(OpenaiPath.Chat);
      logger.log("path = ", path, requestBody);

      const controller = new AbortController();
      options.onController?.(controller);

      const payload = {
        method: "POST",
        body: JSON.stringify(requestBody),
        signal: controller.signal,
        headers: this.headers(),
      };

      return {
        path,
        payload,
        controller,
      };
    },
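
    // Non-streaming chat: a single POST whose full response body is parsed
    // and handed to onFinish.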
    async chat(options: ChatOptions) {
      try {
        const { path, payload, controller } = this.beforeRequest(
          options,
          false,
        );

        // An aborted request still resolves the caller, just with empty text.
        controller.signal.onabort = () => options.onFinish("");

        const res = await fetch(path, payload);
        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      } catch (e) {
        logger.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    },
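
    // Streaming chat over server-sent events: text accumulates in a local
    // context and onUpdate fires for every delta until [DONE] or an error.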
    async chatStream(options: ChatOptions) {
      try {
        const { path, payload, controller } = this.beforeRequest(options, true);

        const context = {
          text: "",
          finished: false,
        };

        // finish() is idempotent so onFinish fires exactly once however the
        // stream ends: done marker, abort, close, or an error response.
        const finish = () => {
          if (!context.finished) {
            options.onFinish(context.text);
            context.finished = true;
          }
        };

        controller.signal.onabort = finish;
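
        // fetchEventSource performs the POST and dispatches server-sent
        // events to the handlers below.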
        fetchEventSource(path, {
          ...payload,
          async onopen(res) {
            const contentType = res.headers.get("content-type");
            logger.log("response content type: ", contentType);

            // A plain-text body is not a stream: read it whole and finish.
            if (contentType?.startsWith("text/plain")) {
              context.text = await res.clone().text();
              return finish();
            }

            // Anything other than a 200 event stream is treated as an error
            // and surfaced to the user as the final message text.
            if (
              !res.ok ||
              !contentType?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [context.text];
              let extraInfo = await res.clone().text();
              try {
                // Prefer a pretty-printed JSON error body when available.
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              context.text = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || context.finished) {
              return finish();
            }
            const chunk = msg.data;
            try {
              const chunkJson = JSON.parse(
                chunk,
              ) as OpenAI.ChatCompletionStreamResponse;
              // Guard with optional chaining: some chunks carry no choices
              // or no delta content.
              const delta = chunkJson.choices[0]?.delta?.content;
              if (delta) {
                context.text += delta;
                options.onUpdate?.(context.text, delta);
              }
            } catch (e) {
              logger.error("[Request] parse error", chunk, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
          },
          openWhenHidden: true,
        });
      } catch (e) {
        logger.error("failed to chat stream", e);
        options.onError?.(e as Error);
      }
    },
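
    // Stubbed: usage reporting is not implemented for this provider, so it
    // always reports zero.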
    async usage() {
      return {
        used: 0,
        total: 0,
      } as LLMUsage;
    },
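
    // Lists available models: user-defined custom models are always included;
    // remote models are fetched only when autoFetchModels is enabled.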
    async models(): Promise<LLMModel[]> {
      const customModels = openaiConfig.customModels
        .split(",")
        .map((v) => v.trim())
        .filter((v) => !!v)
        .map((v) => ({
          name: v,
          available: true,
        }));

      if (!openaiConfig.autoFetchModels) {
        return [...OpenAIConfig.provider.models, ...customModels];
      }

      const res = await fetch(this.path(OpenaiPath.ListModel), {
        method: "GET",
        headers: this.headers(),
      });

      const resJson = (await res.json()) as OpenAI.ListModelResponse;
      // Only chat-capable models are surfaced.
      const chatModels =
        resJson.data?.filter((m) => m.id.startsWith("gpt-")) ?? [];

      return chatModels
        .map((m) => ({
          name: m.id,
          available: true,
        }))
        .concat(customModels);
    },
  };
}
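
// Usage sketch (hypothetical call site; real configs come from the app store,
// and render/save are placeholders, not APIs from this codebase):
//
//   const client = createOpenAiClient(providerConfigs, modelConfig);
//   await client.chatStream({
//     messages: [{ role: "user", content: "Hello" }],
//     onUpdate: (text, delta) => render(text),
//     onFinish: (text) => save(text),
//     onError: (e) => console.error(e),
//   });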