  1. "use client";
  2. import {
  3. ApiPath,
  4. DEFAULT_API_HOST,
  5. REQUEST_TIMEOUT_MS,
  6. Tencent,
  7. } from "@/app/constant";
  8. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  9. import {
  10. ChatOptions,
  11. getHeaders,
  12. LLMApi,
  13. LLMModel,
  14. MultimodalContent,
  15. } from "../api";
  16. import Locale from "../../locales";
  17. import {
  18. EventStreamContentType,
  19. fetchEventSource,
  20. } from "@fortaine/fetch-event-source";
  21. import { prettyObject } from "@/app/utils/format";
  22. import { getClientConfig } from "@/app/config/client";
  23. import { getMessageTextContent, isVisionModel } from "@/app/utils";
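
// Shape of an OpenAI-compatible model-list (`/v1/models`) response; exported
// for reuse, though it is not referenced elsewhere in this file.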
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}
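
// Illustrative payload (hypothetical values; the model id and sampling
// parameters are filled in from the user's config at runtime):
//
//   {
//     messages: [{ role: "user", content: "Hello" }],
//     stream: true,
//     model: "hunyuan-lite",
//     temperature: 0.5,
//     presence_penalty: 0,
//     frequency_penalty: 0,
//     top_p: 1,
//   }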

export class HunyuanApi implements LLMApi {
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.tencentUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp
        ? DEFAULT_API_HOST + "/api/proxy/tencent"
        : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl);

    return [baseUrl, path].join("/");
  }
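
  // With no custom endpoint configured, the web build resolves requests
  // against ApiPath.Tencent, while the desktop app targets the proxy host
  // above; e.g. this.path(Tencent.ChatPath) joins the base URL and chat path.

  // The extractor below assumes an OpenAI-compatible response shape
  // ({ choices: [{ message: { content } }] }). If the proxy forwards
  // Hunyuan's native capitalized fields (Choices/Message/Content) instead,
  // this mapping would need adjusting.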
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };
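
    // The request body mirrors the OpenAI chat-completions schema; the
    // server-side proxy is expected to translate or forward it for Hunyuan.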

    console.log("[Request] Tencent payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Tencent.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it takes too long
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }
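
        // Each frame flushes about 1/60 of the buffered text (at least one
        // character), so long responses render as a steady stream instead of
        // arriving in large bursts.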

        // start the animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Tencent] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
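
// Usage sketch (hypothetical call site; in the real app this class is created
// by the client factory, and most LLMConfig fields are omitted here):
//
//   const api = new HunyuanApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "hunyuan-lite", stream: true },
//     onUpdate(message, chunk) { /* render partial text */ },
//     onFinish(message) { /* render final text */ },
//     onError(e) { console.error(e); },
//   });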