tencent.ts 7.5 KB

  1. "use client";
  2. import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. getHeaders,
  7. LLMApi,
  8. LLMModel,
  9. MultimodalContent,
  10. SpeechOptions,
  11. } from "../api";
  12. import Locale from "../../locales";
  13. import {
  14. EventStreamContentType,
  15. fetchEventSource,
  16. } from "@fortaine/fetch-event-source";
  17. import { prettyObject } from "@/app/utils/format";
  18. import { getClientConfig } from "@/app/config/client";
  19. import { getMessageTextContent, isVisionModel } from "@/app/utils";
  20. import mapKeys from "lodash-es/mapKeys";
  21. import mapValues from "lodash-es/mapValues";
  22. import isArray from "lodash-es/isArray";
  23. import isObject from "lodash-es/isObject";
  24. import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}
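// Request body accepted by the Hunyuan chat endpoint; note the PascalCase
// field names produced by capitalizeKeys below.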
interface RequestPayload {
  Messages: {
    Role: "system" | "user" | "assistant";
    Content: string | MultimodalContent[];
  }[];
  Stream?: boolean;
  Model: string;
  Temperature: number;
  TopP: number;
}
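// Recursively rename object keys from snake_case to PascalCase
// (e.g. top_p -> TopP), matching the field names the Hunyuan API expects.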
function capitalizeKeys(obj: any): any {
  if (isArray(obj)) {
    return obj.map(capitalizeKeys);
  } else if (isObject(obj)) {
    return mapValues(
      mapKeys(obj, (value: any, key: string) =>
        key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()),
      ),
      capitalizeKeys,
    );
  } else {
    return obj;
  }
}
export class HunyuanApi implements LLMApi {
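  // Resolve the request base URL: a user-supplied Tencent URL when custom
  // config is enabled, otherwise TENCENT_BASE_URL in the desktop app or the
  // built-in proxy path (ApiPath.Tencent) in the web build.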
  path(): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.tencentUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl);

    return baseUrl;
  }
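  // Pull the assistant text out of a non-streaming Hunyuan response.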
  extractMessage(res: any) {
    return res.Choices?.at(0)?.Message?.Content ?? "";
  }
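  // Text-to-speech is not supported for this provider.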
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
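  // Build a Hunyuan-style request from the session config, then either
  // stream the answer over SSE or wait for a single JSON response.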
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v, index) => ({
      // "In Messages, the system role must be placed at the very beginning of the list"
      role: index !== 0 && v.role === "system" ? "user" : v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload: RequestPayload = capitalizeKeys({
      model: modelConfig.model,
      messages,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      stream: options.config.stream,
    });
    console.log("[Request] Tencent payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it takes longer than REQUEST_TIMEOUT_MS
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let responseRes: Response;

        // animate the response so streamed text renders smoothly
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }
        // start the animation loop
        animateResponseText();
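        // flush any queued text and report completion exactly once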
        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText, responseRes);
          }
        };

        controller.signal.onabort = finish;
        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Tencent] request response content type: ",
              contentType,
            );
            responseRes = res;
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
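          // each SSE event carries a JSON chunk; queue Choices[0].Delta.Content
          // for the typing animation above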
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.Choices as Array<{
                Delta: { Content: string };
              }>;
              const delta = choices[0]?.Delta?.Content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
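        // non-streaming: wait for the full JSON body and finish in one step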
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
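  // Usage/quota reporting is not implemented for Hunyuan; always report zeros.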
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
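  // Dynamic model discovery is not supported; return an empty list.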
  async models(): Promise<LLMModel[]> {
    return [];
  }
}