tencent.ts

  1. "use client";
  2. import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant";
  3. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  4. import {
  5. ChatOptions,
  6. getHeaders,
  7. LLMApi,
  8. LLMModel,
  9. MultimodalContent,
  10. SpeechOptions,
  11. TranscriptionOptions,
  12. } from "../api";
  13. import Locale from "../../locales";
  14. import {
  15. EventStreamContentType,
  16. fetchEventSource,
  17. } from "@fortaine/fetch-event-source";
  18. import { prettyObject } from "@/app/utils/format";
  19. import { getClientConfig } from "@/app/config/client";
  20. import { getMessageTextContent, isVisionModel } from "@/app/utils";
  21. import mapKeys from "lodash-es/mapKeys";
  22. import mapValues from "lodash-es/mapValues";
  23. import isArray from "lodash-es/isArray";
  24. import isObject from "lodash-es/isObject";
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  Messages: {
    Role: "system" | "user" | "assistant";
    Content: string | MultimodalContent[];
  }[];
  Stream?: boolean;
  Model: string;
  Temperature: number;
  TopP: number;
}
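
// Recursively convert keys such as "top_p" or "messages" to the PascalCase
// form ("TopP", "Messages") used by the Hunyuan request payload above.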
function capitalizeKeys(obj: any): any {
  if (isArray(obj)) {
    return obj.map(capitalizeKeys);
  } else if (isObject(obj)) {
    return mapValues(
      mapKeys(obj, (value: any, key: string) =>
        key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()),
      ),
      capitalizeKeys,
    );
  } else {
    return obj;
  }
}
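
// LLM client for Tencent Hunyuan, reached through the app's proxy endpoints.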
export class HunyuanApi implements LLMApi {
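  // Resolve the request base URL: a user-configured Tencent endpoint, the
  // desktop app's proxy, or the built-in ApiPath.Tencent route.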
  path(): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.tencentUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp
        ? DEFAULT_API_HOST + "/api/proxy/tencent"
        : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl);

    return baseUrl;
  }
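
  // Hunyuan responses use PascalCase fields: Choices[0].Message.Content.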
  extractMessage(res: any) {
    return res.Choices?.at(0)?.Message?.Content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
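
  // Build the Hunyuan request, then either stream the reply over SSE or fall
  // back to a single fetch when streaming is disabled.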
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v, index) => ({
      // The Hunyuan API requires the "system" role to appear only at the very
      // beginning of Messages, so later system messages are downgraded to "user".
      role: index !== 0 && v.role === "system" ? "user" : v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
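
    // Assemble the payload with lower-case keys; capitalizeKeys() converts them
    // to the PascalCase names Hunyuan expects (Model, Messages, Temperature, TopP, Stream).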
    const requestPayload: RequestPayload = capitalizeKeys({
      model: modelConfig.model,
      messages,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      stream: options.config.stream,
    });

    console.log("[Request] Tencent payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
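
        // Stream the completion via server-sent events; Hunyuan deltas arrive
        // as PascalCase fields under Choices[0].Delta.Content.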
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Tencent] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.Choices as Array<{
                Delta: { Content: string };
              }>;
              const delta = choices[0]?.Delta?.Content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
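
  // Usage reporting is not wired up for Hunyuan here; return zeros.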
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
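
  // Model listing is not implemented for Hunyuan; return an empty list.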
  async models(): Promise<LLMModel[]> {
    return [];
  }
}