// iflytek.ts
  1. "use client";
  2. import {
  3. ApiPath,
  4. IFLYTEK_BASE_URL,
  5. Iflytek,
  6. REQUEST_TIMEOUT_MS,
  7. } from "@/app/constant";
  8. import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
  9. import {
  10. ChatOptions,
  11. getHeaders,
  12. LLMApi,
  13. LLMModel,
  14. SpeechOptions,
  15. } from "../api";
  16. import Locale from "../../locales";
  17. import {
  18. EventStreamContentType,
  19. fetchEventSource,
  20. } from "@fortaine/fetch-event-source";
  21. import { prettyObject } from "@/app/utils/format";
  22. import { getClientConfig } from "@/app/config/client";
  23. import { getMessageTextContent } from "@/app/utils";
  24. import { fetch } from "@/app/utils/stream";
  25. import { RequestPayload } from "./openai";
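
// Client adapter for the iFlytek Spark chat completion API, implementing
// the shared LLMApi interface used by the other provider adapters.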
export class SparkApi implements LLMApi {
  private disableListModels = true;

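  // Resolve the request URL: prefer a user-configured endpoint, fall back to
  // the built-in proxy path (or the full upstream URL in the desktop app),
  // then normalize trailing slashes and a missing protocol.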
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.iflytekUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Iflytek;
      baseUrl = isApp ? IFLYTEK_BASE_URL + apiPath : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
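
  // Text-to-speech is not supported by this adapter.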
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
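
  // Send a chat completion request. When streaming is enabled, tokens arrive
  // over server-sent events and are surfaced incrementally through
  // options.onUpdate; otherwise a single JSON response is parsed.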
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately omitted from the payload.
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] Spark payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Iflytek.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it exceeds the configured timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // Drain the buffered delta text a few characters per frame so the
        // response appears to stream smoothly.
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // Start the animation loop.
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Spark] request response content type: ",
              contentType,
            );

            // A plain-text body is treated as the complete response.
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            // Anything other than a 200 event-stream response is an error.
            if (
              !res.ok ||
              !contentType?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                extraInfo = Locale.Error.Unauthorized;
              }

              options.onError?.(
                new Error(
                  `Request failed with status ${res.status}: ${extraInfo}`,
                ),
              );
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;

              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text);
              options.onError?.(
                new Error(`Failed to parse response: ${text}`),
              );
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        if (!res.ok) {
          const errorText = await res.text();
          options.onError?.(
            new Error(
              `Request failed with status ${res.status}: ${errorText}`,
            ),
          );
          return;
        }

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
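
  // Usage statistics are not reported for this provider; return zeroed quota.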
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
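
  // Model listing is disabled (see disableListModels); return an empty list.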
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
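
// Usage sketch (illustrative only; the model name and providerName values are
// placeholders, and the ChatOptions shape follows the fields used above):
//
//   const api = new SparkApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello, Spark!" }],
//     config: { model: "general", stream: true, providerName: "Iflytek" },
//     onUpdate: (_full, delta) => console.log(delta),
//     onFinish: (message) => console.log("[done]", message),
//     onError: (e) => console.error(e),
//     onController: (c) => {
//       // keep the AbortController to cancel mid-stream if needed
//     },
//   });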