google.ts

import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import { useAppConfig, useChatStore } from "@/app/store";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import Locale from "../../locales";
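
/**
 * LLMApi implementation for Google's Gemini Pro, routed through the app's
 * `/api/google/` proxy (see `path()` below). Streaming is scaffolded but
 * currently disabled; requests go through a single non-streaming fetch.
 */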
export class GeminiProApi implements LLMApi {
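  /**
   * Pull the reply text out of a GenerateContent response: the first part of
   * the first candidate, falling back to the API error message if present.
   */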
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    return (
      res?.candidates?.at(0)?.content?.parts?.at(0)?.text ||
      res?.error?.message ||
      ""
    );
  }
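
  /**
   * Send the chat history to Gemini. Gemini's API only accepts the "user" and
   * "model" roles, so both "assistant" and "system" messages are mapped to
   * "model" before the request is built.
   */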
  async chat(options: ChatOptions): Promise<void> {
    const messages = options.messages.map((v) => ({
      role: v.role.replace("assistant", "model").replace("system", "model"),
      parts: [{ text: v.content }],
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
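
    // GenerateContent request body: `contents` carries the mapped history,
    // `generationConfig` the sampling parameters.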
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: ["Title"],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // topK: modelConfig.top_k,
      },
    };

    console.log("[Request] google payload: ", requestPayload);

    // TODO: support streaming later
    const shouldStream = false;
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      const chatPath = this.path(Google.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request once it exceeds the configured timeout
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
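
      // The stream path below buffers incoming deltas in `remainText`, and a
      // requestAnimationFrame loop drains a few characters per frame into
      // `responseText` to produce a typewriter effect in the UI.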
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response text to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation loop
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
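
        // Open the SSE connection: onopen inspects the response headers and
        // surfaces error payloads, onmessage feeds deltas into the animation
        // buffer, and onclose/onerror finalize the turn.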
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Gemini] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
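          // NOTE: the delta shape parsed below (`choices[0].delta.content`)
          // is OpenAI's SSE schema, kept as a placeholder; Gemini's streaming
          // responses use a different shape and will need their own parser
          // once streaming is enabled.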
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text) as {
                choices: Array<{
                  delta: {
                    content: string;
                  };
                }>;
              };
              const delta = json.choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
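        // Non-streaming request: a single fetch, then parse the full JSON
        // response body.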
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();

        if (resJson?.promptFeedback?.blockReason) {
          // request was blocked by Gemini's safety filters
          options.onError?.(
            new Error(
              "Message was blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
        }
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.error("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
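
  // Usage reporting and model listing are not implemented for Gemini yet.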
  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }

  path(path: string): string {
    return "/api/google/" + path;
  }
}
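
// Usage sketch (hypothetical caller; field shapes inferred from how this
// file reads `options`, not from the full ChatOptions definition):
//
//   const api = new GeminiProApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello, Gemini" }],
//     config: { model: "gemini-pro" },
//     onFinish: (reply) => console.log(reply),
//     onError: (err) => console.error(err),
//   });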