google.ts

import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";

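// A client-side implementation of the LLMApi interface backed by Google's
// Gemini generateContent / streamGenerateContent endpoints.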
export class GeminiProApi implements LLMApi {
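  // Pull the text of the first candidate out of a non-streaming response,
  // falling back to the API error message or an empty string.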
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    return (
      res?.candidates?.at(0)?.content?.parts?.at(0)?.text ||
      res?.error?.message ||
      ""
    );
  }

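  // Build a Gemini-style request from the chat history, then either stream
  // the reply from streamGenerateContent or make a single generateContent call.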
  async chat(options: ChatOptions): Promise<void> {
    // const apiClient = this;
    let multimodal = false;
    const messages = options.messages.map((v) => {
      let parts: any[] = [{ text: getMessageTextContent(v) }];
      if (isVisionModel(options.config.model)) {
        const images = getMessageImages(v);
        if (images.length > 0) {
          multimodal = true;
          parts = parts.concat(
            images.map((image) => {
              // split a data URL ("data:<mime>;base64,<data>") into mime type and payload
              const imageType = image.split(";")[0].split(":")[1];
              const imageData = image.split(",")[1];
              return {
                inline_data: {
                  mime_type: imageType,
                  data: imageData,
                },
              };
            }),
          );
        }
      }
      return {
        role: v.role.replace("assistant", "model").replace("system", "user"),
        parts: parts,
      };
    });

    // google requires that role in neighboring messages must not be the same
    for (let i = 0; i < messages.length - 1; ) {
      // Check if the current and next message have the same role
      if (messages[i].role === messages[i + 1].role) {
        // Concatenate the 'parts' of the current and next item
        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
        // Remove the next item
        messages.splice(i + 1, 1);
      } else {
        // Move to the next item
        i++;
      }
    }

    // if (visionModel && messages.length > 1) {
    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
    // }

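    // Merge the app-wide model config, the current mask's overrides, and the
    // model chosen for this request.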
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

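    // Request body in the Gemini REST format: the merged conversation plus
    // generation parameters and safety settings that only block high-risk content.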
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: [
        //   "Title"
        // ],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // "topK": modelConfig.top_k,
      },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: "BLOCK_ONLY_HIGH",
        },
      ],
    };

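    // Resolve the endpoint: a user-configured Google URL, the desktop-app
    // proxy, or the web app's /api/google route.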
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.googleUrl;
    }

    const isApp = !!getClientConfig()?.isApp;

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      // let baseUrl = accessStore.googleUrl;

      if (!baseUrl) {
        baseUrl = isApp
          ? DEFAULT_API_HOST +
            "/api/proxy/google/" +
            Google.ChatPath(modelConfig.model)
          : this.path(Google.ChatPath(modelConfig.model));
      }

      if (isApp) {
        baseUrl += `?key=${accessStore.googleApiKey}`;
      }

      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it does not complete within the timeout
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

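      // Streaming path: collect decoded chunks in `remainText` and replay them
      // to the UI via a requestAnimationFrame loop so the text appears smoothly.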
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let existingTexts: string[] = [];
        const finish = () => {
          finished = true;
          options.onFinish(existingTexts.join(""));
        };

        // animate response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            finish();
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

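        // Call the streamGenerateContent endpoint and incrementally parse the
        // growing JSON array of response chunks.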
        fetch(
          baseUrl.replace("generateContent", "streamGenerateContent"),
          chatPayload,
        )
          .then((response) => {
            // the response has started arriving, so cancel the request timeout
            clearTimeout(requestTimeoutId);
            const reader = response?.body?.getReader();
            const decoder = new TextDecoder();
            let partialData = "";

            return reader?.read().then(function processText({
              done,
              value,
            }): Promise<any> {
              if (done) {
                if (response.status !== 200) {
                  try {
                    let data = JSON.parse(ensureProperEnding(partialData));
                    if (data && data[0].error) {
                      options.onError?.(new Error(data[0].error.message));
                    } else {
                      options.onError?.(new Error("Request failed"));
                    }
                  } catch (_) {
                    options.onError?.(new Error("Request failed"));
                  }
                }

                console.log("Stream complete");
                // options.onFinish(responseText + remainText);
                finished = true;
                return Promise.resolve();
              }

              partialData += decoder.decode(value, { stream: true });

              try {
                let data = JSON.parse(ensureProperEnding(partialData));

                const textArray = data.reduce(
                  (acc: string[], item: { candidates: any[] }) => {
                    const texts = item.candidates.map((candidate) =>
                      candidate.content.parts
                        .map((part: { text: any }) => part.text)
                        .join(""),
                    );
                    return acc.concat(texts);
                  },
                  [],
                );

                if (textArray.length > existingTexts.length) {
                  const deltaArray = textArray.slice(existingTexts.length);
                  existingTexts = textArray;
                  remainText += deltaArray.join("");
                }
              } catch (error) {
                // console.log("[Response Animation] error: ", error,partialData);
                // skip error message when parsing json
              }

              return reader.read().then(processText);
            });
          })
          .catch((error) => {
            console.error("Error:", error);
          });
      } else {
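        // Non-streaming path: a single generateContent call; surface a blocked
        // prompt via onError, then hand the extracted text to onFinish.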
        const res = await fetch(baseUrl, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        if (resJson?.promptFeedback?.blockReason) {
          // being blocked
          options.onError?.(
            new Error(
              "Message is being blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
        }
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }

  path(path: string): string {
    return "/api/google/" + path;
  }
}
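
// The streaming endpoint returns a single JSON array that grows chunk by chunk;
// appending "]" lets a partial payload be parsed as valid JSON.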
function ensureProperEnding(str: string) {
  if (str.startsWith("[") && !str.endsWith("]")) {
    return str + "]";
  }
  return str;
}
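
// Usage sketch (illustrative only): the client is driven through the LLMApi
// interface. The exact ChatOptions shape is defined in "../api"; the message
// and config fields below are assumptions based on how chat() reads them above.
//
//   const api = new GeminiProApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gemini-pro", stream: true },
//     onUpdate: (full, delta) => console.log(delta),
//     onFinish: (text) => console.log("final:", text),
//     onError: (err) => console.error(err),
//   });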