google.ts

import {
  ApiPath,
  Google,
  GEMINI_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  SpeechOptions,
} from "../api";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { stream, preProcessImageContent } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
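
/**
 * LLMApi implementation for Google's Gemini generateContent endpoint.
 * Speech synthesis and usage reporting are intentionally unimplemented.
 */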
export class GeminiProApi implements LLMApi {
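  /**
   * Resolve the request URL: prefer a user-configured endpoint, otherwise
   * fall back to GEMINI_BASE_URL (desktop app) or the app proxy path (web),
   * and append `alt=sse` when a streaming response is requested.
   */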
  path(path: string, shouldStream = false): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.googleUrl;
    }

    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    let chatPath = [baseUrl, path].join("/");
    if (shouldStream) {
      chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
    }

    return chatPath;
  }
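  /**
   * Pull the text out of a generateContent response. Handles both a single
   * response object and an array of streamed chunks, and falls back to the
   * API error message when no candidate text is present.
   */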
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    const getTextFromParts = (parts: any[]) => {
      if (!Array.isArray(parts)) return "";

      return parts
        .map((part) => part?.text || "")
        .filter((text) => text.trim() !== "")
        .join("\n\n");
    };

    return (
      getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
      res?.error?.message ||
      ""
    );
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
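  /**
   * Send a chat request. Messages are converted to Gemini's `contents`
   * format (roles remapped, images inlined for vision models, adjacent
   * same-role messages merged), then dispatched either as an SSE stream
   * with plugin/function-call support or as a single fetch.
   */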
  async chat(options: ChatOptions): Promise<void> {
    const apiClient = this;
    let multimodal = false;

    // resolve image_url parts to base64 data from the local image cache
    const _messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      _messages.push({ role: v.role, content });
    }

    const messages = _messages.map((v) => {
      let parts: any[] = [{ text: getMessageTextContent(v) }];
      if (isVisionModel(options.config.model)) {
        const images = getMessageImages(v);
        if (images.length > 0) {
          multimodal = true;
          parts = parts.concat(
            images.map((image) => {
              // split a data URL ("data:<mime>;base64,<data>") into mime type and payload
              const imageType = image.split(";")[0].split(":")[1];
              const imageData = image.split(",")[1];
              return {
                inline_data: {
                  mime_type: imageType,
                  data: imageData,
                },
              };
            }),
          );
        }
      }
      return {
        // Gemini uses "model" instead of "assistant" and has no system role
        role: v.role.replace("assistant", "model").replace("system", "user"),
        parts: parts,
      };
    });
    // Gemini requires that adjacent messages never share the same role
    for (let i = 0; i < messages.length - 1; ) {
      if (messages[i].role === messages[i + 1].role) {
        // merge the next message's parts into the current message, then drop it
        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
        messages.splice(i + 1, 1);
      } else {
        i++;
      }
    }

    // if (visionModel && messages.length > 1) {
    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
    // }
    const accessStore = useAccessStore.getState();
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      model: options.config.model,
    };
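    // Gemini generateContent request body: `contents` carries the converted
    // messages, and all four harm categories share the single user-configured
    // safety threshold.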
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: ["Title"],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // topK: modelConfig.top_k,
      },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: accessStore.googleSafetySettings,
        },
      ],
    };
    let shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
      const chatPath = this.path(
        Google.ChatPath(modelConfig.model),
        shouldStream,
      );
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request once it exceeds the configured timeout
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
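
      // Streaming: delegate to the shared stream() helper, which parses SSE
      // chunks, runs plugin function calls, and re-issues the request with
      // tool results appended (see processToolMessage below).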
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          // @ts-ignore
          tools.length > 0
            ? // @ts-ignore
              [{ functionDeclarations: tools.map((tool) => tool.function) }]
            : [],
          funcs,
          controller,
          // parseSSE: pull text out of each chunk and collect function calls
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const chunkJson = JSON.parse(text);

            const functionCall = chunkJson?.candidates
              ?.at(0)
              ?.content?.parts?.at(0)?.functionCall;
            if (functionCall) {
              const { name, args } = functionCall;
              runTools.push({
                id: nanoid(),
                type: "function",
                function: {
                  name,
                  arguments: JSON.stringify(args), // utils/chat invokes the function, parsing this back with JSON.parse
                },
              });
            }
            return chunkJson?.candidates
              ?.at(0)
              ?.content?.parts?.map((part: { text: string }) => part.text)
              .join("\n\n");
          },
          // processToolMessage: append the model's function calls and their
          // results so the follow-up request carries the full tool exchange
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.contents?.splice(
              // @ts-ignore
              requestPayload?.contents?.length,
              0,
              {
                role: "model",
                parts: toolCallMessage.tool_calls.map(
                  (tool: ChatMessageTool) => ({
                    functionCall: {
                      name: tool?.function?.name,
                      args: JSON.parse(tool?.function?.arguments as string),
                    },
                  }),
                ),
              },
              // @ts-ignore
              ...toolCallResult.map((result) => ({
                role: "function",
                parts: [
                  {
                    functionResponse: {
                      name: result.name,
                      response: {
                        name: result.name,
                        content: result.content, // TODO just text content...
                      },
                    },
                  },
                ],
              })),
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        if (resJson?.promptFeedback?.blockReason) {
          // the prompt was rejected by the safety filters
          options.onError?.(
            new Error(
              "Message was blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
        }
        const message = apiClient.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
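
// Usage sketch (hypothetical -- in the real app GeminiProApi is constructed
// by the client factory, and ChatOptions carries more callbacks than shown):
//
//   const api = new GeminiProApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gemini-pro", stream: true },
//     onFinish: (message, res) => console.log(message),
//     onError: (e) => console.error(e),
//   });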