google.ts

import {
  ApiPath,
  Google,
  GEMINI_BASE_URL,
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  SpeechOptions,
} from "../api";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { stream, preProcessImageContent } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
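
// LLMApi implementation for Google's Gemini models. Requests go through the
// app proxy (ApiPath.Google) or directly to GEMINI_BASE_URL in the desktop
// app; streaming uses the REST API's `alt=sse` mode.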
export class GeminiProApi implements LLMApi {
  path(path: string, shouldStream = false): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.googleUrl;
    }

    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    let chatPath = [baseUrl, path].join("/");
    if (shouldStream) {
      chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
    }

    return chatPath;
  }
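
  // Flattens a generateContent response (or an array of streamed chunks)
  // into plain text, falling back to the API error message when no candidate
  // text is present.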
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    const getTextFromParts = (parts: any[]) => {
      if (!Array.isArray(parts)) return "";

      return parts
        .map((part) => part?.text || "")
        .filter((text) => text.trim() !== "")
        .join("\n\n");
    };

    let content = "";
    if (Array.isArray(res)) {
      // streamed responses arrive as an array of chunks
      res.forEach((item) => {
        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
      });
    }

    return (
      getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
      res?.error?.message ||
      ""
    );
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
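
  // Sends one chat request. Steps: resolve cached image URLs to base64,
  // convert messages to Gemini's { role, parts } shape, merge consecutive
  // same-role turns, then either stream over SSE or do a single fetch.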
  async chat(options: ChatOptions): Promise<void> {
    const apiClient = this;
    let multimodal = false;

    // try to get base64 images from the local cache via image_url
    const _messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      _messages.push({ role: v.role, content });
    }
    const messages = _messages.map((v) => {
      let parts: any[] = [{ text: getMessageTextContent(v) }];
      if (isVisionModel(options.config.model)) {
        const images = getMessageImages(v);
        if (images.length > 0) {
          multimodal = true;
          parts = parts.concat(
            images.map((image) => {
              // images arrive as data URLs: "data:<mime>;base64,<payload>"
              const imageType = image.split(";")[0].split(":")[1];
              const imageData = image.split(",")[1];
              return {
                inline_data: {
                  mime_type: imageType,
                  data: imageData,
                },
              };
            }),
          );
        }
      }
      return {
        role: v.role.replace("assistant", "model").replace("system", "user"),
        parts: parts,
      };
    });

    // google requires that roles in neighboring messages must not be the same
    for (let i = 0; i < messages.length - 1; ) {
      // Check if the current and next item have the same role
      if (messages[i].role === messages[i + 1].role) {
        // Concatenate the 'parts' of the current and next item
        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
        // Remove the next item
        messages.splice(i + 1, 1);
      } else {
        // Move to the next item
        i++;
      }
    }

    // if (visionModel && messages.length > 1) {
    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
    // }

    const accessStore = useAccessStore.getState();

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
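
    // Request body for the generateContent endpoint; the single user-chosen
    // safety threshold is applied to all four harm categories.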
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: [
        //   "Title"
        // ],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // "topK": modelConfig.top_k,
      },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: accessStore.googleSafetySettings,
        },
      ],
    };

    let shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
      const chatPath = this.path(
        Google.ChatPath(modelConfig.model),
        shouldStream,
      );

      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      const isThinking = options.config.model.includes("-thinking");
      // abort the request when it exceeds the timeout (thinking models get a longer budget)
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
      );
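
      // Streaming goes through the shared stream() helper below; otherwise a
      // single fetch is made and the full JSON body is parsed.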
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
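
        // stream() drives the request/tool-call loop: the first callback
        // parses each SSE chunk (collecting functionCall parts into
        // runTools), the second appends the tool-call message and results to
        // `contents` before the follow-up request.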
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          // @ts-ignore
          tools.length > 0
            ? // @ts-ignore
              [{ functionDeclarations: tools.map((tool) => tool.function) }]
            : [],
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const chunkJson = JSON.parse(text);

            const functionCall = chunkJson?.candidates
              ?.at(0)
              ?.content?.parts?.at(0)?.functionCall;
            if (functionCall) {
              const { name, args } = functionCall;
              runTools.push({
                id: nanoid(),
                type: "function",
                function: {
                  name,
                  arguments: JSON.stringify(args), // utils.chat calls the function after JSON.parse
                },
              });
            }
            return chunkJson?.candidates
              ?.at(0)
              ?.content?.parts?.map((part: { text: string }) => part.text)
              .join("\n\n");
          },
          // processToolMessage: appends the tool_calls message and the tool
          // call results to `contents`
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.contents?.splice(
              // @ts-ignore
              requestPayload?.contents?.length,
              0,
              {
                role: "model",
                parts: toolCallMessage.tool_calls.map(
                  (tool: ChatMessageTool) => ({
                    functionCall: {
                      name: tool?.function?.name,
                      args: JSON.parse(tool?.function?.arguments as string),
                    },
                  }),
                ),
              },
              // @ts-ignore
              ...toolCallResult.map((result) => ({
                role: "function",
                parts: [
                  {
                    functionResponse: {
                      name: result.name,
                      response: {
                        name: result.name,
                        content: result.content, // TODO just text content...
                      },
                    },
                  },
                ],
              })),
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        if (resJson?.promptFeedback?.blockReason) {
          // the prompt was blocked by a safety filter: report it and stop
          options.onError?.(
            new Error(
              "Message is being blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
          return;
        }

        const message = apiClient.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
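
// Minimal usage sketch (illustrative only; real call sites live in the chat
// store, and the exact ChatOptions shape is defined in ../api, so the config
// fields below are assumptions):
//
//   const api = new GeminiProApi();
//   await api.chat({
//     messages: [{ role: "user", content: "Hello" }],
//     config: { model: "gemini-pro", stream: true } as any,
//     onFinish: (message, res) => console.log(message),
//     onError: (e) => console.error(e),
//   });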