google.ts

import {
  ApiPath,
  Google,
  GEMINI_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  SpeechOptions,
} from "../api";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { stream, preProcessImageContent } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
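
// Client for Google's Gemini `generateContent` endpoint, adapted to the
// app's common LLMApi interface.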
export class GeminiProApi implements LLMApi {
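  // Resolve the request URL: a user-configured proxy, the official endpoint
  // (desktop app), or the app's own proxy path, with `alt=sse` appended so
  // the endpoint emits server-sent events.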
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.googleUrl;
    }

    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    let chatPath = [baseUrl, path].join("/");
    chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
    return chatPath;
  }
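
  // Pull the text of the first candidate out of a Gemini response, falling
  // back to the error message (if any) or an empty string.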
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    return (
      res?.candidates?.at(0)?.content?.parts?.at(0)?.text ||
      res?.error?.message ||
      ""
    );
  }
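
  // Speech synthesis is not supported for Gemini in this client.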
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
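
  // Send a chat request. Messages are converted to Gemini's `contents`
  // format, then either streamed over SSE (with function-calling support)
  // or fetched in one shot.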
  async chat(options: ChatOptions): Promise<void> {
    const apiClient = this;
    let multimodal = false;

    // Resolve image_url references to base64 data from the local cache.
    const _messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      _messages.push({ role: v.role, content });
    }

    const messages = _messages.map((v) => {
      let parts: any[] = [{ text: getMessageTextContent(v) }];
      if (isVisionModel(options.config.model)) {
        const images = getMessageImages(v);
        if (images.length > 0) {
          multimodal = true;
          parts = parts.concat(
            images.map((image) => {
              // Split a data URL ("data:<mime>;base64,<data>") into MIME type and payload.
              const imageType = image.split(";")[0].split(":")[1];
              const imageData = image.split(",")[1];
              return {
                inline_data: {
                  mime_type: imageType,
                  data: imageData,
                },
              };
            }),
          );
        }
      }
      return {
        // Gemini uses "model" instead of "assistant" and has no "system" role.
        role: v.role.replace("assistant", "model").replace("system", "user"),
        parts: parts,
      };
    });

    // Gemini requires that neighboring messages have different roles, so
    // merge consecutive messages that share one.
    for (let i = 0; i < messages.length - 1; ) {
      // Check whether the current and next item share a role
      if (messages[i].role === messages[i + 1].role) {
        // Concatenate the 'parts' of the current and next item
        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
        // Remove the next item
        messages.splice(i + 1, 1);
      } else {
        // Move to the next item
        i++;
      }
    }

    // if (visionModel && messages.length > 1) {
    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
    // }
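
    // Merge global defaults, the session mask's settings, and the
    // per-request model into one model config.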
    const accessStore = useAccessStore.getState();
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
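
    // Gemini generateContent body: the merged conversation plus sampling
    // parameters and safety thresholds.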
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: [
        //   "Title"
        // ],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // "topK": modelConfig.top_k,
      },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
          threshold: accessStore.googleSafetySettings,
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: accessStore.googleSafetySettings,
        },
      ],
    };

    let shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
      const chatPath = this.path(Google.ChatPath(modelConfig.model));
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it takes too long.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
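
      // Streaming path: expose the session's plugins to Gemini as function
      // declarations and parse SSE chunks for text and function calls.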
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          // @ts-ignore
          [{ functionDeclarations: tools.map((tool) => tool.function) }],
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const chunkJson = JSON.parse(text);
            const functionCall = chunkJson?.candidates
              ?.at(0)
              ?.content?.parts?.at(0)?.functionCall;
            if (functionCall) {
              const { name, args } = functionCall;
              runTools.push({
                id: nanoid(),
                type: "function",
                function: {
                  name,
                  // utils.chat will JSON.parse these arguments before calling the function
                  arguments: JSON.stringify(args),
                },
              });
            }
            return chunkJson?.candidates?.at(0)?.content?.parts?.at(0)?.text;
          },
          // processToolMessage: append the tool_calls message and the tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.contents?.splice(
              // @ts-ignore
              requestPayload?.contents?.length,
              0,
              {
                role: "model",
                parts: toolCallMessage.tool_calls.map(
                  (tool: ChatMessageTool) => ({
                    functionCall: {
                      name: tool?.function?.name,
                      args: JSON.parse(tool?.function?.arguments as string),
                    },
                  }),
                ),
              },
              // @ts-ignore
              ...toolCallResult.map((result) => ({
                role: "function",
                parts: [
                  {
                    functionResponse: {
                      name: result.name,
                      response: {
                        name: result.name,
                        content: result.content, // TODO: text content only...
                      },
                    },
                  },
                ],
              })),
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        if (resJson?.promptFeedback?.blockReason) {
          // The prompt was blocked; report the reason and stop.
          options.onError?.(
            new Error(
              "Message is being blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
          return;
        }
        const message = apiClient.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
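
  // Usage statistics are not implemented for the Gemini endpoint.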
  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}