Parcourir la source

Merge branch 'main' into fix-export-garbled

fred-bf il y a 1 an
Parent
commit
2bf6111bf5

+ 0 - 43
.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,43 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: "[Bug] "
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Deployment**
-- [ ] Docker
-- [ ] Vercel
-- [ ] Server
-
-**Desktop (please complete the following information):**
- - OS: [e.g. iOS]
- - Browser [e.g. chrome, safari]
- - Version [e.g. 22]
-
-**Smartphone (please complete the following information):**
- - Device: [e.g. iPhone6]
- - OS: [e.g. iOS8.1]
- - Browser [e.g. stock browser, safari]
- - Version [e.g. 22]
-
-**Additional Logs**
-Add any logs about the problem here.

+ 146 - 0
.github/ISSUE_TEMPLATE/bug_report.yml

@@ -0,0 +1,146 @@
+name: Bug report
+description: Create a report to help us improve
+title: "[Bug] "
+labels: ["bug"]
+
+body:
+  - type: markdown
+    attributes:
+      value: "## Describe the bug"
+  - type: textarea
+    id: bug-description
+    attributes:
+      label: "Bug Description"
+      description: "A clear and concise description of what the bug is."
+      placeholder: "Explain the bug..."
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "## To Reproduce"
+  - type: textarea
+    id: steps-to-reproduce
+    attributes:
+      label: "Steps to Reproduce"
+      description: "Steps to reproduce the behavior:"
+      placeholder: |
+        1. Go to '...'
+        2. Click on '....'
+        3. Scroll down to '....'
+        4. See error
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "## Expected behavior"
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: "Expected Behavior"
+      description: "A clear and concise description of what you expected to happen."
+      placeholder: "Describe what you expected to happen..."
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "## Screenshots"
+  - type: textarea
+    id: screenshots
+    attributes:
+      label: "Screenshots"
+      description: "If applicable, add screenshots to help explain your problem."
+      placeholder: "Paste your screenshots here or write 'N/A' if not applicable..."
+    validations:
+      required: false
+
+  - type: markdown
+    attributes:
+      value: "## Deployment"
+  - type: checkboxes
+    id: deployment
+    attributes:
+      label: "Deployment Method"
+      description: "Please select the deployment method you are using."
+      options:
+        - label: "Docker"
+        - label: "Vercel"
+        - label: "Server"
+
+  - type: markdown
+    attributes:
+      value: "## Desktop (please complete the following information):"
+  - type: input
+    id: desktop-os
+    attributes:
+      label: "Desktop OS"
+      description: "Your desktop operating system."
+      placeholder: "e.g., Windows 10"
+    validations:
+      required: false
+  - type: input
+    id: desktop-browser
+    attributes:
+      label: "Desktop Browser"
+      description: "Your desktop browser."
+      placeholder: "e.g., Chrome, Safari"
+    validations:
+      required: false
+  - type: input
+    id: desktop-version
+    attributes:
+      label: "Desktop Browser Version"
+      description: "Version of your desktop browser."
+      placeholder: "e.g., 89.0"
+    validations:
+      required: false
+
+  - type: markdown
+    attributes:
+      value: "## Smartphone (please complete the following information):"
+  - type: input
+    id: smartphone-device
+    attributes:
+      label: "Smartphone Device"
+      description: "Your smartphone device."
+      placeholder: "e.g., iPhone X"
+    validations:
+      required: false
+  - type: input
+    id: smartphone-os
+    attributes:
+      label: "Smartphone OS"
+      description: "Your smartphone operating system."
+      placeholder: "e.g., iOS 14.4"
+    validations:
+      required: false
+  - type: input
+    id: smartphone-browser
+    attributes:
+      label: "Smartphone Browser"
+      description: "Your smartphone browser."
+      placeholder: "e.g., Safari"
+    validations:
+      required: false
+  - type: input
+    id: smartphone-version
+    attributes:
+      label: "Smartphone Browser Version"
+      description: "Version of your smartphone browser."
+      placeholder: "e.g., 14"
+    validations:
+      required: false
+
+  - type: markdown
+    attributes:
+      value: "## Additional Logs"
+  - type: textarea
+    id: additional-logs
+    attributes:
+      label: "Additional Logs"
+      description: "Add any logs about the problem here."
+      placeholder: "Paste any relevant logs here..."
+    validations:
+      required: false

+ 0 - 20
.github/ISSUE_TEMPLATE/feature_request.md

@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: "[Feature] "
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.

+ 53 - 0
.github/ISSUE_TEMPLATE/feature_request.yml

@@ -0,0 +1,53 @@
+name: Feature request
+description: Suggest an idea for this project
+title: "[Feature Request]: "
+labels: ["enhancement"]
+
+body:
+  - type: markdown
+    attributes:
+      value: "## Is your feature request related to a problem? Please describe."
+  - type: textarea
+    id: problem-description
+    attributes:
+      label: Problem Description
+      description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]"
+      placeholder: "Explain the problem you are facing..."
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "## Describe the solution you'd like"
+  - type: textarea
+    id: desired-solution
+    attributes:
+      label: Solution Description
+      description: A clear and concise description of what you want to happen.
+      placeholder: "Describe the solution you'd like..."
+    validations:
+      required: true
+
+  - type: markdown
+    attributes:
+      value: "## Describe alternatives you've considered"
+  - type: textarea
+    id: alternatives-considered
+    attributes:
+      label: Alternatives Considered
+      description: A clear and concise description of any alternative solutions or features you've considered.
+      placeholder: "Describe any alternative solutions or features you've considered..."
+    validations:
+      required: false
+
+  - type: markdown
+    attributes:
+      value: "## Additional context"
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: Additional Context
+      description: Add any other context or screenshots about the feature request here.
+      placeholder: "Add any other context or screenshots about the feature request here..."
+    validations:
+      required: false

+ 0 - 24
.github/ISSUE_TEMPLATE/功能建议.md

@@ -1,24 +0,0 @@
----
-name: 功能建议
-about: 请告诉我们你的灵光一闪
-title: "[Feature] "
-labels: ''
-assignees: ''
-
----
-
-> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。
-
-> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724)
-
-**这个功能与现有的问题有关吗?**
-如果有关,请在此列出链接或者描述问题。
-
-**你想要什么功能或者有什么建议?**
-尽管告诉我们。
-
-**有没有可以参考的同类竞品?**
-可以给出参考产品的链接或者截图。
-
-**其他信息**
-可以说说你的其他考虑。

+ 0 - 36
.github/ISSUE_TEMPLATE/反馈问题.md

@@ -1,36 +0,0 @@
----
-name: 反馈问题
-about: 请告诉我们你遇到的问题
-title: "[Bug] "
-labels: ''
-assignees: ''
-
----
-
-> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。
-
-> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724)
-
-**反馈须知**
-
-⚠️ 注意:不遵循此模板的任何帖子都会被立即关闭,如果没有提供下方的信息,我们无法定位你的问题。
-
-请在下方中括号内输入 x 来表示你已经知晓相关内容。
-- [ ] 我确认已经在 [常见问题](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/faq-cn.md) 中搜索了此次反馈的问题,没有找到解答;
-- [ ] 我确认已经在 [Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) 列表(包括已经 Close 的)中搜索了此次反馈的问题,没有找到解答。
-- [ ] 我确认已经在 [Vercel 使用教程](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/vercel-cn.md) 中搜索了此次反馈的问题,没有找到解答。
-
-**描述问题**
-请在此描述你遇到了什么问题。
-
-**如何复现**
-请告诉我们你是通过什么操作触发的该问题。
-
-**截图**
-请在此提供控制台截图、屏幕截图或者服务端的 log 截图。
-
-**一些必要的信息**
- - 系统:[比如 windows 10/ macos 12/ linux / android 11 / ios 16]
- - 浏览器: [比如 chrome, safari]
- - 版本: [填写设置页面的版本号]
- - 部署方式:[比如 vercel、docker 或者服务器部署]

+ 10 - 5
.github/workflows/app.yml

@@ -43,12 +43,9 @@ jobs:
           - os: ubuntu-latest
             arch: x86_64
             rust_target: x86_64-unknown-linux-gnu
-          - os: macos-latest
-            arch: x86_64
-            rust_target: x86_64-apple-darwin
           - os: macos-latest
             arch: aarch64
-            rust_target: aarch64-apple-darwin
+            rust_target: x86_64-apple-darwin,aarch64-apple-darwin
           - os: windows-latest
             arch: x86_64
             rust_target: x86_64-pc-windows-msvc
@@ -60,13 +57,14 @@ jobs:
         uses: actions/setup-node@v3
         with:
           node-version: 18
+          cache: 'yarn'
       - name: install Rust stable
         uses: dtolnay/rust-toolchain@stable
         with:
           targets: ${{ matrix.config.rust_target }}
       - uses: Swatinem/rust-cache@v2
         with:
-          key: ${{ matrix.config.rust_target }}
+          key: ${{ matrix.config.os }}
       - name: install dependencies (ubuntu only)
         if: matrix.config.os == 'ubuntu-latest'
         run: |
@@ -79,8 +77,15 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }}
           TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }}
+          APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }}
+          APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
+          APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }}
+          APPLE_ID: ${{ secrets.APPLE_ID }}
+          APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
+          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
         with:
           releaseId: ${{ needs.create-release.outputs.release_id }}
+          args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }}
 
   publish-release:
     permissions:

+ 1 - 0
.github/workflows/deploy_preview.yml

@@ -5,6 +5,7 @@ on:
     types:
       - opened
       - synchronize
+      - reopened
 
 env:
   VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}

+ 25 - 15
app/client/api.ts

@@ -14,9 +14,17 @@ export type MessageRole = (typeof ROLES)[number];
 export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
 export type ChatModel = ModelType;
 
+export interface MultimodalContent {
+  type: "text" | "image_url";
+  text?: string;
+  image_url?: {
+    url: string;
+  };
+}
+
 export interface RequestMessage {
   role: MessageRole;
-  content: string;
+  content: string | MultimodalContent[];
 }
 
 export interface LLMConfig {
@@ -143,11 +151,10 @@ export function getHeaders() {
   const accessStore = useAccessStore.getState();
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
-    "x-requested-with": "XMLHttpRequest",
-    "Accept": "application/json",
+    Accept: "application/json",
   };
   const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
-  const isGoogle = modelConfig.model === "gemini-pro";
+  const isGoogle = modelConfig.model.startsWith("gemini");
   const isAzure = accessStore.provider === ServiceProvider.Azure;
   const authHeader = isAzure ? "api-key" : "Authorization";
   const apiKey = isGoogle
@@ -155,20 +162,23 @@ export function getHeaders() {
     : isAzure
     ? accessStore.azureApiKey
     : accessStore.openaiApiKey;
-
+  const clientConfig = getClientConfig();
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;
 
-  // use user's api key first
-  if (validString(apiKey)) {
-    headers[authHeader] = makeBearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    validString(accessStore.accessCode)
-  ) {
-    headers[authHeader] = makeBearer(
-      ACCESS_CODE_PREFIX + accessStore.accessCode,
-    );
+  // when using google api in app, not set auth header
+  if (!(isGoogle && clientConfig?.isApp)) {
+    // use user's api key first
+    if (validString(apiKey)) {
+      headers[authHeader] = makeBearer(apiKey);
+    } else if (
+      accessStore.enabledAccessControl() &&
+      validString(accessStore.accessCode)
+    ) {
+      headers[authHeader] = makeBearer(
+        ACCESS_CODE_PREFIX + accessStore.accessCode,
+      );
+    }
   }
 
   return headers;

+ 77 - 25
app/client/platforms/google.ts

@@ -1,15 +1,14 @@
 import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
 import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import Locale from "../../locales";
-import { getServerSideConfig } from "@/app/config/server";
-import de from "@/app/locales/de";
+import { DEFAULT_API_HOST } from "@/app/constant";
+import {
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";
+
 export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);
@@ -21,11 +20,34 @@ export class GeminiProApi implements LLMApi {
     );
   }
   async chat(options: ChatOptions): Promise<void> {
-    const apiClient = this;
-    const messages = options.messages.map((v) => ({
-      role: v.role.replace("assistant", "model").replace("system", "user"),
-      parts: [{ text: v.content }],
-    }));
+    // const apiClient = this;
+    const visionModel = isVisionModel(options.config.model);
+    let multimodal = false;
+    const messages = options.messages.map((v) => {
+      let parts: any[] = [{ text: getMessageTextContent(v) }];
+      if (visionModel) {
+        const images = getMessageImages(v);
+        if (images.length > 0) {
+          multimodal = true;
+          parts = parts.concat(
+            images.map((image) => {
+              const imageType = image.split(";")[0].split(":")[1];
+              const imageData = image.split(",")[1];
+              return {
+                inline_data: {
+                  mime_type: imageType,
+                  data: imageData,
+                },
+              };
+            }),
+          );
+        }
+      }
+      return {
+        role: v.role.replace("assistant", "model").replace("system", "user"),
+        parts: parts,
+      };
+    });
 
     // google requires that role in neighboring messages must not be the same
     for (let i = 0; i < messages.length - 1; ) {
@@ -40,7 +62,9 @@ export class GeminiProApi implements LLMApi {
         i++;
       }
     }
-
+    // if (visionModel && messages.length > 1) {
+    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
+    // }
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -79,13 +103,30 @@ export class GeminiProApi implements LLMApi {
       ],
     };
 
-    console.log("[Request] google payload: ", requestPayload);
+    const accessStore = useAccessStore.getState();
+    let baseUrl = accessStore.googleUrl;
+    const isApp = !!getClientConfig()?.isApp;
 
-    const shouldStream = !!options.config.stream;
+    let shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      const chatPath = this.path(Google.ChatPath);
+      let googleChatPath = visionModel
+        ? Google.VisionChatPath
+        : Google.ChatPath;
+      let chatPath = this.path(googleChatPath);
+
+      // let baseUrl = accessStore.googleUrl;
+
+      if (!baseUrl) {
+        baseUrl = isApp
+          ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath
+          : chatPath;
+      }
+
+      if (isApp) {
+        baseUrl += `?key=${accessStore.googleApiKey}`;
+      }
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -101,10 +142,6 @@ export class GeminiProApi implements LLMApi {
       if (shouldStream) {
         let responseText = "";
         let remainText = "";
-        let streamChatPath = chatPath.replace(
-          "generateContent",
-          "streamGenerateContent",
-        );
         let finished = false;
 
         let existingTexts: string[] = [];
@@ -134,7 +171,11 @@ export class GeminiProApi implements LLMApi {
 
         // start animaion
         animateResponseText();
-        fetch(streamChatPath, chatPayload)
+
+        fetch(
+          baseUrl.replace("generateContent", "streamGenerateContent"),
+          chatPayload,
+        )
           .then((response) => {
             const reader = response?.body?.getReader();
             const decoder = new TextDecoder();
@@ -145,6 +186,19 @@ export class GeminiProApi implements LLMApi {
               value,
             }): Promise<any> {
               if (done) {
+                if (response.status !== 200) {
+                  try {
+                    let data = JSON.parse(ensureProperEnding(partialData));
+                    if (data && data[0].error) {
+                      options.onError?.(new Error(data[0].error.message));
+                    } else {
+                      options.onError?.(new Error("Request failed"));
+                    }
+                  } catch (_) {
+                    options.onError?.(new Error("Request failed"));
+                  }
+                }
+
                 console.log("Stream complete");
                 // options.onFinish(responseText + remainText);
                 finished = true;
@@ -185,11 +239,9 @@ export class GeminiProApi implements LLMApi {
             console.error("Error:", error);
           });
       } else {
-        const res = await fetch(chatPath, chatPayload);
+        const res = await fetch(baseUrl, chatPayload);
         clearTimeout(requestTimeoutId);
-
         const resJson = await res.json();
-
         if (resJson?.promptFeedback?.blockReason) {
           // being blocked
           options.onError?.(

+ 31 - 3
app/client/platforms/openai.ts

@@ -1,3 +1,4 @@
+"use client";
 import {
   ApiPath,
   DEFAULT_API_HOST,
@@ -8,7 +9,14 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 
-import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+  MultimodalContent,
+} from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
@@ -17,6 +25,11 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { makeAzurePath } from "@/app/azure";
+import {
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";
 
 export interface OpenAIListModelResponse {
   object: string;
@@ -45,7 +58,9 @@ export class ChatGPTApi implements LLMApi {
 
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
+      baseUrl = isApp
+        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
+        : ApiPath.OpenAI;
     }
 
     if (baseUrl.endsWith("/")) {
@@ -59,6 +74,8 @@ export class ChatGPTApi implements LLMApi {
       path = makeAzurePath(path, accessStore.azureApiVersion);
     }
 
+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
     return [baseUrl, path].join("/");
   }
 
@@ -67,9 +84,10 @@ export class ChatGPTApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: v.content,
+      content: visionModel ? v.content : getMessageTextContent(v),
     }));
 
     const modelConfig = {
@@ -92,6 +110,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
+    // add max_tokens to vision model
+    if (visionModel) {
+      Object.defineProperty(requestPayload, "max_tokens", {
+        enumerable: true,
+        configurable: true,
+        writable: true,
+        value: modelConfig.max_tokens,
+      });
+    }
+
     console.log("[Request] openai payload: ", requestPayload);
 
     const shouldStream = !!options.config.stream;

+ 122 - 13
app/components/chat.module.scss

@@ -1,5 +1,47 @@
 @import "../styles/animation.scss";
 
+.attach-images {
+  position: absolute;
+  left: 30px;
+  bottom: 32px;
+  display: flex;
+}
+
+.attach-image {
+  cursor: default;
+  width: 64px;
+  height: 64px;
+  border: rgba($color: #888, $alpha: 0.2) 1px solid;
+  border-radius: 5px;
+  margin-right: 10px;
+  background-size: cover;
+  background-position: center;
+  background-color: var(--white);
+
+  .attach-image-mask {
+    width: 100%;
+    height: 100%;
+    opacity: 0;
+    transition: all ease 0.2s;
+  }
+
+  .attach-image-mask:hover {
+    opacity: 1;
+  }
+
+  .delete-image {
+    width: 24px;
+    height: 24px;
+    cursor: pointer;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    border-radius: 5px;
+    float: right;
+    background-color: var(--white);
+  }
+}
+
 .chat-input-actions {
   display: flex;
   flex-wrap: wrap;
@@ -189,12 +231,10 @@
 
   animation: slide-in ease 0.3s;
 
-  $linear: linear-gradient(
-    to right,
-    rgba(0, 0, 0, 0),
-    rgba(0, 0, 0, 1),
-    rgba(0, 0, 0, 0)
-  );
+  $linear: linear-gradient(to right,
+      rgba(0, 0, 0, 0),
+      rgba(0, 0, 0, 1),
+      rgba(0, 0, 0, 0));
   mask-image: $linear;
 
   @mixin show {
@@ -327,7 +367,7 @@
   }
 }
 
-.chat-message-user > .chat-message-container {
+.chat-message-user>.chat-message-container {
   align-items: flex-end;
 }
 
@@ -349,6 +389,7 @@
       padding: 7px;
     }
   }
+
   /* Specific styles for iOS devices */
   @media screen and (max-device-width: 812px) and (-webkit-min-device-pixel-ratio: 2) {
     @supports (-webkit-touch-callout: none) {
@@ -381,6 +422,64 @@
   transition: all ease 0.3s;
 }
 
+.chat-message-item-image {
+  width: 100%;
+  margin-top: 10px;
+}
+
+.chat-message-item-images {
+  width: 100%;
+  display: grid;
+  justify-content: left;
+  grid-gap: 10px;
+  grid-template-columns: repeat(var(--image-count), auto);
+  margin-top: 10px;
+}
+
+.chat-message-item-image-multi {
+  object-fit: cover;
+  background-size: cover;
+  background-position: center;
+  background-repeat: no-repeat;
+}
+
+.chat-message-item-image,
+.chat-message-item-image-multi {
+  box-sizing: border-box;
+  border-radius: 10px;
+  border: rgba($color: #888, $alpha: 0.2) 1px solid;
+}
+
+
+@media only screen and (max-width: 600px) {
+  $calc-image-width: calc(100vw/3*2/var(--image-count));
+
+  .chat-message-item-image-multi {
+    width: $calc-image-width;
+    height: $calc-image-width;
+  }
+  
+  .chat-message-item-image {
+    max-width: calc(100vw/3*2);
+  }
+}
+
+@media screen and (min-width: 600px) {
+  $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count));
+  $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count));
+
+  .chat-message-item-image-multi {
+    width: $image-width;
+    height: $image-width;
+    max-width: $max-image-width;
+    max-height: $max-image-width;
+  }
+
+  .chat-message-item-image {
+    max-width: calc(calc(1200px - var(--sidebar-width))/3*2);
+  }
+}
+
 .chat-message-action-date {
   font-size: 12px;
   opacity: 0.2;
@@ -395,7 +494,7 @@
   z-index: 1;
 }
 
-.chat-message-user > .chat-message-container > .chat-message-item {
+.chat-message-user>.chat-message-container>.chat-message-item {
   background-color: var(--second);
 
   &:hover {
@@ -460,6 +559,7 @@
 
       @include single-line();
     }
+
     .hint-content {
       font-size: 12px;
 
@@ -474,15 +574,26 @@
 }
 
 .chat-input-panel-inner {
+  cursor: text;
   display: flex;
   flex: 1;
+  border-radius: 10px;
+  border: var(--border-in-light);
+}
+
+.chat-input-panel-inner-attach {
+  padding-bottom: 80px;
+}
+
+.chat-input-panel-inner:has(.chat-input:focus) {
+  border: 1px solid var(--primary);
 }
 
 .chat-input {
   height: 100%;
   width: 100%;
   border-radius: 10px;
-  border: var(--border-in-light);
+  border: none;
   box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.03);
   background-color: var(--white);
   color: var(--black);
@@ -494,9 +605,7 @@
   min-height: 68px;
 }
 
-.chat-input:focus {
-  border: 1px solid var(--primary);
-}
+.chat-input:focus {}
 
 .chat-input-send {
   background-color: var(--primary);
@@ -515,4 +624,4 @@
   .chat-input-send {
     bottom: 30px;
   }
-}
+}

+ 240 - 17
app/components/chat.tsx

@@ -6,6 +6,7 @@ import React, {
   useMemo,
   useCallback,
   Fragment,
+  RefObject,
 } from "react";
 
 import SendWhiteIcon from "../icons/send-white.svg";
@@ -15,6 +16,7 @@ import ExportIcon from "../icons/share.svg";
 import ReturnIcon from "../icons/return.svg";
 import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
+import LoadingButtonIcon from "../icons/loading.svg";
 import PromptIcon from "../icons/prompt.svg";
 import MaskIcon from "../icons/mask.svg";
 import MaxIcon from "../icons/max.svg";
@@ -27,6 +29,7 @@ import PinIcon from "../icons/pin.svg";
 import EditIcon from "../icons/rename.svg";
 import ConfirmIcon from "../icons/confirm.svg";
 import CancelIcon from "../icons/cancel.svg";
+import ImageIcon from "../icons/image.svg";
 
 import LightIcon from "../icons/light.svg";
 import DarkIcon from "../icons/dark.svg";
@@ -53,6 +56,10 @@ import {
   selectOrCopy,
   autoGrowTextArea,
   useMobileScreen,
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+  compressImage,
 } from "../utils";
 
 import dynamic from "next/dynamic";
@@ -89,6 +96,7 @@ import { prettyObject } from "../utils/format";
 import { ExportMessageModal } from "./exporter";
 import { getClientConfig } from "../config/client";
 import { useAllModels } from "../utils/hooks";
+import { MultimodalContent } from "../client/api";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -375,11 +383,13 @@ function ChatAction(props: {
   );
 }
 
-function useScrollToBottom() {
+function useScrollToBottom(
+  scrollRef: RefObject<HTMLDivElement>,
+  detach: boolean = false,
+) {
   // for auto-scroll
-  const scrollRef = useRef<HTMLDivElement>(null);
-  const [autoScroll, setAutoScroll] = useState(true);
 
+  const [autoScroll, setAutoScroll] = useState(true);
   function scrollDomToBottom() {
     const dom = scrollRef.current;
     if (dom) {
@@ -392,7 +402,7 @@ function useScrollToBottom() {
 
   // auto scroll
   useEffect(() => {
-    if (autoScroll) {
+    if (autoScroll && !detach) {
       scrollDomToBottom();
     }
   });
@@ -406,10 +416,14 @@ function useScrollToBottom() {
 }
 
 export function ChatActions(props: {
+  uploadImage: () => void;
+  setAttachImages: (images: string[]) => void;
+  setUploading: (uploading: boolean) => void;
   showPromptModal: () => void;
   scrollToBottom: () => void;
   showPromptHints: () => void;
   hitBottom: boolean;
+  uploading: boolean;
 }) {
   const config = useAppConfig();
   const navigate = useNavigate();
@@ -437,8 +451,16 @@ export function ChatActions(props: {
     [allModels],
   );
   const [showModelSelector, setShowModelSelector] = useState(false);
+  const [showUploadImage, setShowUploadImage] = useState(false);
 
   useEffect(() => {
+    const show = isVisionModel(currentModel);
+    setShowUploadImage(show);
+    if (!show) {
+      props.setAttachImages([]);
+      props.setUploading(false);
+    }
+
     // if current model is not available
     // switch to first available model
     const isUnavaliableModel = !models.some((m) => m.name === currentModel);
@@ -475,6 +497,13 @@ export function ChatActions(props: {
         />
       )}
 
+      {showUploadImage && (
+        <ChatAction
+          onClick={props.uploadImage}
+          text={Locale.Chat.InputActions.UploadImage}
+          icon={props.uploading ? <LoadingButtonIcon /> : <ImageIcon />}
+        />
+      )}
       <ChatAction
         onClick={nextTheme}
         text={Locale.Chat.InputActions.Theme[theme]}
@@ -610,6 +639,14 @@ export function EditMessageModal(props: { onClose: () => void }) {
   );
 }
 
+export function DeleteImageButton(props: { deleteImage: () => void }) {
+  return (
+    <div className={styles["delete-image"]} onClick={props.deleteImage}>
+      <DeleteIcon />
+    </div>
+  );
+}
+
 function _Chat() {
   type RenderMessage = ChatMessage & { preview?: boolean };
 
@@ -624,10 +661,22 @@ function _Chat() {
   const [userInput, setUserInput] = useState("");
   const [isLoading, setIsLoading] = useState(false);
   const { submitKey, shouldSubmit } = useSubmitHandler();
-  const { scrollRef, setAutoScroll, scrollDomToBottom } = useScrollToBottom();
+  const scrollRef = useRef<HTMLDivElement>(null);
+  const isScrolledToBottom = scrollRef?.current
+    ? Math.abs(
+        scrollRef.current.scrollHeight -
+          (scrollRef.current.scrollTop + scrollRef.current.clientHeight),
+      ) <= 1
+    : false;
+  const { setAutoScroll, scrollDomToBottom } = useScrollToBottom(
+    scrollRef,
+    isScrolledToBottom,
+  );
   const [hitBottom, setHitBottom] = useState(true);
   const isMobileScreen = useMobileScreen();
   const navigate = useNavigate();
+  const [attachImages, setAttachImages] = useState<string[]>([]);
+  const [uploading, setUploading] = useState(false);
 
   // prompt hints
   const promptStore = usePromptStore();
@@ -705,7 +754,10 @@ function _Chat() {
       return;
     }
     setIsLoading(true);
-    chatStore.onUserInput(userInput).then(() => setIsLoading(false));
+    chatStore
+      .onUserInput(userInput, attachImages)
+      .then(() => setIsLoading(false));
+    setAttachImages([]);
     localStorage.setItem(LAST_INPUT_KEY, userInput);
     setUserInput("");
     setPromptHints([]);
@@ -783,9 +835,9 @@ function _Chat() {
   };
   const onRightClick = (e: any, message: ChatMessage) => {
     // copy to clipboard
-    if (selectOrCopy(e.currentTarget, message.content)) {
+    if (selectOrCopy(e.currentTarget, getMessageTextContent(message))) {
       if (userInput.length === 0) {
-        setUserInput(message.content);
+        setUserInput(getMessageTextContent(message));
       }
 
       e.preventDefault();
@@ -853,7 +905,9 @@ function _Chat() {
 
     // resend the message
     setIsLoading(true);
-    chatStore.onUserInput(userMessage.content).then(() => setIsLoading(false));
+    const textContent = getMessageTextContent(userMessage);
+    const images = getMessageImages(userMessage);
+    chatStore.onUserInput(textContent, images).then(() => setIsLoading(false));
     inputRef.current?.focus();
   };
 
@@ -962,7 +1016,6 @@ function _Chat() {
     setHitBottom(isHitBottom);
     setAutoScroll(isHitBottom);
   };
-
   function scrollToBottom() {
     setMsgRenderIndex(renderMessages.length - CHAT_PAGE_SIZE);
     scrollDomToBottom();
@@ -1047,6 +1100,92 @@ function _Chat() {
     };
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
+  
+  const handlePaste = useCallback(
+    async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
+      const currentModel = chatStore.currentSession().mask.modelConfig.model;
+      if(!isVisionModel(currentModel)){return;}
+      const items = (event.clipboardData || window.clipboardData).items;
+      for (const item of items) {
+        if (item.kind === "file" && item.type.startsWith("image/")) {
+          event.preventDefault();
+          const file = item.getAsFile();
+          if (file) {
+            const images: string[] = [];
+            images.push(...attachImages);
+            images.push(
+              ...(await new Promise<string[]>((res, rej) => {
+                setUploading(true);
+                const imagesData: string[] = [];
+                compressImage(file, 256 * 1024)
+                  .then((dataUrl) => {
+                    imagesData.push(dataUrl);
+                    setUploading(false);
+                    res(imagesData);
+                  })
+                  .catch((e) => {
+                    setUploading(false);
+                    rej(e);
+                  });
+              })),
+            );
+            const imagesLength = images.length;
+
+            if (imagesLength > 3) {
+              images.splice(3, imagesLength - 3);
+            }
+            setAttachImages(images);
+          }
+        }
+      }
+    },
+    [attachImages, chatStore],
+  );
+
+  async function uploadImage() {
+    const images: string[] = [];
+    images.push(...attachImages);
+
+    images.push(
+      ...(await new Promise<string[]>((res, rej) => {
+        const fileInput = document.createElement("input");
+        fileInput.type = "file";
+        fileInput.accept =
+          "image/png, image/jpeg, image/webp, image/heic, image/heif";
+        fileInput.multiple = true;
+        fileInput.onchange = (event: any) => {
+          setUploading(true);
+          const files = event.target.files;
+          const imagesData: string[] = [];
+          for (let i = 0; i < files.length; i++) {
+            const file = event.target.files[i];
+            compressImage(file, 256 * 1024)
+              .then((dataUrl) => {
+                imagesData.push(dataUrl);
+                if (
+                  imagesData.length === 3 ||
+                  imagesData.length === files.length
+                ) {
+                  setUploading(false);
+                  res(imagesData);
+                }
+              })
+              .catch((e) => {
+                setUploading(false);
+                rej(e);
+              });
+          }
+        };
+        fileInput.click();
+      })),
+    );
+
+    const imagesLength = images.length;
+    if (imagesLength > 3) {
+      images.splice(3, imagesLength - 3);
+    }
+    setAttachImages(images);
+  }
 
   return (
     <div className={styles.chat} key={session.id}>
@@ -1154,15 +1293,29 @@ function _Chat() {
                           onClick={async () => {
                             const newMessage = await showPrompt(
                               Locale.Chat.Actions.Edit,
-                              message.content,
+                              getMessageTextContent(message),
                               10,
                             );
+                            let newContent: string | MultimodalContent[] =
+                              newMessage;
+                            const images = getMessageImages(message);
+                            if (images.length > 0) {
+                              newContent = [{ type: "text", text: newMessage }];
+                              for (let i = 0; i < images.length; i++) {
+                                newContent.push({
+                                  type: "image_url",
+                                  image_url: {
+                                    url: images[i],
+                                  },
+                                });
+                              }
+                            }
                             chatStore.updateCurrentSession((session) => {
                               const m = session.mask.context
                                 .concat(session.messages)
                                 .find((m) => m.id === message.id);
                               if (m) {
-                                m.content = newMessage;
+                                m.content = newContent;
                               }
                             });
                           }}
@@ -1217,7 +1370,11 @@ function _Chat() {
                               <ChatAction
                                 text={Locale.Chat.Actions.Copy}
                                 icon={<CopyIcon />}
-                                onClick={() => copyToClipboard(message.content)}
+                                onClick={() =>
+                                  copyToClipboard(
+                                    getMessageTextContent(message),
+                                  )
+                                }
                               />
                             </>
                           )}
@@ -1232,7 +1389,7 @@ function _Chat() {
                   )}
                   <div className={styles["chat-message-item"]}>
                     <Markdown
-                      content={message.content}
+                      content={getMessageTextContent(message)}
                       loading={
                         (message.preview || message.streaming) &&
                         message.content.length === 0 &&
@@ -1241,12 +1398,42 @@ function _Chat() {
                       onContextMenu={(e) => onRightClick(e, message)}
                       onDoubleClickCapture={() => {
                         if (!isMobileScreen) return;
-                        setUserInput(message.content);
+                        setUserInput(getMessageTextContent(message));
                       }}
                       fontSize={fontSize}
                       parentRef={scrollRef}
                       defaultShow={i >= messages.length - 6}
                     />
+                    {getMessageImages(message).length == 1 && (
+                      <img
+                        className={styles["chat-message-item-image"]}
+                        src={getMessageImages(message)[0]}
+                        alt=""
+                      />
+                    )}
+                    {getMessageImages(message).length > 1 && (
+                      <div
+                        className={styles["chat-message-item-images"]}
+                        style={
+                          {
+                            "--image-count": getMessageImages(message).length,
+                          } as React.CSSProperties
+                        }
+                      >
+                        {getMessageImages(message).map((image, index) => {
+                          return (
+                            <img
+                              className={
+                                styles["chat-message-item-image-multi"]
+                              }
+                              key={index}
+                              src={image}
+                              alt=""
+                            />
+                          );
+                        })}
+                      </div>
+                    )}
                   </div>
 
                   <div className={styles["chat-message-action-date"]}>
@@ -1266,9 +1453,13 @@ function _Chat() {
         <PromptHints prompts={promptHints} onPromptSelect={onPromptSelect} />
 
         <ChatActions
+          uploadImage={uploadImage}
+          setAttachImages={setAttachImages}
+          setUploading={setUploading}
           showPromptModal={() => setShowPromptModal(true)}
           scrollToBottom={scrollToBottom}
           hitBottom={hitBottom}
+          uploading={uploading}
           showPromptHints={() => {
             // Click again to close
             if (promptHints.length > 0) {
@@ -1281,8 +1472,16 @@ function _Chat() {
             onSearch("");
           }}
         />
-        <div className={styles["chat-input-panel-inner"]}>
+        <label
+          className={`${styles["chat-input-panel-inner"]} ${
+            attachImages.length != 0
+              ? styles["chat-input-panel-inner-attach"]
+              : ""
+          }`}
+          htmlFor="chat-input"
+        >
           <textarea
+            id="chat-input"
             ref={inputRef}
             className={styles["chat-input"]}
             placeholder={Locale.Chat.Input(submitKey)}
@@ -1291,12 +1490,36 @@ function _Chat() {
             onKeyDown={onInputKeyDown}
             onFocus={scrollToBottom}
             onClick={scrollToBottom}
+            onPaste={handlePaste}
             rows={inputRows}
             autoFocus={autoFocus}
             style={{
               fontSize: config.fontSize,
             }}
           />
+          {attachImages.length != 0 && (
+            <div className={styles["attach-images"]}>
+              {attachImages.map((image, index) => {
+                return (
+                  <div
+                    key={index}
+                    className={styles["attach-image"]}
+                    style={{ backgroundImage: `url("${image}")` }}
+                  >
+                    <div className={styles["attach-image-mask"]}>
+                      <DeleteImageButton
+                        deleteImage={() => {
+                          setAttachImages(
+                            attachImages.filter((_, i) => i !== index),
+                          );
+                        }}
+                      />
+                    </div>
+                  </div>
+                );
+              })}
+            </div>
+          )}
           <IconButton
             icon={<SendWhiteIcon />}
             text={Locale.Chat.Send}
@@ -1304,7 +1527,7 @@ function _Chat() {
             type="primary"
             onClick={() => doSubmit(userInput)}
           />
-        </div>
+        </label>
       </div>
 
       {showExport && (

+ 1 - 1
app/components/emoji.tsx

@@ -13,7 +13,7 @@ export function getEmojiUrl(unified: string, style: EmojiStyle) {
   // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
   // Old CDN broken, so I had to switch to this one
   // Author: https://github.com/H0llyW00dzZ
-  return `https://cdn.jsdelivr.net/npm/emoji-datasource-apple/img/${style}/64/${unified}.png`;
+  return `https://fastly.jsdelivr.net/npm/emoji-datasource-apple/img/${style}/64/${unified}.png`;
 }
 
 export function AvatarPicker(props: {

+ 56 - 3
app/components/exporter.module.scss

@@ -94,6 +94,7 @@
 
   button {
     flex-grow: 1;
+
     &:not(:last-child) {
       margin-right: 10px;
     }
@@ -190,6 +191,59 @@
         pre {
           overflow: hidden;
         }
+
+        .message-image {
+          width: 100%;
+          margin-top: 10px;
+        }
+
+        .message-images {
+          display: grid;
+          justify-content: left;
+          grid-gap: 10px;
+          grid-template-columns: repeat(var(--image-count), auto);
+          margin-top: 10px;
+        }
+
+        @media screen and (max-width: 600px) {
+          $image-width: calc(calc(100vw/2)/var(--image-count));
+
+          .message-image-multi {
+            width: $image-width;
+            height: $image-width;
+          }
+
+          .message-image {
+            max-width: calc(100vw/3*2);
+          }
+        }
+
+        @media screen and (min-width: 600px) {
+          $max-image-width: calc(900px/3*2/var(--image-count));
+          $image-width: calc(80vw/3*2/var(--image-count));
+
+          .message-image-multi {
+            width: $image-width;
+            height: $image-width;
+            max-width: $max-image-width;
+            max-height: $max-image-width;
+          }
+
+          .message-image {
+            max-width: calc(100vw/3*2);
+          }
+        }
+
+        .message-image-multi {
+          object-fit: cover;
+        }
+
+        .message-image,
+        .message-image-multi {
+          box-sizing: border-box;
+          border-radius: 10px;
+          border: rgba($color: #888, $alpha: 0.2) 1px solid;
+        }
       }
 
       &-assistant {
@@ -213,6 +267,5 @@
     }
   }
 
-  .default-theme {
-  }
-}
+  .default-theme {}
+}

+ 41 - 6
app/components/exporter.tsx

@@ -12,7 +12,12 @@ import {
   showToast,
 } from "./ui-lib";
 import { IconButton } from "./button";
-import { copyToClipboard, downloadAs, useMobileScreen } from "../utils";
+import {
+  copyToClipboard,
+  downloadAs,
+  getMessageImages,
+  useMobileScreen,
+} from "../utils";
 
 import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
@@ -34,6 +39,7 @@ import { prettyObject } from "../utils/format";
 import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
 import { getClientConfig } from "../config/client";
 import { ClientApi } from "../client/api";
+import { getMessageTextContent } from "../utils";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -287,7 +293,7 @@ export function RenderExport(props: {
           id={`${m.role}:${i}`}
           className={EXPORT_MESSAGE_CLASS_NAME}
         >
-          <Markdown content={m.content} defaultShow />
+          <Markdown content={getMessageTextContent(m)} defaultShow />
         </div>
       ))}
     </div>
@@ -307,7 +313,7 @@ export function PreviewActions(props: {
     setShouldExport(false);
 
     var api: ClientApi;
-    if (config.modelConfig.model === "gemini-pro") {
+    if (config.modelConfig.model.startsWith("gemini")) {
       api = new ClientApi(ModelProvider.GeminiPro);
     } else {
       api = new ClientApi(ModelProvider.GPT);
@@ -580,10 +586,37 @@ export function ImagePreviewer(props: {
 
               <div className={styles["body"]}>
                 <Markdown
-                  content={m.content}
+                  content={getMessageTextContent(m)}
                   fontSize={config.fontSize}
                   defaultShow
                 />
+                {getMessageImages(m).length == 1 && (
+                  <img
+                    key={i}
+                    src={getMessageImages(m)[0]}
+                    alt="message"
+                    className={styles["message-image"]}
+                  />
+                )}
+                {getMessageImages(m).length > 1 && (
+                  <div
+                    className={styles["message-images"]}
+                    style={
+                      {
+                        "--image-count": getMessageImages(m).length,
+                      } as React.CSSProperties
+                    }
+                  >
+                    {getMessageImages(m).map((src, i) => (
+                      <img
+                        key={i}
+                        src={src}
+                        alt="message"
+                        className={styles["message-image-multi"]}
+                      />
+                    ))}
+                  </div>
+                )}
               </div>
             </div>
           );
@@ -602,8 +635,10 @@ export function MarkdownPreviewer(props: {
     props.messages
       .map((m) => {
         return m.role === "user"
-          ? `## ${Locale.Export.MessageFromYou}:\n${m.content}`
-          : `## ${Locale.Export.MessageFromChatGPT}:\n${m.content.trim()}`;
+          ? `## ${Locale.Export.MessageFromYou}:\n${getMessageTextContent(m)}`
+          : `## ${Locale.Export.MessageFromChatGPT}:\n${getMessageTextContent(
+              m,
+            ).trim()}`;
       })
       .join("\n\n");
 

+ 1 - 1
app/components/home.tsx

@@ -171,7 +171,7 @@ export function useLoadData() {
   const config = useAppConfig();
 
   var api: ClientApi;
-  if (config.modelConfig.model === "gemini-pro") {
+  if (config.modelConfig.model.startsWith("gemini")) {
     api = new ClientApi(ModelProvider.GeminiPro);
   } else {
     api = new ClientApi(ModelProvider.GPT);

+ 21 - 4
app/components/mask.tsx

@@ -22,7 +22,7 @@ import {
   useAppConfig,
   useChatStore,
 } from "../store";
-import { ROLES } from "../client/api";
+import { MultimodalContent, ROLES } from "../client/api";
 import {
   Input,
   List,
@@ -38,7 +38,12 @@ import { useNavigate } from "react-router-dom";
 
 import chatStyle from "./chat.module.scss";
 import { useEffect, useState } from "react";
-import { copyToClipboard, downloadAs, readFromFile } from "../utils";
+import {
+  copyToClipboard,
+  downloadAs,
+  getMessageImages,
+  readFromFile,
+} from "../utils";
 import { Updater } from "../typing";
 import { ModelConfigList } from "./model-config";
 import { FileName, Path } from "../constant";
@@ -50,6 +55,7 @@ import {
   Draggable,
   OnDragEndResponder,
 } from "@hello-pangea/dnd";
+import { getMessageTextContent } from "../utils";
 
 // drag and drop helper function
 function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] {
@@ -244,7 +250,7 @@ function ContextPromptItem(props: {
         </>
       )}
       <Input
-        value={props.prompt.content}
+        value={getMessageTextContent(props.prompt)}
         type="text"
         className={chatStyle["context-content"]}
         rows={focusingInput ? 5 : 1}
@@ -289,7 +295,18 @@ export function ContextPrompts(props: {
   };
 
   const updateContextPrompt = (i: number, prompt: ChatMessage) => {
-    props.updateContext((context) => (context[i] = prompt));
+    props.updateContext((context) => {
+      const images = getMessageImages(context[i]);
+      context[i] = prompt;
+      if (images.length > 0) {
+        const text = getMessageTextContent(context[i]);
+        const newContext: MultimodalContent[] = [{ type: "text", text }];
+        for (const img of images) {
+          newContext.push({ type: "image_url", image_url: { url: img } });
+        }
+        context[i].content = newContext;
+      }
+    });
   };
 
   const onDragEnd: OnDragEndResponder = (result) => {

+ 5 - 2
app/components/message-selector.tsx

@@ -7,6 +7,7 @@ import { MaskAvatar } from "./mask";
 import Locale from "../locales";
 
 import styles from "./message-selector.module.scss";
+import { getMessageTextContent } from "../utils";
 
 function useShiftRange() {
   const [startIndex, setStartIndex] = useState<number>();
@@ -103,7 +104,9 @@ export function MessageSelector(props: {
     const searchResults = new Set<string>();
     if (text.length > 0) {
       messages.forEach((m) =>
-        m.content.includes(text) ? searchResults.add(m.id!) : null,
+        getMessageTextContent(m).includes(text)
+          ? searchResults.add(m.id!)
+          : null,
       );
     }
     setSearchIds(searchResults);
@@ -219,7 +222,7 @@ export function MessageSelector(props: {
                   {new Date(m.date).toLocaleString()}
                 </div>
                 <div className={`${styles["content"]} one-line`}>
-                  {m.content}
+                  {getMessageTextContent(m)}
                 </div>
               </div>
 

+ 1 - 1
app/components/model-config.tsx

@@ -92,7 +92,7 @@ export function ModelConfigList(props: {
         ></input>
       </ListItem>
 
-      {props.modelConfig.model === "gemini-pro" ? null : (
+      {props.modelConfig.model.startsWith("gemini") ? null : (
         <>
           <ListItem
             title={Locale.Settings.PresencePenalty.Title}

+ 6 - 6
app/components/settings.tsx

@@ -268,7 +268,7 @@ function CheckButton() {
   const syncStore = useSyncStore();
 
   const couldCheck = useMemo(() => {
-    return syncStore.coundSync();
+    return syncStore.cloudSync();
   }, [syncStore]);
 
   const [checkState, setCheckState] = useState<
@@ -472,7 +472,7 @@ function SyncItems() {
   const promptStore = usePromptStore();
   const maskStore = useMaskStore();
   const couldSync = useMemo(() => {
-    return syncStore.coundSync();
+    return syncStore.cloudSync();
   }, [syncStore]);
 
   const [showSyncConfigModal, setShowSyncConfigModal] = useState(false);
@@ -1081,8 +1081,8 @@ export function Settings() {
                         ></input>
                       </ListItem>
                       <ListItem
-                        title={Locale.Settings.Access.Azure.ApiKey.Title}
-                        subTitle={Locale.Settings.Access.Azure.ApiKey.SubTitle}
+                        title={Locale.Settings.Access.Google.ApiKey.Title}
+                        subTitle={Locale.Settings.Access.Google.ApiKey.SubTitle}
                       >
                         <PasswordInput
                           value={accessStore.googleApiKey}
@@ -1099,9 +1099,9 @@ export function Settings() {
                         />
                       </ListItem>
                       <ListItem
-                        title={Locale.Settings.Access.Google.ApiVerion.Title}
+                        title={Locale.Settings.Access.Google.ApiVersion.Title}
                         subTitle={
-                          Locale.Settings.Access.Google.ApiVerion.SubTitle
+                          Locale.Settings.Access.Google.ApiVersion.SubTitle
                         }
                       >
                         <input

+ 3 - 0
app/config/server.ts

@@ -30,6 +30,9 @@ declare global {
       // google only
       GOOGLE_API_KEY?: string;
       GOOGLE_URL?: string;
+
+      // google tag manager
+      GTM_ID?: string;
     }
   }
 }

+ 27 - 5
app/constant.ts

@@ -8,8 +8,7 @@ export const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/c
 export const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tags?per_page=1`;
 export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
 
-export const DEFAULT_CORS_HOST = "https://a.nextweb.fun";
-export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
+export const DEFAULT_API_HOST = "https://api.nextchat.dev";
 export const OPENAI_BASE_URL = "https://api.openai.com";
 
 export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
@@ -89,13 +88,14 @@ export const Azure = {
 export const Google = {
   ExampleEndpoint: "https://generativelanguage.googleapis.com/",
   ChatPath: "v1beta/models/gemini-pro:generateContent",
+  VisionChatPath: "v1beta/models/gemini-pro-vision:generateContent",
 
   // /api/openai/v1/chat/completions
 };
 
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
 export const DEFAULT_SYSTEM_TEMPLATE = `
-You are ChatGPT, a large language model trained by OpenAI.
+You are ChatGPT, a large language model trained by {{ServiceProvider}}.
 Knowledge cutoff: {{cutoff}}
 Current model: {{model}}
 Current time: {{time}}
@@ -104,13 +104,17 @@ Latex block: $$e=mc^2$$
 `;
 
 export const SUMMARIZE_MODEL = "gpt-3.5-turbo";
+export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
 
 export const KnowledgeCutOffDate: Record<string, string> = {
   default: "2021-09",
-  "gpt-4-turbo-preview": "2023-04",
+  "gpt-4-turbo-preview": "2023-12",
   "gpt-4-1106-preview": "2023-04",
-  "gpt-4-0125-preview": "2023-04",
+  "gpt-4-0125-preview": "2023-12",
   "gpt-4-vision-preview": "2023-04",
+  // After improvements,
+  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
+  "gemini-pro": "2023-12",
 };
 
 export const DEFAULT_MODELS = [
@@ -213,6 +217,15 @@ export const DEFAULT_MODELS = [
       providerType: "openai",
     },
   },
+  {
+    name: "gpt-3.5-turbo-0125",
+    available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
+  },
   {
     name: "gpt-3.5-turbo-0301",
     available: true,
@@ -267,6 +280,15 @@ export const DEFAULT_MODELS = [
       providerType: "google",
     },
   },
+  {
+    name: "gemini-pro-vision",
+    available: true,
+    provider: {
+      id: "google",
+      providerName: "Google",
+      providerType: "google",
+    },
+  },
 ] as const;
 
 export const CHAT_PAGE_SIZE = 15;

Fichier diff supprimé car celui-ci est trop grand
+ 0 - 0
app/icons/image.svg


+ 1 - 0
app/icons/loading.svg

@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="#fff" style=""><rect id="backgroundrect" width="100%" height="100%" x="0" y="0" fill="none" stroke="none" style="" class="" /><g class="currentLayer" style=""><title>Layer 1</title><circle cx="4" cy="8" r="1.926" fill="#333" id="svg_1" class=""><animate attributeName="r" begin="0s" calcMode="linear" dur="0.8s" from="2" repeatCount="indefinite" to="2" values="2;1.2;2" /><animate attributeName="fill-opacity" begin="0s" calcMode="linear" dur="0.8s" from="1" repeatCount="indefinite" to="1" values="1;.5;1" /></circle><circle cx="8" cy="8" r="1.2736" fill="#333" fill-opacity=".3" id="svg_2" class=""><animate attributeName="r" begin="0s" calcMode="linear" dur="0.8s" from="1.2" repeatCount="indefinite" to="1.2" values="1.2;2;1.2" /><animate attributeName="fill-opacity" begin="0s" calcMode="linear" dur="0.8s" from=".5" repeatCount="indefinite" to=".5" values=".5;1;.5" /></circle><circle cx="12" cy="8" r="1.926" fill="#333" id="svg_3" class=""><animate attributeName="r" begin="0s" calcMode="linear" dur="0.8s" from="2" repeatCount="indefinite" to="2" values="2;1.2;2" /><animate attributeName="fill-opacity" begin="0s" calcMode="linear" dur="0.8s" from="1" repeatCount="indefinite" to="1" values="1;.5;1" /></circle></g></svg>

+ 9 - 8
app/locales/cn.ts

@@ -63,6 +63,7 @@ const cn = {
       Masks: "所有面具",
       Clear: "清除聊天",
       Settings: "对话设置",
+      UploadImage: "上传图片",
     },
     Rename: "重命名对话",
     Typing: "正在输入…",
@@ -314,19 +315,19 @@ const cn = {
       },
       Google: {
         ApiKey: {
-          Title: "接口密钥",
-          SubTitle: "使用自定义 Google AI Studio API Key 绕过密码访问限制",
-          Placeholder: "Google AI Studio API Key",
+          Title: "API 密钥",
+          SubTitle: "从 Google AI 获取您的 API 密钥",
+          Placeholder: "输入您的 Google AI Studio API 密钥",
         },
 
         Endpoint: {
-          Title: "接口地址",
-          SubTitle: "不包含请求路径,样例:",
+          Title: "终端地址",
+          SubTitle: "例:",
         },
 
-        ApiVerion: {
-          Title: "接口版本 (gemini-pro api version)",
-          SubTitle: "选择指定的部分版本",
+        ApiVersion: {
+          Title: "API 版本(仅适用于 gemini-pro)",
+          SubTitle: "选择一个特定的 API 版本",
         },
       },
       CustomModel: {

+ 6 - 6
app/locales/en.ts

@@ -65,6 +65,7 @@ const en: LocaleType = {
       Masks: "Masks",
       Clear: "Clear Context",
       Settings: "Settings",
+      UploadImage: "Upload Images",
     },
     Rename: "Rename Chat",
     Typing: "Typing…",
@@ -322,9 +323,8 @@ const en: LocaleType = {
       Google: {
         ApiKey: {
           Title: "API Key",
-          SubTitle:
-            "Bypass password access restrictions using a custom Google AI Studio API Key",
-          Placeholder: "Google AI Studio API Key",
+          SubTitle: "Obtain your API Key from Google AI",
+          Placeholder: "Enter your Google AI Studio API Key",
         },
 
         Endpoint: {
@@ -332,9 +332,9 @@ const en: LocaleType = {
           SubTitle: "Example:",
         },
 
-        ApiVerion: {
-          Title: "API Version (gemini-pro api version)",
-          SubTitle: "Select a specific part version",
+        ApiVersion: {
+          Title: "API Version (specific to gemini-pro)",
+          SubTitle: "Select a specific API version",
         },
       },
     },

+ 1 - 1
app/locales/sk.ts

@@ -334,7 +334,7 @@ const sk: PartialLocaleType = {
           SubTitle: "Príklad:",
         },
 
-        ApiVerion: {
+        ApiVersion: {
           Title: "Verzia API (gemini-pro verzia API)",
           SubTitle: "Vyberte špecifickú verziu časti",
         },

+ 256 - 5
app/locales/tw.ts

@@ -1,16 +1,36 @@
+import { getClientConfig } from "../config/client";
 import { SubmitKey } from "../store/config";
-import type { PartialLocaleType } from "./index";
 
-const tw: PartialLocaleType = {
+const isApp = !!getClientConfig()?.isApp;
+
+const tw = {
   WIP: "該功能仍在開發中……",
   Error: {
-    Unauthorized: "目前您的狀態是未授權,請前往[設定頁面](/#/auth)輸入授權碼。",
+    Unauthorized: isApp
+      ? "檢測到無效 API Key,請前往[設定](/#/settings)頁檢查 API Key 是否設定正確。"
+      : "訪問密碼不正確或為空,請前往[登入](/#/auth)頁輸入正確的訪問密碼,或者在[設定](/#/settings)頁填入你自己的 OpenAI API Key。",
+  },
+
+  Auth: {
+    Title: "需要密碼",
+    Tips: "管理員開啟了密碼驗證,請在下方填入訪問碼",
+    SubTips: "或者輸入你的 OpenAI 或 Google API 密鑰",
+    Input: "在此處填寫訪問碼",
+    Confirm: "確認",
+    Later: "稍候再說",
   },
   ChatItem: {
     ChatItemCount: (count: number) => `${count} 則對話`,
   },
   Chat: {
     SubTitle: (count: number) => `您已經與 ChatGPT 進行了 ${count} 則對話`,
+    EditMessage: {
+      Title: "編輯消息記錄",
+      Topic: {
+        Title: "聊天主題",
+        SubTitle: "更改當前聊天主題",
+      },
+    },
     Actions: {
       ChatList: "檢視訊息列表",
       CompressedHistory: "檢視壓縮後的歷史 Prompt",
@@ -18,7 +38,33 @@ const tw: PartialLocaleType = {
       Copy: "複製",
       Stop: "停止",
       Retry: "重試",
+      Pin: "固定",
+      PinToastContent: "已將 1 條對話固定至預設提示詞",
+      PinToastAction: "查看",
       Delete: "刪除",
+      Edit: "編輯",
+    },
+    Commands: {
+      new: "新建聊天",
+      newm: "從面具新建聊天",
+      next: "下一個聊天",
+      prev: "上一個聊天",
+      clear: "清除上下文",
+      del: "刪除聊天",
+    },
+    InputActions: {
+      Stop: "停止回應",
+      ToBottom: "移至最新",
+      Theme: {
+        auto: "自動主題",
+        light: "亮色模式",
+        dark: "深色模式",
+      },
+      Prompt: "快捷指令",
+      Masks: "所有面具",
+      Clear: "清除聊天",
+      Settings: "對話設定",
+      UploadImage: "上傳圖片",
     },
     Rename: "重新命名對話",
     Typing: "正在輸入…",
@@ -34,13 +80,37 @@ const tw: PartialLocaleType = {
       Reset: "重設",
       SaveAs: "另存新檔",
     },
+    IsContext: "預設提示詞",
   },
   Export: {
     Title: "將聊天記錄匯出為 Markdown",
     Copy: "複製全部",
     Download: "下載檔案",
+    Share: "分享到 ShareGPT",
     MessageFromYou: "來自您的訊息",
     MessageFromChatGPT: "來自 ChatGPT 的訊息",
+    Format: {
+      Title: "導出格式",
+      SubTitle: "可以導出 Markdown 文本或者 PNG 圖片",
+    },
+    IncludeContext: {
+      Title: "包含面具上下文",
+      SubTitle: "是否在消息中展示面具上下文",
+    },
+    Steps: {
+      Select: "選取",
+      Preview: "預覽",
+    },
+    Image: {
+      Toast: "正在生成截圖",
+      Modal: "長按或右鍵保存圖片",
+    },
+  },
+  Select: {
+    Search: "查詢消息",
+    All: "選取全部",
+    Latest: "最近幾條",
+    Clear: "清除選中",
   },
   Memory: {
     Title: "上下文記憶 Prompt",
@@ -60,6 +130,20 @@ const tw: PartialLocaleType = {
     Title: "設定",
     SubTitle: "設定選項",
 
+    Danger: {
+      Reset: {
+        Title: "重置所有設定",
+        SubTitle: "重置所有設定項回預設值",
+        Action: "立即重置",
+        Confirm: "確認重置所有設定?",
+      },
+      Clear: {
+        Title: "清除所有資料",
+        SubTitle: "清除所有聊天、設定資料",
+        Action: "立即清除",
+        Confirm: "確認清除所有聊天、設定資料?",
+      },
+    },
     Lang: {
       Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
       All: "所有語言",
@@ -73,6 +157,11 @@ const tw: PartialLocaleType = {
       Title: "匯入系統提示",
       SubTitle: "強制在每個請求的訊息列表開頭新增一個模擬 ChatGPT 的系統提示",
     },
+    InputTemplate: {
+      Title: "用戶輸入預處理",
+      SubTitle: "用戶最新的一條消息會填充到此模板",
+    },
+
     Update: {
       Version: (x: string) => `目前版本:${x}`,
       IsLatest: "已是最新版本",
@@ -88,11 +177,62 @@ const tw: PartialLocaleType = {
       Title: "預覽氣泡",
       SubTitle: "在預覽氣泡中預覽 Markdown 內容",
     },
+    AutoGenerateTitle: {
+      Title: "自動生成標題",
+      SubTitle: "根據對話內容生成合適的標題",
+    },
+    Sync: {
+      CloudState: "雲端資料",
+      NotSyncYet: "還沒有進行過同步",
+      Success: "同步成功",
+      Fail: "同步失敗",
+
+      Config: {
+        Modal: {
+          Title: "設定雲端同步",
+          Check: "檢查可用性",
+        },
+        SyncType: {
+          Title: "同步類型",
+          SubTitle: "選擇喜愛的同步服務器",
+        },
+        Proxy: {
+          Title: "啟用代理",
+          SubTitle: "在瀏覽器中同步時,必須啟用代理以避免跨域限制",
+        },
+        ProxyUrl: {
+          Title: "代理地址",
+          SubTitle: "僅適用於本項目自帶的跨域代理",
+        },
+
+        WebDav: {
+          Endpoint: "WebDAV 地址",
+          UserName: "用戶名",
+          Password: "密碼",
+        },
+
+        UpStash: {
+          Endpoint: "UpStash Redis REST Url",
+          UserName: "備份名稱",
+          Password: "UpStash Redis REST Token",
+        },
+      },
+
+      LocalState: "本地資料",
+      Overview: (overview: any) => {
+        return `${overview.chat} 次對話,${overview.message} 條消息,${overview.prompt} 條提示詞,${overview.mask} 個面具`;
+      },
+      ImportFailed: "導入失敗",
+    },
     Mask: {
       Splash: {
         Title: "面具啟動頁面",
         SubTitle: "新增聊天時,呈現面具啟動頁面",
       },
+      Builtin: {
+        Title: "隱藏內置面具",
+        SubTitle: "在所有面具列表中隱藏內置面具",
+      },
     },
     Prompt: {
       Disable: {
@@ -131,11 +271,81 @@ const tw: PartialLocaleType = {
       NoAccess: "輸入 API Key 檢視餘額",
     },
 
+    Access: {
+      AccessCode: {
+        Title: "訪問密碼",
+        SubTitle: "管理員已開啟加密訪問",
+        Placeholder: "請輸入訪問密碼",
+      },
+      CustomEndpoint: {
+        Title: "自定義接口 (Endpoint)",
+        SubTitle: "是否使用自定義 Azure 或 OpenAI 服務",
+      },
+      Provider: {
+        Title: "模型服務商",
+        SubTitle: "切換不同的服務商",
+      },
+      OpenAI: {
+        ApiKey: {
+          Title: "API Key",
+          SubTitle: "使用自定義 OpenAI Key 繞過密碼訪問限制",
+          Placeholder: "OpenAI API Key",
+        },
+
+        Endpoint: {
+          Title: "接口(Endpoint) 地址",
+          SubTitle: "除默認地址外,必須包含 http(s)://",
+        },
+      },
+      Azure: {
+        ApiKey: {
+          Title: "接口密鑰",
+          SubTitle: "使用自定義 Azure Key 繞過密碼訪問限制",
+          Placeholder: "Azure API Key",
+        },
+
+        Endpoint: {
+          Title: "接口(Endpoint) 地址",
+          SubTitle: "樣例:",
+        },
+
+        ApiVerion: {
+          Title: "接口版本 (azure api version)",
+          SubTitle: "選擇指定的部分版本",
+        },
+      },
+      Google: {
+        ApiKey: {
+          Title: "API 密鑰",
+          SubTitle: "從 Google AI 獲取您的 API 密鑰",
+          Placeholder: "輸入您的 Google AI Studio API 密鑰",
+        },
+
+        Endpoint: {
+          Title: "終端地址",
+          SubTitle: "示例:",
+        },
+
+        ApiVersion: {
+          Title: "API 版本(僅適用於 gemini-pro)",
+          SubTitle: "選擇一個特定的 API 版本",
+        },
+      },
+      CustomModel: {
+        Title: "自定義模型名",
+        SubTitle: "增加自定義模型可選項,使用英文逗號隔開",
+      },
+    },
+
     Model: "模型 (model)",
     Temperature: {
       Title: "隨機性 (temperature)",
       SubTitle: "值越大,回應越隨機",
     },
+    TopP: {
+      Title: "核采樣 (top_p)",
+      SubTitle: "與隨機性類似,但不要和隨機性一起更改",
+    },
     MaxTokens: {
       Title: "單次回應限制 (max_tokens)",
       SubTitle: "單次互動所用的最大 Token 數",
@@ -166,10 +376,16 @@ const tw: PartialLocaleType = {
     Success: "已複製到剪貼簿中",
     Failed: "複製失敗,請賦予剪貼簿權限",
   },
+  Download: {
+    Success: "內容已下載到您的目錄。",
+    Failed: "下載失敗。",
+  },
   Context: {
     Toast: (x: any) => `已設定 ${x} 條前置上下文`,
     Edit: "前置上下文和歷史記憶",
     Add: "新增一條",
+    Clear: "上下文已清除",
+    Revert: "恢復上下文",
   },
   Plugin: { Name: "外掛" },
   FineTuned: { Sysmessage: "你是一個助手" },
@@ -198,16 +414,34 @@ const tw: PartialLocaleType = {
     Config: {
       Avatar: "角色頭像",
       Name: "角色名稱",
+      Sync: {
+        Title: "使用全局設定",
+        SubTitle: "當前對話是否使用全局模型設定",
+        Confirm: "當前對話的自定義設定將會被自動覆蓋,確認啟用全局設定?",
+      },
+      HideContext: {
+        Title: "隱藏預設對話",
+        SubTitle: "隱藏後預設對話不會出現在聊天界面",
+      },
+      Share: {
+        Title: "分享此面具",
+        SubTitle: "生成此面具的直達鏈接",
+        Action: "複製鏈接",
+      },
     },
   },
   NewChat: {
     Return: "返回",
     Skip: "跳過",
+    NotShow: "不再呈現",
+    ConfirmNoShow: "確認停用?停用後可以隨時在設定中重新啟用。",
     Title: "挑選一個面具",
     SubTitle: "現在開始,與面具背後的靈魂思維碰撞",
     More: "搜尋更多",
-    NotShow: "不再呈現",
-    ConfirmNoShow: "確認停用?停用後可以隨時在設定中重新啟用。",
+  },
+  URLCommand: {
+    Code: "檢測到連結中已經包含訪問碼,是否自動填入?",
+    Settings: "檢測到連結中包含了預設設定,是否自動填入?",
   },
   UI: {
     Confirm: "確認",
@@ -215,8 +449,15 @@ const tw: PartialLocaleType = {
     Close: "關閉",
     Create: "新增",
     Edit: "編輯",
+    Export: "導出",
+    Import: "導入",
+    Sync: "同步",
+    Config: "設定",
   },
   Exporter: {
+    Description: {
+      Title: "只有清除上下文之後的消息會被展示",
+    },
     Model: "模型",
     Messages: "訊息",
     Topic: "主題",
@@ -224,4 +465,14 @@ const tw: PartialLocaleType = {
   },
 };
 
+type DeepPartial<T> = T extends object
+  ? {
+    [P in keyof T]?: DeepPartial<T[P]>;
+  }
+  : T;
+
+export type LocaleType = typeof tw;
+export type PartialLocaleType = DeepPartial<typeof tw>;
+
 export default tw;
+// Translated by @chunkiuuu, feel free the submit new pr if there are typo/incorrect translations :D

+ 3 - 1
app/store/access.ts

@@ -12,7 +12,9 @@ import { ensure } from "../utils/clone";
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
 
 const DEFAULT_OPENAI_URL =
-  getClientConfig()?.buildMode === "export" ? DEFAULT_API_HOST : ApiPath.OpenAI;
+  getClientConfig()?.buildMode === "export"
+    ? DEFAULT_API_HOST + "/api/proxy/openai"
+    : ApiPath.OpenAI;
 
 const DEFAULT_ACCESS_STATE = {
   accessCode: "",

+ 56 - 13
app/store/chat.ts

@@ -1,4 +1,4 @@
-import { trimTopic } from "../utils";
+import { trimTopic, getMessageTextContent } from "../utils";
 
 import Locale, { getLang } from "../locales";
 import { showToast } from "../components/ui-lib";
@@ -6,13 +6,15 @@ import { ModelConfig, ModelType, useAppConfig } from "./config";
 import { createEmptyMask, Mask } from "./mask";
 import {
   DEFAULT_INPUT_TEMPLATE,
+  DEFAULT_MODELS,
   DEFAULT_SYSTEM_TEMPLATE,
   KnowledgeCutOffDate,
   ModelProvider,
   StoreKey,
   SUMMARIZE_MODEL,
+  GEMINI_SUMMARIZE_MODEL,
 } from "../constant";
-import { ClientApi, RequestMessage } from "../client/api";
+import { ClientApi, RequestMessage, MultimodalContent } from "../client/api";
 import { ChatControllerPool } from "../client/controller";
 import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
@@ -83,18 +85,38 @@ function createEmptySession(): ChatSession {
 
 function getSummarizeModel(currentModel: string) {
   // if it is using gpt-* models, force to use 3.5 to summarize
-  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
+  if (currentModel.startsWith("gpt")) {
+    return SUMMARIZE_MODEL;
+  }
+  if (currentModel.startsWith("gemini-pro")) {
+    return GEMINI_SUMMARIZE_MODEL;
+  }
+  return currentModel;
 }
 
 function countMessages(msgs: ChatMessage[]) {
-  return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
+  return msgs.reduce(
+    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
+    0,
+  );
 }
 
 function fillTemplateWith(input: string, modelConfig: ModelConfig) {
-  let cutoff =
+  const cutoff =
     KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
+  // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
+  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
+
+  var serviceProvider = "OpenAI";
+  if (modelInfo) {
+    // TODO: auto detect the providerName from the modelConfig.model
+
+    // Directly use the providerName from the modelInfo
+    serviceProvider = modelInfo.provider.providerName;
+  }
 
   const vars = {
+    ServiceProvider: serviceProvider,
     cutoff,
     model: modelConfig.model,
     time: new Date().toLocaleString(),
@@ -111,7 +133,8 @@ function fillTemplateWith(input: string, modelConfig: ModelConfig) {
   }
 
   Object.entries(vars).forEach(([name, value]) => {
-    output = output.replaceAll(`{{${name}}}`, value);
+    const regex = new RegExp(`{{${name}}}`, "g");
+    output = output.replace(regex, value.toString()); // Ensure value is a string
   });
 
   return output;
@@ -267,16 +290,36 @@ export const useChatStore = createPersistStore(
         get().summarizeSession();
       },
 
-      async onUserInput(content: string) {
+      async onUserInput(content: string, attachImages?: string[]) {
         const session = get().currentSession();
         const modelConfig = session.mask.modelConfig;
 
         const userContent = fillTemplateWith(content, modelConfig);
         console.log("[User Input] after template: ", userContent);
 
-        const userMessage: ChatMessage = createMessage({
+        let mContent: string | MultimodalContent[] = userContent;
+
+        if (attachImages && attachImages.length > 0) {
+          mContent = [
+            {
+              type: "text",
+              text: userContent,
+            },
+          ];
+          mContent = mContent.concat(
+            attachImages.map((url) => {
+              return {
+                type: "image_url",
+                image_url: {
+                  url: url,
+                },
+              };
+            }),
+          );
+        }
+        let userMessage: ChatMessage = createMessage({
           role: "user",
-          content: userContent,
+          content: mContent,
         });
 
         const botMessage: ChatMessage = createMessage({
@@ -294,7 +337,7 @@ export const useChatStore = createPersistStore(
         get().updateCurrentSession((session) => {
           const savedUserMessage = {
             ...userMessage,
-            content,
+            content: mContent,
           };
           session.messages = session.messages.concat([
             savedUserMessage,
@@ -303,7 +346,7 @@ export const useChatStore = createPersistStore(
         });
 
         var api: ClientApi;
-        if (modelConfig.model === "gemini-pro") {
+        if (modelConfig.model.startsWith("gemini")) {
           api = new ClientApi(ModelProvider.GeminiPro);
         } else {
           api = new ClientApi(ModelProvider.GPT);
@@ -448,7 +491,7 @@ export const useChatStore = createPersistStore(
         ) {
           const msg = messages[i];
           if (!msg || msg.isError) continue;
-          tokenCount += estimateTokenLength(msg.content);
+          tokenCount += estimateTokenLength(getMessageTextContent(msg));
           reversedRecentMessages.push(msg);
         }
 
@@ -488,7 +531,7 @@ export const useChatStore = createPersistStore(
         const modelConfig = session.mask.modelConfig;
 
         var api: ClientApi;
-        if (modelConfig.model === "gemini-pro") {
+        if (modelConfig.model.startsWith("gemini")) {
           api = new ClientApi(ModelProvider.GeminiPro);
         } else {
           api = new ClientApi(ModelProvider.GPT);

+ 1 - 1
app/store/config.ts

@@ -91,7 +91,7 @@ export const ModalConfigValidator = {
     return limitNumber(x, -2, 2, 0);
   },
   temperature(x: number) {
-    return limitNumber(x, 0, 1, 1);
+    return limitNumber(x, 0, 2, 1);
   },
   top_p(x: number) {
     return limitNumber(x, 0, 1, 1);

+ 5 - 3
app/store/sync.ts

@@ -48,7 +48,7 @@ const DEFAULT_SYNC_STATE = {
 export const useSyncStore = createPersistStore(
   DEFAULT_SYNC_STATE,
   (set, get) => ({
-    coundSync() {
+    cloudSync() {
       const config = get()[get().provider];
       return Object.values(config).every((c) => c.toString().length > 0);
     },
@@ -60,8 +60,10 @@ export const useSyncStore = createPersistStore(
     export() {
       const state = getLocalAppState();
       const datePart = isApp
-      ? `${new Date().toLocaleDateString().replace(/\//g, '_')} ${new Date().toLocaleTimeString().replace(/:/g, '_')}`
-      : new Date().toLocaleString();
+        ? `${new Date().toLocaleDateString().replace(/\//g, "_")} ${new Date()
+            .toLocaleTimeString()
+            .replace(/:/g, "_")}`
+        : new Date().toLocaleString();
 
       const fileName = `Backup-${datePart}.json`;
       downloadAs(JSON.stringify(state), fileName);

+ 92 - 11
app/utils.ts

@@ -1,12 +1,17 @@
 import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
+import { RequestMessage } from "./client/api";
+import { DEFAULT_MODELS } from "./constant";
 
 export function trimTopic(topic: string) {
   // Fix an issue where double quotes still show in the Indonesian language
   // This will remove the specified punctuation from the end of the string
   // and also trim quotes from both the start and end if they exist.
-  return topic.replace(/^["“”]+|["“”]+$/g, "").replace(/[,。!?”“"、,.!?]*$/, "");
+  return topic
+    // fix for gemini
+    .replace(/^["“”*]+|["“”*]+$/g, "")
+    .replace(/[,。!?”“"、,.!?*]*$/, "");
 }
 
 export async function copyToClipboard(text: string) {
@@ -40,8 +45,8 @@ export async function downloadAs(text: string, filename: string) {
      defaultPath: `${filename}`,
       filters: [
         {
-          name: `${filename.split('.').pop()} files`,
-          extensions: [`${filename.split('.').pop()}`],
+          name: `${filename.split(".").pop()} files`,
+          extensions: [`${filename.split(".").pop()}`],
         },
         {
           name: "All Files",
@@ -69,16 +74,59 @@ export async function downloadAs(text: string, filename: string) {
       "href",
       "data:text/plain;charset=utf-8," + encodeURIComponent(text),
     );
-  element.setAttribute("download", filename);
+    element.setAttribute("download", filename);
 
-  element.style.display = "none";
-  document.body.appendChild(element);
+    element.style.display = "none";
+    document.body.appendChild(element);
 
-  element.click();
+    element.click();
 
-  document.body.removeChild(element);
+    document.body.removeChild(element);
+  }
 }
+
+export function compressImage(file: File, maxSize: number): Promise<string> {
+  return new Promise((resolve, reject) => {
+    const reader = new FileReader();
+    reader.onload = (readerEvent: any) => {
+      const image = new Image();
+      image.onload = () => {
+        let canvas = document.createElement("canvas");
+        let ctx = canvas.getContext("2d");
+        let width = image.width;
+        let height = image.height;
+        let quality = 0.9;
+        let dataUrl;
+
+        do {
+          canvas.width = width;
+          canvas.height = height;
+          ctx?.clearRect(0, 0, canvas.width, canvas.height);
+          ctx?.drawImage(image, 0, 0, width, height);
+          dataUrl = canvas.toDataURL("image/jpeg", quality);
+
+          if (dataUrl.length < maxSize) break;
+
+          if (quality > 0.5) {
+            // Prioritize quality reduction
+            quality -= 0.1;
+          } else {
+            // Then reduce the size
+            width *= 0.9;
+            height *= 0.9;
+          }
+        } while (dataUrl.length > maxSize);
+
+        resolve(dataUrl);
+      };
+      image.onerror = reject;
+      image.src = readerEvent.target.result;
+    };
+    reader.onerror = reject;
+    reader.readAsDataURL(file);
+  });
 }
+
 export function readFromFile() {
   return new Promise<string>((res, rej) => {
     const fileInput = document.createElement("input");
@@ -212,8 +260,41 @@ export function getCSSVar(varName: string) {
 export function isMacOS(): boolean {
   if (typeof window !== "undefined") {
     let userAgent = window.navigator.userAgent.toLocaleLowerCase();
-    const macintosh = /iphone|ipad|ipod|macintosh/.test(userAgent)
-    return !!macintosh
+    const macintosh = /iphone|ipad|ipod|macintosh/.test(userAgent);
+    return !!macintosh;
+  }
+  return false;
+}
+
+export function getMessageTextContent(message: RequestMessage) {
+  if (typeof message.content === "string") {
+    return message.content;
+  }
+  for (const c of message.content) {
+    if (c.type === "text") {
+      return c.text ?? "";
+    }
+  }
+  return "";
+}
+
+export function getMessageImages(message: RequestMessage): string[] {
+  if (typeof message.content === "string") {
+    return [];
+  }
+  const urls: string[] = [];
+  for (const c of message.content) {
+    if (c.type === "image_url") {
+      urls.push(c.image_url?.url ?? "");
+    }
   }
-  return false
+  return urls;
+}
+
+export function isVisionModel(model: string) {
+  return (
+    // model.startsWith("gpt-4-vision") ||
+    // model.startsWith("gemini-pro-vision") ||
+    model.includes("vision")
+  );
 }

+ 2 - 2
app/utils/cors.ts

@@ -1,8 +1,8 @@
 import { getClientConfig } from "../config/client";
-import { ApiPath, DEFAULT_CORS_HOST } from "../constant";
+import { ApiPath, DEFAULT_API_HOST } from "../constant";
 
 export function corsPath(path: string) {
-  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_CORS_HOST}` : "";
+  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_API_HOST}` : "";
 
   if (!path.startsWith("/")) {
     path = "/" + path;

+ 10 - 1
next.config.mjs

@@ -64,8 +64,17 @@ if (mode !== "export") {
 
   nextConfig.rewrites = async () => {
     const ret = [
+      // adjust for previous version directly using "/api/proxy/" as proxy base route
       {
-        source: "/api/proxy/:path*",
+        source: "/api/proxy/v1/:path*",
+        destination: "https://api.openai.com/v1/:path*",
+      },
+      {
+        source: "/api/proxy/google/:path*",
+        destination: "https://generativelanguage.googleapis.com/:path*",
+      },
+      {
+        source: "/api/proxy/openai/:path*",
         destination: "https://api.openai.com/:path*",
       },
       {

+ 2 - 2
package.json

@@ -1,5 +1,5 @@
 {
-  "name": "chatgpt-next-web",
+  "name": "nextchat",
   "private": false,
   "license": "mit",
   "scripts": {
@@ -64,4 +64,4 @@
   "resolutions": {
     "lint-staged/yaml": "^2.2.2"
   }
-}
+}

+ 1 - 1
scripts/setup.sh

@@ -54,7 +54,7 @@ if ! command -v node >/dev/null || ! command -v git >/dev/null || ! command -v y
 fi
 
 # Clone the repository and install dependencies
-git clone https://github.com/Yidadaa/ChatGPT-Next-Web
+git clone https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web
 cd ChatGPT-Next-Web
 yarn install
 

Fichier diff supprimé car celui-ci est trop grand
+ 475 - 86
src-tauri/Cargo.lock


+ 23 - 5
src-tauri/Cargo.toml

@@ -1,27 +1,45 @@
 [package]
-name = "chatgpt-next-web"
+name = "nextchat"
 version = "0.1.0"
 description = "A cross platform app for LLM ChatBot."
 authors = ["Yidadaa"]
 license = "mit"
 repository = ""
-default-run = "chatgpt-next-web"
+default-run = "nextchat"
 edition = "2021"
 rust-version = "1.60"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [build-dependencies]
-tauri-build = { version = "1.3.0", features = [] }
+tauri-build = { version = "1.5.1", features = [] }
 
 [dependencies]
 serde_json = "1.0"
 serde = { version = "1.0", features = ["derive"] }
-tauri = { version = "1.3.0", features = ["notification-all", "fs-all", "clipboard-all", "dialog-all", "shell-open", "updater", "window-close", "window-hide", "window-maximize", "window-minimize", "window-set-icon", "window-set-ignore-cursor-events", "window-set-resizable", "window-show", "window-start-dragging", "window-unmaximize", "window-unminimize"] }
+tauri = { version = "1.5.4", features = [
+    "notification-all",
+    "fs-all",
+    "clipboard-all",
+    "dialog-all",
+    "shell-open",
+    "updater",
+    "window-close",
+    "window-hide",
+    "window-maximize",
+    "window-minimize",
+    "window-set-icon",
+    "window-set-ignore-cursor-events",
+    "window-set-resizable",
+    "window-show",
+    "window-start-dragging",
+    "window-unmaximize",
+    "window-unminimize",
+] }
 tauri-plugin-window-state = { git = "https://github.com/tauri-apps/plugins-workspace", branch = "v1" }
 
 [features]
 # this feature is used for production builds or when `devPath` points to the filesystem and the built-in dev server is disabled.
 # If you use cargo directly instead of tauri's cli you can use this feature flag to switch between tauri's `dev` and `build` modes.
 # DO NOT REMOVE!!
-custom-protocol = [ "tauri/custom-protocol" ]
+custom-protocol = ["tauri/custom-protocol"]

+ 4 - 3
src-tauri/tauri.conf.json

@@ -9,7 +9,7 @@
   },
   "package": {
     "productName": "NextChat",
-    "version": "2.10.1"
+    "version": "2.11.2"
   },
   "tauri": {
     "allowlist": {
@@ -86,12 +86,13 @@
       }
     },
     "security": {
-      "csp": null
+      "csp": null,
+      "dangerousUseHttpScheme": true
     },
     "updater": {
       "active": true,
       "endpoints": [
-        "https://github.com/Yidadaa/ChatGPT-Next-Web/releases/latest/download/latest.json"
+        "https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/releases/latest/download/latest.json"
       ],
       "dialog": false,
       "windows": {

Certains fichiers n'ont pas été affichés car il y a eu trop de fichiers modifiés dans ce diff