Parcourir la source

fix: indentation violations in YAML files (#1972)

Bowen Liang il y a 1 an
Parent
commit
b8592ad412
84 fichiers modifiés avec 1837 ajouts et 1842 suppressions
  1. 47 47
      .github/ISSUE_TEMPLATE/bug_report.yml
  2. 1 1
      .github/ISSUE_TEMPLATE/config.yml
  3. 17 17
      .github/ISSUE_TEMPLATE/document_issue.yml
  4. 31 31
      .github/ISSUE_TEMPLATE/feature_request.yml
  5. 16 16
      .github/ISSUE_TEMPLATE/help_wanted.yml
  6. 48 48
      .github/ISSUE_TEMPLATE/translation_issue.yml
  7. 1 1
      .github/linters/.hadolint.yaml
  8. 2 6
      .github/linters/.yaml-lint.yml
  9. 12 12
      .github/workflows/api-model-runtime-tests.yml
  10. 41 41
      .github/workflows/build-api-image.yml
  11. 41 41
      .github/workflows/build-web-image.yml
  12. 11 11
      .github/workflows/stale.yml
  13. 34 34
      .github/workflows/style.yml
  14. 1 1
      api/core/model_runtime/model_providers/_position.yaml
  15. 18 18
      api/core/model_runtime/model_providers/anthropic/anthropic.yaml
  16. 21 21
      api/core/model_runtime/model_providers/anthropic/llm/claude-2.1.yaml
  17. 21 21
      api/core/model_runtime/model_providers/anthropic/llm/claude-2.yaml
  18. 21 21
      api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.yaml
  19. 79 79
      api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml
  20. 20 20
      api/core/model_runtime/model_providers/baichuan/baichuan.yaml
  21. 33 33
      api/core/model_runtime/model_providers/baichuan/llm/baichuan2-53b.yaml
  22. 33 33
      api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo-192k.yaml
  23. 33 33
      api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo.yaml
  24. 1 1
      api/core/model_runtime/model_providers/baichuan/text_embedding/baichuan-text-embedding.yaml
  25. 10 10
      api/core/model_runtime/model_providers/chatglm/chatglm.yaml
  26. 12 12
      api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b-32k.yaml
  27. 12 12
      api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b.yaml
  28. 13 13
      api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b-32k.yaml
  29. 13 13
      api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b.yaml
  30. 12 12
      api/core/model_runtime/model_providers/cohere/cohere.yaml
  31. 1 1
      api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v2.0.yaml
  32. 10 11
      api/core/model_runtime/model_providers/google/google.yaml
  33. 21 21
      api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml
  34. 21 21
      api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml
  35. 74 74
      api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.yaml
  36. 11 11
      api/core/model_runtime/model_providers/jina/jina.yaml
  37. 1 1
      api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-en.yaml
  38. 1 1
      api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-small-en.yaml
  39. 33 33
      api/core/model_runtime/model_providers/localai/localai.yaml
  40. 16 16
      api/core/model_runtime/model_providers/minimax/llm/abab5-chat.yaml
  41. 23 23
      api/core/model_runtime/model_providers/minimax/llm/abab5.5-chat.yaml
  42. 19 19
      api/core/model_runtime/model_providers/minimax/minimax.yaml
  43. 1 1
      api/core/model_runtime/model_providers/minimax/text_embedding/embo-01.yaml
  44. 1 1
      api/core/model_runtime/model_providers/openai/llm/_position.yaml
  45. 16 16
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml
  46. 16 16
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml
  47. 16 16
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml
  48. 16 16
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k.yaml
  49. 15 15
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-instruct.yaml
  50. 16 16
      api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml
  51. 44 44
      api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml
  52. 44 44
      api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml
  53. 43 43
      api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml
  54. 44 44
      api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml
  55. 15 15
      api/core/model_runtime/model_providers/openai/llm/text-davinci-003.yaml
  56. 1 1
      api/core/model_runtime/model_providers/openai/moderation/text-moderation-stable.yaml
  57. 56 56
      api/core/model_runtime/model_providers/openai/openai.yaml
  58. 1 1
      api/core/model_runtime/model_providers/openai/speech2text/whisper-1.yaml
  59. 1 1
      api/core/model_runtime/model_providers/openai/text_embedding/text-embedidng-ada-002.yaml
  60. 61 61
      api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml
  61. 12 12
      api/core/model_runtime/model_providers/openllm/openllm.yaml
  62. 19 19
      api/core/model_runtime/model_providers/replicate/replicate.yaml
  63. 26 26
      api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
  64. 26 26
      api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
  65. 26 26
      api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
  66. 26 26
      api/core/model_runtime/model_providers/spark/spark.yaml
  67. 53 53
      api/core/model_runtime/model_providers/togetherai/togetherai.yaml
  68. 49 49
      api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
  69. 50 50
      api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml
  70. 10 10
      api/core/model_runtime/model_providers/tongyi/tongyi.yaml
  71. 27 27
      api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-4.yaml
  72. 27 27
      api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-8k.yaml
  73. 18 18
      api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-turbo.yaml
  74. 27 27
      api/core/model_runtime/model_providers/wenxin/llm/ernie-bot.yaml
  75. 18 18
      api/core/model_runtime/model_providers/wenxin/wenxin.yaml
  76. 22 22
      api/core/model_runtime/model_providers/xinference/xinference.yaml
  77. 15 15
      api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite.yaml
  78. 15 15
      api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite_32k.yaml
  79. 15 15
      api/core/model_runtime/model_providers/zhipuai/llm/chatglm_pro.yaml
  80. 15 15
      api/core/model_runtime/model_providers/zhipuai/llm/chatglm_std.yaml
  81. 35 35
      api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml
  82. 1 1
      api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.yaml
  83. 11 11
      api/core/model_runtime/model_providers/zhipuai/zhipuai.yaml
  84. 1 1
      docker/docker-compose.yaml

+ 47 - 47
.github/ISSUE_TEMPLATE/bug_report.yml

@@ -1,56 +1,56 @@
 name: "🕷️ Bug report"
 description: Report errors or unexpected behavior
 labels:
-- bug
+  - bug
 body:
-- type: checkboxes
-  attributes:
-    label: Self Checks
-    description: "To make sure we get to you in time, please check the following :)"
-    options:
-      - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
-        required: true
-      - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
-        required: true
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "To make sure we get to you in time, please check the following :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+          required: true
 
-- type: input
-  attributes:
-    label: Dify version
-    placeholder: 0.3.21
-    description: See about section in Dify console
-  validations:
-    required: true
+  - type: input
+    attributes:
+      label: Dify version
+      placeholder: 0.3.21
+      description: See about section in Dify console
+    validations:
+      required: true
 
-- type: dropdown
-  attributes:
-    label: Cloud or Self Hosted
-    description: How / Where was Dify installed from?
-    multiple: true
-    options:
-      - Cloud
-      - Self Hosted (Docker)
-      - Self Hosted (Source)
-  validations:
-    required: true
+  - type: dropdown
+    attributes:
+      label: Cloud or Self Hosted
+      description: How / Where was Dify installed from?
+      multiple: true
+      options:
+        - Cloud
+        - Self Hosted (Docker)
+        - Self Hosted (Source)
+    validations:
+      required: true
 
-- type: textarea
-  attributes:
-    label: Steps to reproduce
-    description: We highly suggest including screenshots and a bug report log.
-    placeholder: Having detailed steps helps us reproduce the bug. 
-  validations:
-    required: true
+  - type: textarea
+    attributes:
+      label: Steps to reproduce
+      description: We highly suggest including screenshots and a bug report log.
+      placeholder: Having detailed steps helps us reproduce the bug.
+    validations:
+      required: true
 
-- type: textarea
-  attributes:
-    label: ✔️ Expected Behavior
-    placeholder: What were you expecting?
-  validations:
-    required: false
+  - type: textarea
+    attributes:
+      label: ✔️ Expected Behavior
+      placeholder: What were you expecting?
+    validations:
+      required: false
 
-- type: textarea
-  attributes:
-    label: ❌ Actual Behavior
-    placeholder: What happened instead?
-  validations:
-    required: false
+  - type: textarea
+    attributes:
+      label: ❌ Actual Behavior
+      placeholder: What happened instead?
+    validations:
+      required: false

+ 1 - 1
.github/ISSUE_TEMPLATE/config.yml

@@ -5,4 +5,4 @@ contact_links:
     about: Documentation for users of Dify
   - name: "\U0001F4DA Dify dev documentation"
     url: https://docs.dify.ai/getting-started/install-self-hosted
-    about: Documentation for people interested in developing and contributing for Dify
+    about: Documentation for people interested in developing and contributing for Dify

+ 17 - 17
.github/ISSUE_TEMPLATE/document_issue.yml

@@ -1,20 +1,20 @@
 name: "📚 Documentation Issue"
 description: Report issues in our documentation
-labels: 
-- ducumentation
+labels:
+  - ducumentation
 body:
-- type: checkboxes
-  attributes:
-    label: Self Checks
-    description: "To make sure we get to you in time, please check the following :)"
-    options:
-      - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
-        required: true
-      - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
-        required: true
-- type: textarea
-  attributes: 
-    label: Provide a description of requested docs changes
-    placeholder: Briefly describe which document needs to be corrected and why.
-  validations:
-    required: true
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "To make sure we get to you in time, please check the following :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+          required: true
+  - type: textarea
+    attributes:
+      label: Provide a description of requested docs changes
+      placeholder: Briefly describe which document needs to be corrected and why.
+    validations:
+      required: true

+ 31 - 31
.github/ISSUE_TEMPLATE/feature_request.yml

@@ -1,35 +1,35 @@
 name: "⭐ Feature or enhancement request"
 description: Propose something new.
 labels:
-- enhancement
+  - enhancement
 body:
-- type: checkboxes
-  attributes:
-    label: Self Checks
-    description: "To make sure we get to you in time, please check the following :)"
-    options:
-      - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
-        required: true
-      - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
-        required: true
-- type: textarea
-  attributes: 
-    label: Description of the new feature / enhancement
-    placeholder: What is the expected behavior of the proposed feature?
-  validations:
-    required: true
-- type: textarea
-  attributes:
-    label: Scenario when this would be used?
-    placeholder: What is the scenario this would be used? Why is this important to your workflow as a dify user?
-  validations:
-    required: true
-- type: textarea
-  attributes:
-    label: Supporting information
-    placeholder: "Having additional evidence, data, tweets, blog posts, research, ... anything is extremely helpful. This information provides context to the scenario that may otherwise be lost."
-  validations:
-    required: false
-- type: markdown
-  attributes:
-    value: Please limit one request per issue.
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "To make sure we get to you in time, please check the following :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+          required: true
+  - type: textarea
+    attributes:
+      label: Description of the new feature / enhancement
+      placeholder: What is the expected behavior of the proposed feature?
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Scenario when this would be used?
+      placeholder: What is the scenario this would be used? Why is this important to your workflow as a dify user?
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Supporting information
+      placeholder: "Having additional evidence, data, tweets, blog posts, research, ... anything is extremely helpful. This information provides context to the scenario that may otherwise be lost."
+    validations:
+      required: false
+  - type: markdown
+    attributes:
+      value: Please limit one request per issue.

+ 16 - 16
.github/ISSUE_TEMPLATE/help_wanted.yml

@@ -1,20 +1,20 @@
 name: "🤝 Help Wanted"
 description: "Request help from the community [please use English :)]"
 labels:
-- help-wanted
+  - help-wanted
 body:
-- type: checkboxes
-  attributes:
-    label: Self Checks
-    description: "To make sure we get to you in time, please check the following :)"
-    options:
-      - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
-        required: true
-      - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
-        required: true
-- type: textarea
-  attributes:
-    label: Provide a description of the help you need
-    placeholder: Briefly describe what you need help with.
-  validations:
-    required: true
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "To make sure we get to you in time, please check the following :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+          required: true
+  - type: textarea
+    attributes:
+      label: Provide a description of the help you need
+      placeholder: Briefly describe what you need help with.
+    validations:
+      required: true

+ 48 - 48
.github/ISSUE_TEMPLATE/translation_issue.yml

@@ -1,52 +1,52 @@
 name: "🌐 Localization/Translation issue"
 description: Report incorrect translations. [please use English :)]
 labels:
-- translation
+  - translation
 body:
-- type: checkboxes
-  attributes:
-    label: Self Checks
-    description: "To make sure we get to you in time, please check the following :)"
-    options:
-      - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
-        required: true
-      - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
-        required: true
-- type: input
-  attributes:
-    label: Dify version
-    placeholder: 0.3.21
-    description: Hover over system tray icon or look at Settings
-  validations:
-    required: true
-- type: input
-  attributes:
-    label: Utility with translation issue
-    placeholder: Some area
-    description: Please input here the utility with the translation issue
-  validations:
-    required: true
-- type: input
-  attributes:
-    label: 🌐 Language affected
-    placeholder: "German"
-  validations:
-    required: true
-- type: textarea
-  attributes: 
-    label: ❌ Actual phrase(s)
-    placeholder: What is there? Please include a screenshot as that is extremely helpful.
-  validations:
-    required: true
-- type: textarea
-  attributes: 
-    label: ✔️ Expected phrase(s)
-    placeholder: What was expected?
-  validations:
-    required: true
-- type: textarea
-  attributes:
-    label: ℹ Why is the current translation wrong
-    placeholder: Why do you feel this is incorrect?
-  validations:
-    required: true
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "To make sure we get to you in time, please check the following :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to file this report (我已阅读并同意 [Language Policy](https://github.com/langgenius/dify/issues/1542)).
+          required: true
+  - type: input
+    attributes:
+      label: Dify version
+      placeholder: 0.3.21
+      description: Hover over system tray icon or look at Settings
+    validations:
+      required: true
+  - type: input
+    attributes:
+      label: Utility with translation issue
+      placeholder: Some area
+      description: Please input here the utility with the translation issue
+    validations:
+      required: true
+  - type: input
+    attributes:
+      label: 🌐 Language affected
+      placeholder: "German"
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: ❌ Actual phrase(s)
+      placeholder: What is there? Please include a screenshot as that is extremely helpful.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: ✔️ Expected phrase(s)
+      placeholder: What was expected?
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: ℹ Why is the current translation wrong
+      placeholder: Why do you feel this is incorrect?
+    validations:
+      required: true

+ 1 - 1
.github/linters/.hadolint.yaml

@@ -1 +1 @@
-failure-threshold: "error"
+failure-threshold: "error"

+ 2 - 6
.github/linters/.yaml-lint.yml

@@ -5,11 +5,7 @@ extends: default
 rules:
   brackets:
     max-spaces-inside: 1
+  comments-indentation: disable
   document-start: disable
-  indentation:
-    level: warning
   line-length: disable
-  new-line-at-end-of-file:
-    level: warning
-  trailing-spaces:
-    level: warning
+  truthy: disable

+ 12 - 12
.github/workflows/api-model-runtime-tests.yml

@@ -32,18 +32,18 @@ jobs:
       MOCK_SWITCH: true
 
     steps:
-    - name: Checkout code
-      uses: actions/checkout@v4
+      - name: Checkout code
+        uses: actions/checkout@v4
 
-    - name: Set up Python
-      uses: actions/setup-python@v5
-      with:
-        python-version: '3.10'
-        cache: 'pip'
-        cache-dependency-path: ./api/requirements.txt
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
+          cache: 'pip'
+          cache-dependency-path: ./api/requirements.txt
 
-    - name: Install dependencies
-      run: pip install -r ./api/requirements.txt
+      - name: Install dependencies
+        run: pip install -r ./api/requirements.txt
 
-    - name: Run pytest
-      run: pytest api/tests/integration_tests/model_runtime/anthropic api/tests/integration_tests/model_runtime/azure_openai api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py
+      - name: Run pytest
+        run: pytest api/tests/integration_tests/model_runtime/anthropic api/tests/integration_tests/model_runtime/azure_openai api/tests/integration_tests/model_runtime/openai api/tests/integration_tests/model_runtime/chatglm api/tests/integration_tests/model_runtime/google api/tests/integration_tests/model_runtime/xinference api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py

+ 41 - 41
.github/workflows/build-api-image.yml

@@ -6,55 +6,55 @@ on:
       - 'main'
       - 'deploy/dev'
   release:
-    types: [published]
+    types: [ published ]
 
 jobs:
   build-and-push:
     runs-on: ubuntu-latest
     if: github.event.pull_request.draft == false
     steps:
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v3
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
 
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
 
-    - name: Login to Docker Hub
-      uses: docker/login-action@v2
-      with:
-        username: ${{ secrets.DOCKERHUB_USER }}
-        password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
-    - name: Extract metadata (tags, labels) for Docker
-      id: meta
-      uses: docker/metadata-action@v5
-      with:
-        images: langgenius/dify-api
-        tags: |
-          type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
-          type=ref,event=branch
-          type=sha,enable=true,priority=100,prefix=,suffix=,format=long
-          type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: langgenius/dify-api
+          tags: |
+            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
+            type=ref,event=branch
+            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
+            type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
 
-    - name: Build and push
-      uses: docker/build-push-action@v5
-      with:
-        context: "{{defaultContext}}:api"
-        platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
-        build-args: |
-          COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
-        push: true
-        tags: ${{ steps.meta.outputs.tags }}
-        labels: ${{ steps.meta.outputs.labels }}
-        cache-from: type=gha
-        cache-to: type=gha,mode=max
+      - name: Build and push
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:api"
+          platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
+          build-args: |
+            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
 
-    - name: Deploy to server
-      if: github.ref == 'refs/heads/deploy/dev'
-      uses: appleboy/ssh-action@v0.1.8
-      with:
-        host: ${{ secrets.SSH_HOST }}
-        username: ${{ secrets.SSH_USER }}
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        script: |
-          ${{ secrets.SSH_SCRIPT }}
+      - name: Deploy to server
+        if: github.ref == 'refs/heads/deploy/dev'
+        uses: appleboy/ssh-action@v0.1.8
+        with:
+          host: ${{ secrets.SSH_HOST }}
+          username: ${{ secrets.SSH_USER }}
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          script: |
+            ${{ secrets.SSH_SCRIPT }}

+ 41 - 41
.github/workflows/build-web-image.yml

@@ -6,55 +6,55 @@ on:
       - 'main'
       - 'deploy/dev'
   release:
-    types: [published]
+    types: [ published ]
 
 jobs:
   build-and-push:
     runs-on: ubuntu-latest
     if: github.event.pull_request.draft == false
     steps:
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v3
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
 
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
 
-    - name: Login to Docker Hub
-      uses: docker/login-action@v2
-      with:
-        username: ${{ secrets.DOCKERHUB_USER }}
-        password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
 
-    - name: Extract metadata (tags, labels) for Docker
-      id: meta
-      uses: docker/metadata-action@v5
-      with:
-        images: langgenius/dify-web
-        tags: |
-          type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
-          type=ref,event=branch
-          type=sha,enable=true,priority=100,prefix=,suffix=,format=long
-          type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: langgenius/dify-web
+          tags: |
+            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
+            type=ref,event=branch
+            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
+            type=raw,value=${{ github.ref_name }},enable=${{ startsWith(github.ref, 'refs/tags/') }}
 
-    - name: Build and push
-      uses: docker/build-push-action@v5
-      with:
-        context: "{{defaultContext}}:web"
-        platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
-        build-args: |
-          COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
-        push: true
-        tags: ${{ steps.meta.outputs.tags }}
-        labels: ${{ steps.meta.outputs.labels }}
-        cache-from: type=gha
-        cache-to: type=gha,mode=max
+      - name: Build and push
+        uses: docker/build-push-action@v5
+        with:
+          context: "{{defaultContext}}:web"
+          platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}
+          build-args: |
+            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
 
-    - name: Deploy to server
-      if: github.ref == 'refs/heads/deploy/dev'
-      uses: appleboy/ssh-action@v0.1.8
-      with:
-        host: ${{ secrets.SSH_HOST }}
-        username: ${{ secrets.SSH_USER }}
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        script: |
-          ${{ secrets.SSH_SCRIPT }}
+      - name: Deploy to server
+        if: github.ref == 'refs/heads/deploy/dev'
+        uses: appleboy/ssh-action@v0.1.8
+        with:
+          host: ${{ secrets.SSH_HOST }}
+          username: ${{ secrets.SSH_USER }}
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          script: |
+            ${{ secrets.SSH_SCRIPT }}

+ 11 - 11
.github/workflows/stale.yml

@@ -7,7 +7,7 @@ name: Mark stale issues and pull requests
 
 on:
   schedule:
-  - cron: '0 3 * * *'
+    - cron: '0 3 * * *'
 
 jobs:
   stale:
@@ -18,13 +18,13 @@ jobs:
       pull-requests: write
 
     steps:
-    - uses: actions/stale@v5
-      with:
-        days-before-issue-stale: 15
-        days-before-issue-close: 3
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-        stale-issue-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
-        stale-pr-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
-        stale-issue-label: 'no-issue-activity'
-        stale-pr-label: 'no-pr-activity'
-        any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement,cant-reproduce,help-wanted'
+      - uses: actions/stale@v5
+        with:
+          days-before-issue-stale: 15
+          days-before-issue-close: 3
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
+          stale-pr-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
+          stale-issue-label: 'no-issue-activity'
+          stale-pr-label: 'no-pr-activity'
+          any-of-labels: 'duplicate,question,invalid,wontfix,no-issue-activity,no-pr-activity,enhancement,cant-reproduce,help-wanted'

+ 34 - 34
.github/workflows/style.yml

@@ -18,37 +18,37 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-    - name: Checkout code
-      uses: actions/checkout@v4
-
-    - name: Setup NodeJS
-      uses: actions/setup-node@v4
-      with:
-        node-version: 18
-        cache: yarn
-        cache-dependency-path: ./web/package.json
-
-    - name: Web dependencies
-      run: |
-        cd ./web
-        yarn install --frozen-lockfile
-
-    - name: Web style check
-      run: |
-        cd ./web
-        yarn run lint
-
-    - name: Super-linter
-      uses: super-linter/super-linter/slim@v5
-      env:
-        BASH_SEVERITY: warning
-        DEFAULT_BRANCH: main
-        ERROR_ON_MISSING_EXEC_BIT: true
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        IGNORE_GENERATED_FILES: true
-        IGNORE_GITIGNORED_FILES: true
-        VALIDATE_BASH: true
-        VALIDATE_BASH_EXEC: true
-        VALIDATE_GITHUB_ACTIONS: true
-        VALIDATE_DOCKERFILE_HADOLINT: true
-        VALIDATE_YAML: true
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Setup NodeJS
+        uses: actions/setup-node@v4
+        with:
+          node-version: 18
+          cache: yarn
+          cache-dependency-path: ./web/package.json
+
+      - name: Web dependencies
+        run: |
+          cd ./web
+          yarn install --frozen-lockfile
+
+      - name: Web style check
+        run: |
+          cd ./web
+          yarn run lint
+
+      - name: Super-linter
+        uses: super-linter/super-linter/slim@v5
+        env:
+          BASH_SEVERITY: warning
+          DEFAULT_BRANCH: main
+          ERROR_ON_MISSING_EXEC_BIT: true
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          IGNORE_GENERATED_FILES: true
+          IGNORE_GITIGNORED_FILES: true
+          VALIDATE_BASH: true
+          VALIDATE_BASH_EXEC: true
+          VALIDATE_GITHUB_ACTIONS: true
+          VALIDATE_DOCKERFILE_HADOLINT: true
+          VALIDATE_YAML: true

+ 1 - 1
api/core/model_runtime/model_providers/_position.yaml

@@ -17,4 +17,4 @@
 - xinference
 - openllm
 - localai
-- openai_api_compatible
+- openai_api_compatible

+ 18 - 18
api/core/model_runtime/model_providers/anthropic/anthropic.yaml

@@ -16,24 +16,24 @@ help:
   url:
     en_US: https://console.anthropic.com/account/keys
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: anthropic_api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: anthropic_api_url
-    label:
-      en_US: API URL
-    type: text-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的 API URL
-      en_US: Enter your API URL
+    - variable: anthropic_api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: anthropic_api_url
+      label:
+        en_US: API URL
+      type: text-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的 API URL
+        en_US: Enter your API URL

+ 21 - 21
api/core/model_runtime/model_providers/anthropic/llm/claude-2.1.yaml

@@ -3,32 +3,32 @@ label:
   en_US: claude-2.1
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 200000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens_to_sample
-  use_template: max_tokens
-  required: true
-  default: 4096
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens_to_sample
+    use_template: max_tokens
+    required: true
+    default: 4096
+    min: 1
+    max: 4096
 pricing:
   input: '8.00'
   output: '24.00'
   unit: '0.000001'
-  currency: USD
+  currency: USD

+ 21 - 21
api/core/model_runtime/model_providers/anthropic/llm/claude-2.yaml

@@ -3,32 +3,32 @@ label:
   en_US: claude-2
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 100000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens_to_sample
-  use_template: max_tokens
-  required: true
-  default: 4096
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens_to_sample
+    use_template: max_tokens
+    required: true
+    default: 4096
+    min: 1
+    max: 4096
 pricing:
   input: '8.00'
   output: '24.00'
   unit: '0.000001'
-  currency: USD
+  currency: USD

+ 21 - 21
api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.yaml

@@ -2,32 +2,32 @@ model: claude-instant-1
 label:
   en_US: claude-instant-1
 model_type: llm
-features: []
+features: [ ]
 model_properties:
   mode: chat
   context_size: 100000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens_to_sample
-  use_template: max_tokens
-  required: true
-  default: 4096
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens_to_sample
+    use_template: max_tokens
+    required: true
+    default: 4096
+    min: 1
+    max: 4096
 pricing:
   input: '1.63'
   output: '5.51'
   unit: '0.000001'
-  currency: USD
+  currency: USD

+ 79 - 79
api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml

@@ -13,10 +13,10 @@ help:
   url:
     en_US: https://azure.microsoft.com/en-us/products/ai-services/openai-service
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
@@ -26,79 +26,79 @@ model_credential_schema:
       en_US: Enter your Deployment Name here, matching the Azure deployment name.
       zh_Hans: 在此输入您的部署名称,与 Azure 部署名称匹配。
   credential_form_schemas:
-  - variable: openai_api_base
-    label:
-      en_US: API Endpoint URL
-      zh_Hans: API 域名
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: '在此输入您的 API 域名,如:https://example.com/xxx'
-      en_US: 'Enter your API Endpoint, eg: https://example.com/xxx'
-  - variable: openai_api_key
-    label:
-      en_US: API Key
-      zh_Hans: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API key here
-  - variable: base_model_name
-    label:
-      en_US: Base Model
-      zh_Hans: 基础模型
-    type: select
-    required: true
-    options:
-    - label:
-        en_US: gpt-35-turbo
-      value: gpt-35-turbo
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-35-turbo-16k
-      value: gpt-35-turbo-16k
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-4
-      value: gpt-4
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-4-32k
-      value: gpt-4-32k
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-4-1106-preview
-      value: gpt-4-1106-preview
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-4-vision-preview
-      value: gpt-4-vision-preview
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: gpt-35-turbo-instruct
-      value: gpt-35-turbo-instruct
-      show_on:
-      - variable: __model_type
-        value: llm
-    - label:
-        en_US: text-embedding-ada-002
-      value: text-embedding-ada-002
-      show_on:
-        - variable: __model_type
-          value: text-embedding
-    placeholder:
-      zh_Hans: 在此输入您的模型版本
-      en_US: Enter your model version
+    - variable: openai_api_base
+      label:
+        en_US: API Endpoint URL
+        zh_Hans: API 域名
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: '在此输入您的 API 域名,如:https://example.com/xxx'
+        en_US: 'Enter your API Endpoint, eg: https://example.com/xxx'
+    - variable: openai_api_key
+      label:
+        en_US: API Key
+        zh_Hans: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API key here
+    - variable: base_model_name
+      label:
+        en_US: Base Model
+        zh_Hans: 基础模型
+      type: select
+      required: true
+      options:
+        - label:
+            en_US: gpt-35-turbo
+          value: gpt-35-turbo
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-35-turbo-16k
+          value: gpt-35-turbo-16k
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-4
+          value: gpt-4
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-4-32k
+          value: gpt-4-32k
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-4-1106-preview
+          value: gpt-4-1106-preview
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-4-vision-preview
+          value: gpt-4-vision-preview
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: gpt-35-turbo-instruct
+          value: gpt-35-turbo-instruct
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: text-embedding-ada-002
+          value: text-embedding-ada-002
+          show_on:
+            - variable: __model_type
+              value: text-embedding
+      placeholder:
+        zh_Hans: 在此输入您的模型版本
+        en_US: Enter your model version

+ 20 - 20
api/core/model_runtime/model_providers/baichuan/baichuan.yaml

@@ -8,30 +8,30 @@ icon_large:
 background: "#FFF6F2"
 help:
   title:
-    en_US: Get your API Key from BAICHUAN AI 
+    en_US: Get your API Key from BAICHUAN AI
     zh_Hans: 从百川智能获取您的 API Key
   url:
     en_US: https://www.baichuan-ai.com
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: secret_key
-    label:
-      en_US: Secret Key
-    type: secret-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的 Secret Key
-      en_US: Enter your Secret Key
+    - variable: api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: secret_key
+      label:
+        en_US: Secret Key
+      type: secret-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的 Secret Key
+        en_US: Enter your Secret Key

+ 33 - 33
api/core/model_runtime/model_providers/baichuan/llm/baichuan2-53b.yaml

@@ -3,40 +3,40 @@ label:
   en_US: Baichuan2-53B
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 4000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 1000
-  min: 1
-  max: 4000
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: with_search_enhance
-  label:
-    zh_Hans: 搜索增强
-    en_US: Search Enhance
-  type: boolean
-  help:
-    zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
-    en_US: Allow the model to perform external search to enhance the generation results.
-  required: false
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 1000
+    min: 1
+    max: 4000
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: with_search_enhance
+    label:
+      zh_Hans: 搜索增强
+      en_US: Search Enhance
+    type: boolean
+    help:
+      zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false

+ 33 - 33
api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo-192k.yaml

@@ -3,40 +3,40 @@ label:
   en_US: Baichuan2-Turbo-192K
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 192000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 8000
-  min: 1
-  max: 192000
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: with_search_enhance
-  label:
-    zh_Hans: 搜索增强
-    en_US: Search Enhance
-  type: boolean
-  help:
-    zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
-    en_US: Allow the model to perform external search to enhance the generation results.
-  required: false
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 8000
+    min: 1
+    max: 192000
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: with_search_enhance
+    label:
+      zh_Hans: 搜索增强
+      en_US: Search Enhance
+    type: boolean
+    help:
+      zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false

+ 33 - 33
api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo.yaml

@@ -3,40 +3,40 @@ label:
   en_US: Baichuan2-Turbo
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 192000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 8000
-  min: 1
-  max: 192000
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: with_search_enhance
-  label:
-    zh_Hans: 搜索增强
-    en_US: Search Enhance
-  type: boolean
-  help:
-    zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
-    en_US: Allow the model to perform external search to enhance the generation results.
-  required: false
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 8000
+    min: 1
+    max: 192000
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: with_search_enhance
+    label:
+      zh_Hans: 搜索增强
+      en_US: Search Enhance
+    type: boolean
+    help:
+      zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false

+ 1 - 1
api/core/model_runtime/model_providers/baichuan/text_embedding/baichuan-text-embedding.yaml

@@ -2,4 +2,4 @@ model: baichuan-text-embedding
 model_type: text-embedding
 model_properties:
   context_size: 512
-  max_chunks: 16
+  max_chunks: 16

+ 10 - 10
api/core/model_runtime/model_providers/chatglm/chatglm.yaml

@@ -13,16 +13,16 @@ help:
   url:
     en_US: https://github.com/THUDM/ChatGLM3
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_base
-    label:
-      en_US: API URL
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API URL
-      en_US: Enter your API URL
+    - variable: api_base
+      label:
+        en_US: API URL
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API URL
+        en_US: Enter your API URL

+ 12 - 12
api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b-32k.yaml

@@ -3,19 +3,19 @@ label:
   en_US: ChatGLM2-6B-32K
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 32000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 2000
-  min: 1
-  max: 32000
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 2000
+    min: 1
+    max: 32000

+ 12 - 12
api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b.yaml

@@ -3,19 +3,19 @@ label:
   en_US: ChatGLM2-6B
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 2000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 256
-  min: 1
-  max: 2000
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 256
+    min: 1
+    max: 2000

+ 13 - 13
api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b-32k.yaml

@@ -3,20 +3,20 @@ label:
   en_US: ChatGLM3-6B-32K
 model_type: llm
 features:
-- tool-call
-- agent-thought
+  - tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 32000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 8000
-  min: 1
-  max: 32000
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 8000
+    min: 1
+    max: 32000

+ 13 - 13
api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b.yaml

@@ -3,20 +3,20 @@ label:
   en_US: ChatGLM3-6B
 model_type: llm
 features:
-- tool-call
-- agent-thought
+  - tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 8000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-  required: false
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 256
-  min: 1
-  max: 8000
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+    required: false
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 256
+    min: 1
+    max: 8000

+ 12 - 12
api/core/model_runtime/model_providers/cohere/cohere.yaml

@@ -14,18 +14,18 @@ help:
   url:
     en_US: https://dashboard.cohere.com/api-keys
 supported_model_types:
-- rerank
+  - rerank
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_key
-    label:
-      zh_Hans: API Key
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 请填写 API Key
-      en_US: Please fill in API Key
-    show_on: []
+    - variable: api_key
+      label:
+        zh_Hans: API Key
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 请填写 API Key
+        en_US: Please fill in API Key
+      show_on: [ ]

+ 1 - 1
api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v2.0.yaml

@@ -1,4 +1,4 @@
 model: rerank-multilingual-v2.0
 model_type: rerank
 model_properties:
-  context_size: 5120
+  context_size: 5120

+ 10 - 11
api/core/model_runtime/model_providers/google/google.yaml

@@ -16,17 +16,16 @@ help:
   url:
     en_US: https://ai.google.dev/
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: google_api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  
+    - variable: google_api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key

+ 21 - 21
api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml

@@ -3,32 +3,32 @@ label:
   en_US: Gemini Pro Vision
 model_type: llm
 features:
-- vision
+  - vision
 model_properties:
   mode: chat
   context_size: 12288
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens_to_sample
-  use_template: max_tokens
-  required: true
-  default: 4096
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens_to_sample
+    use_template: max_tokens
+    required: true
+    default: 4096
+    min: 1
+    max: 4096
 pricing:
   input: '0.00'
   output: '0.00'
   unit: '0.000001'
-  currency: USD
+  currency: USD

+ 21 - 21
api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml

@@ -3,32 +3,32 @@ label:
   en_US: Gemini Pro
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 30720
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  help:
-    zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
-    en_US: Only sample from the top K options for each subsequent token.
-  required: false
-- name: max_tokens_to_sample
-  use_template: max_tokens
-  required: true
-  default: 2048
-  min: 1
-  max: 2048
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_tokens_to_sample
+    use_template: max_tokens
+    required: true
+    default: 2048
+    min: 1
+    max: 2048
 pricing:
   input: '0.00'
   output: '0.00'
   unit: '0.000001'
-  currency: USD
+  currency: USD

+ 74 - 74
api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.yaml

@@ -2,9 +2,9 @@ provider: huggingface_hub
 label:
   en_US: Hugging Face Model
 icon_small:
-    en_US: icon_s_en.svg
+  en_US: icon_s_en.svg
 icon_large:
-    en_US: icon_l_en.svg
+  en_US: icon_l_en.svg
 background: "#FFF8DC"
 help:
   title:
@@ -13,90 +13,90 @@ help:
   url:
     en_US: https://huggingface.co/settings/tokens
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
       en_US: Model Name
       zh_Hans: 模型名称
   credential_form_schemas:
-  - variable: huggingfacehub_api_type
-    label:
-      en_US: Endpoint Type
-      zh_Hans: 端点类型
-    type: radio
-    required: true
-    default: hosted_inference_api
-    options:
-    - value: hosted_inference_api
+    - variable: huggingfacehub_api_type
       label:
-        en_US: Hosted Inference API
-    - value: inference_endpoints
+        en_US: Endpoint Type
+        zh_Hans: 端点类型
+      type: radio
+      required: true
+      default: hosted_inference_api
+      options:
+        - value: hosted_inference_api
+          label:
+            en_US: Hosted Inference API
+        - value: inference_endpoints
+          label:
+            en_US: Inference Endpoints
+    - variable: huggingfacehub_api_token
       label:
-        en_US: Inference Endpoints
-  - variable: huggingfacehub_api_token
-    label:
-      en_US: API Token
-      zh_Hans: API Token
-    type: secret-input
-    required: true
-    placeholder:
-      en_US: Enter your Hugging Face Hub API Token here
-      zh_Hans: 在此输入您的 Hugging Face Hub API Token
-  - variable: huggingface_namespace
-    label:
-      en_US: 'User Name / Organization Name'
-      zh_Hans: '用户名 / 组织名称'
-    type: text-input
-    required: true
-    placeholder:
-      en_US: 'Enter your User Name / Organization Name here'
-      zh_Hans: '在此输入您的用户名 / 组织名称'
-    show_on:
-    - variable: __model_type
-      value: text-embedding
-    - variable: huggingfacehub_api_type
-      value: inference_endpoints
-  - variable: huggingfacehub_endpoint_url
-    label:
-      en_US: Endpoint URL
-      zh_Hans: 端点 URL
-    type: text-input
-    required: true
-    placeholder:
-      en_US: Enter your Endpoint URL here
-      zh_Hans: 在此输入您的端点 URL
-    show_on:
-    - variable: huggingfacehub_api_type
-      value: inference_endpoints
-  - variable: task_type
-    label:
-      en_US: Task
-      zh_Hans: Task
-    type: select
-    options:
-    - value: text2text-generation
+        en_US: API Token
+        zh_Hans: API Token
+      type: secret-input
+      required: true
+      placeholder:
+        en_US: Enter your Hugging Face Hub API Token here
+        zh_Hans: 在此输入您的 Hugging Face Hub API Token
+    - variable: huggingface_namespace
       label:
-        en_US: Text-to-Text Generation
+        en_US: 'User Name / Organization Name'
+        zh_Hans: '用户名 / 组织名称'
+      type: text-input
+      required: true
+      placeholder:
+        en_US: 'Enter your User Name / Organization Name here'
+        zh_Hans: '在此输入您的用户名 / 组织名称'
       show_on:
-      - variable: __model_type
-        value: llm
-    - value: text-generation
+        - variable: __model_type
+          value: text-embedding
+        - variable: huggingfacehub_api_type
+          value: inference_endpoints
+    - variable: huggingfacehub_endpoint_url
       label:
-        en_US: Text Generation
-        zh_Hans: 文本生成
+        en_US: Endpoint URL
+        zh_Hans: 端点 URL
+      type: text-input
+      required: true
+      placeholder:
+        en_US: Enter your Endpoint URL here
+        zh_Hans: 在此输入您的端点 URL
       show_on:
-      - variable: __model_type
-        value: llm
-    - value: feature-extraction
+        - variable: huggingfacehub_api_type
+          value: inference_endpoints
+    - variable: task_type
       label:
-        en_US: Feature Extraction
+        en_US: Task
+        zh_Hans: Task
+      type: select
+      options:
+        - value: text2text-generation
+          label:
+            en_US: Text-to-Text Generation
+          show_on:
+            - variable: __model_type
+              value: llm
+        - value: text-generation
+          label:
+            en_US: Text Generation
+            zh_Hans: 文本生成
+          show_on:
+            - variable: __model_type
+              value: llm
+        - value: feature-extraction
+          label:
+            en_US: Feature Extraction
+          show_on:
+            - variable: __model_type
+              value: text-embedding
       show_on:
-      - variable: __model_type
-        value: text-embedding
-    show_on:
-    - variable: huggingfacehub_api_type
-      value: inference_endpoints
+        - variable: huggingfacehub_api_type
+          value: inference_endpoints

+ 11 - 11
api/core/model_runtime/model_providers/jina/jina.yaml

@@ -2,7 +2,7 @@ provider: jina
 label:
   en_US: Jina
 description:
-    en_US: Embedding Model Supported
+  en_US: Embedding Model Supported
 icon_small:
   en_US: icon_s_en.svg
 icon_large:
@@ -15,16 +15,16 @@ help:
   url:
     en_US: https://jina.ai/embeddings/
 supported_model_types:
-- text-embedding
+  - text-embedding
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
+    - variable: api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key

+ 1 - 1
api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-en.yaml

@@ -6,4 +6,4 @@ model_properties:
 pricing:
   input: '0.001'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 1 - 1
api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-small-en.yaml

@@ -6,4 +6,4 @@ model_properties:
 pricing:
   input: '0.001'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 33 - 33
api/core/model_runtime/model_providers/localai/localai.yaml

@@ -13,10 +13,10 @@ help:
   url:
     en_US: https://github.com/go-skynet/LocalAI
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
@@ -26,33 +26,33 @@ model_credential_schema:
       en_US: Enter your model name
       zh_Hans: 输入模型名称
   credential_form_schemas:
-  - variable: completion_type
-    show_on:
-      - variable: __model_type
-        value: llm
-    label:
-      en_US: Completion type
-    type: select
-    required: false
-    default: chat_completion
-    placeholder:
-      zh_Hans: 选择对话类型
-      en_US: Select completion type
-    options:
-      - value: completion
-        label:
-          en_US: Completion
-          zh_Hans: 补全
-      - value: chat_completion
-        label:
-          en_US: ChatCompletion
-          zh_Hans: 对话
-  - variable: server_url
-    label:
-      zh_Hans: 服务器URL
-      en_US: Server url
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入LocalAI的服务器地址,如 https://example.com/xxx
-      en_US: Enter the url of your LocalAI, for example https://example.com/xxx
+    - variable: completion_type
+      show_on:
+        - variable: __model_type
+          value: llm
+      label:
+        en_US: Completion type
+      type: select
+      required: false
+      default: chat_completion
+      placeholder:
+        zh_Hans: 选择对话类型
+        en_US: Select completion type
+      options:
+        - value: completion
+          label:
+            en_US: Completion
+            zh_Hans: 补全
+        - value: chat_completion
+          label:
+            en_US: ChatCompletion
+            zh_Hans: 对话
+    - variable: server_url
+      label:
+        zh_Hans: 服务器URL
+        en_US: Server url
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入LocalAI的服务器地址,如 https://example.com/xxx
+        en_US: Enter the url of your LocalAI, for example https://example.com/xxx

+ 16 - 16
api/core/model_runtime/model_providers/minimax/llm/abab5-chat.yaml

@@ -3,27 +3,27 @@ label:
   en_US: Abab5-Chat
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 6144
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 6144
-  min: 1
-  max: 6144
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 6144
+    min: 1
+    max: 6144
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
 pricing:
   input: '0.00'
   output: '0.015'
   unit: '0.001'
-  currency: RMB
+  currency: RMB

+ 23 - 23
api/core/model_runtime/model_providers/minimax/llm/abab5.5-chat.yaml

@@ -3,34 +3,34 @@ label:
   en_US: Abab5.5-Chat
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 16384
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 6144
-  min: 1
-  max: 16384
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: plugin_web_search
-  required: false
-  default: false
-  type: boolean
-  label:
-    en_US: Enable Web Search
-    zh_Hans: 开启网页搜索
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 6144
+    min: 1
+    max: 16384
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: plugin_web_search
+    required: false
+    default: false
+    type: boolean
+    label:
+      en_US: Enable Web Search
+      zh_Hans: 开启网页搜索
 pricing:
   input: '0.00'
   output: '0.015'
   unit: '0.001'
-  currency: RMB
+  currency: RMB

+ 19 - 19
api/core/model_runtime/model_providers/minimax/minimax.yaml

@@ -13,25 +13,25 @@ help:
   url:
     en_US: https://api.minimax.chat/user-center/basic-information/interface-key
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: minimax_api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: minimax_group_id
-    label:
-      en_US: Group ID
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 Group ID
-      en_US: Enter your group ID
+    - variable: minimax_api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: minimax_group_id
+      label:
+        en_US: Group ID
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 Group ID
+        en_US: Enter your group ID

+ 1 - 1
api/core/model_runtime/model_providers/minimax/text_embedding/embo-01.yaml

@@ -6,4 +6,4 @@ model_properties:
 pricing:
   input: '0.0005'
   unit: '0.001'
-  currency: RMB
+  currency: RMB

+ 1 - 1
api/core/model_runtime/model_providers/openai/llm/_position.yaml

@@ -8,4 +8,4 @@
 - gpt-3.5-turbo-1106
 - gpt-3.5-turbo-0613
 - gpt-3.5-turbo-instruct
-- text-davinci-003
+- text-davinci-003

+ 16 - 16
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml

@@ -4,27 +4,27 @@ label:
   en_US: gpt-3.5-turbo-0613
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 4096
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
 pricing:
   input: '0.0015'
   output: '0.002'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 16 - 16
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml

@@ -4,27 +4,27 @@ label:
   en_US: gpt-3.5-turbo-1106
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 16385
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 16385
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 16385
 pricing:
   input: '0.001'
   output: '0.002'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 16 - 16
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml

@@ -4,27 +4,27 @@ label:
   en_US: gpt-3.5-turbo-16k-0613
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 16385
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 16385
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 16385
 pricing:
   input: '0.003'
   output: '0.004'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 16 - 16
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k.yaml

@@ -4,27 +4,27 @@ label:
   en_US: gpt-3.5-turbo-16k
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 16385
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 16385
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 16385
 pricing:
   input: '0.003'
   output: '0.004'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 15 - 15
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-instruct.yaml

@@ -3,26 +3,26 @@ label:
   zh_Hans: gpt-3.5-turbo-instruct
   en_US: gpt-3.5-turbo-instruct
 model_type: llm
-features: []
+features: [ ]
 model_properties:
   mode: completion
   context_size: 4096
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
 pricing:
   input: '0.0015'
   output: '0.002'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 16 - 16
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml

@@ -4,27 +4,27 @@ label:
   en_US: gpt-3.5-turbo
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 4096
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
 pricing:
   input: '0.001'
   output: '0.002'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 44 - 44
api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml

@@ -4,55 +4,55 @@ label:
   en_US: gpt-4-1106-preview
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 128000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 128000
-- name: seed
-  label:
-    zh_Hans: 种子
-    en_US: Seed
-  type: int
-  help:
-    zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
-      响应参数来监视变化。
-    en_US: If specified, model will make a best effort to sample deterministically,
-      such that repeated requests with the same seed and parameters should return
-      the same result. Determinism is not guaranteed, and you should refer to the
-      system_fingerprint response parameter to monitor changes in the backend.
-  required: false
-  precision: 2
-  min: 0
-  max: 1
-- name: response_format
-  label:
-    zh_Hans: 回复格式
-    en_US: response_format
-  type: string
-  help:
-    zh_Hans: 指定模型必须输出的格式
-    en_US: specifying the format that the model must output
-  required: false
-  options:
-  - text
-  - json_object
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 128000
+  - name: seed
+    label:
+      zh_Hans: 种子
+      en_US: Seed
+    type: int
+    help:
+      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
+        响应参数来监视变化。
+      en_US: If specified, model will make a best effort to sample deterministically,
+        such that repeated requests with the same seed and parameters should return
+        the same result. Determinism is not guaranteed, and you should refer to the
+        system_fingerprint response parameter to monitor changes in the backend.
+    required: false
+    precision: 2
+    min: 0
+    max: 1
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
 pricing:
   input: '0.01'
   output: '0.03'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 44 - 44
api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml

@@ -4,55 +4,55 @@ label:
   en_US: gpt-4-32k
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 32768
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 32768
-- name: seed
-  label:
-    zh_Hans: 种子
-    en_US: Seed
-  type: int
-  help:
-    zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
-      响应参数来监视变化。
-    en_US: If specified, model will make a best effort to sample deterministically,
-      such that repeated requests with the same seed and parameters should return
-      the same result. Determinism is not guaranteed, and you should refer to the
-      system_fingerprint response parameter to monitor changes in the backend.
-  required: false
-  precision: 2
-  min: 0
-  max: 1
-- name: response_format
-  label:
-    zh_Hans: 回复格式
-    en_US: response_format
-  type: string
-  help:
-    zh_Hans: 指定模型必须输出的格式
-    en_US: specifying the format that the model must output
-  required: false
-  options:
-  - text
-  - json_object
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 32768
+  - name: seed
+    label:
+      zh_Hans: 种子
+      en_US: Seed
+    type: int
+    help:
+      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
+        响应参数来监视变化。
+      en_US: If specified, model will make a best effort to sample deterministically,
+        such that repeated requests with the same seed and parameters should return
+        the same result. Determinism is not guaranteed, and you should refer to the
+        system_fingerprint response parameter to monitor changes in the backend.
+    required: false
+    precision: 2
+    min: 0
+    max: 1
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
 pricing:
   input: '0.06'
   output: '0.12'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 43 - 43
api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml

@@ -4,54 +4,54 @@ label:
   en_US: gpt-4-vision-preview
 model_type: llm
 features:
-- vision
+  - vision
 model_properties:
   mode: chat
   context_size: 128000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 128000
-- name: seed
-  label:
-    zh_Hans: 种子
-    en_US: Seed
-  type: int
-  help:
-    zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
-      响应参数来监视变化。
-    en_US: If specified, model will make a best effort to sample deterministically,
-      such that repeated requests with the same seed and parameters should return
-      the same result. Determinism is not guaranteed, and you should refer to the
-      system_fingerprint response parameter to monitor changes in the backend.
-  required: false
-  precision: 2
-  min: 0
-  max: 1
-- name: response_format
-  label:
-    zh_Hans: 回复格式
-    en_US: response_format
-  type: string
-  help:
-    zh_Hans: 指定模型必须输出的格式
-    en_US: specifying the format that the model must output
-  required: false
-  options:
-  - text
-  - json_object
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 128000
+  - name: seed
+    label:
+      zh_Hans: 种子
+      en_US: Seed
+    type: int
+    help:
+      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
+        响应参数来监视变化。
+      en_US: If specified, model will make a best effort to sample deterministically,
+        such that repeated requests with the same seed and parameters should return
+        the same result. Determinism is not guaranteed, and you should refer to the
+        system_fingerprint response parameter to monitor changes in the backend.
+    required: false
+    precision: 2
+    min: 0
+    max: 1
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
 pricing:
   input: '0.01'
   output: '0.03'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 44 - 44
api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml

@@ -4,55 +4,55 @@ label:
   en_US: gpt-4
 model_type: llm
 features:
-- multi-tool-call
-- agent-thought
+  - multi-tool-call
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 8192
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 8192
-- name: seed
-  label:
-    zh_Hans: 种子
-    en_US: Seed
-  type: int
-  help:
-    zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
-      响应参数来监视变化。
-    en_US: If specified, model will make a best effort to sample deterministically,
-      such that repeated requests with the same seed and parameters should return
-      the same result. Determinism is not guaranteed, and you should refer to the
-      system_fingerprint response parameter to monitor changes in the backend.
-  required: false
-  precision: 2
-  min: 0
-  max: 1
-- name: response_format
-  label:
-    zh_Hans: 回复格式
-    en_US: response_format
-  type: string
-  help:
-    zh_Hans: 指定模型必须输出的格式
-    en_US: specifying the format that the model must output
-  required: false
-  options:
-  - text
-  - json_object
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 8192
+  - name: seed
+    label:
+      zh_Hans: 种子
+      en_US: Seed
+    type: int
+    help:
+      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
+        响应参数来监视变化。
+      en_US: If specified, model will make a best effort to sample deterministically,
+        such that repeated requests with the same seed and parameters should return
+        the same result. Determinism is not guaranteed, and you should refer to the
+        system_fingerprint response parameter to monitor changes in the backend.
+    required: false
+    precision: 2
+    min: 0
+    max: 1
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
 pricing:
   input: '0.03'
   output: '0.06'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 15 - 15
api/core/model_runtime/model_providers/openai/llm/text-davinci-003.yaml

@@ -3,26 +3,26 @@ label:
   zh_Hans: text-davinci-003
   en_US: text-davinci-003
 model_type: llm
-features: []
+features: [ ]
 model_properties:
   mode: completion
   context_size: 4096
 parameter_rules:
-- name: temperature
-  use_template: temperature
-- name: top_p
-  use_template: top_p
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 4096
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
 pricing:
   input: '0.001'
   output: '0.002'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 1 - 1
api/core/model_runtime/model_providers/openai/moderation/text-moderation-stable.yaml

@@ -2,4 +2,4 @@ model: text-moderation-stable
 model_type: moderation
 model_properties:
   max_chunks: 32
-  max_characters_per_chunk: 2000
+  max_characters_per_chunk: 2000

+ 56 - 56
api/core/model_runtime/model_providers/openai/openai.yaml

@@ -2,8 +2,8 @@ provider: openai
 label:
   en_US: OpenAI
 description:
-    en_US: Models provided by OpenAI, such as GPT-3.5-Turbo and GPT-4.
-    zh_Hans: OpenAI 提供的模型,例如 GPT-3.5-Turbo 和 GPT-4。
+  en_US: Models provided by OpenAI, such as GPT-3.5-Turbo and GPT-4.
+  zh_Hans: OpenAI 提供的模型,例如 GPT-3.5-Turbo 和 GPT-4。
 icon_small:
   en_US: icon_s_en.svg
 icon_large:
@@ -16,13 +16,13 @@ help:
   url:
     en_US: https://platform.openai.com/account/api-keys
 supported_model_types:
-- llm
-- text-embedding
-- speech2text
-- moderation
+  - llm
+  - text-embedding
+  - speech2text
+  - moderation
 configurate_methods:
-- predefined-model
-- customizable-model
+  - predefined-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
@@ -32,57 +32,57 @@ model_credential_schema:
       en_US: Enter your model name
       zh_Hans: 输入模型名称
   credential_form_schemas:
-  - variable: openai_api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: openai_organization
-    label:
+    - variable: openai_api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: openai_organization
+      label:
         zh_Hans: 组织 ID
         en_US: Organization
-    type: text-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的组织 ID
-      en_US: Enter your Organization ID
-  - variable: openai_api_base
-    label:
-      zh_Hans: API Base
-      en_US: API Base
-    type: text-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的 API Base
-      en_US: Enter your API Base
+      type: text-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的组织 ID
+        en_US: Enter your Organization ID
+    - variable: openai_api_base
+      label:
+        zh_Hans: API Base
+        en_US: API Base
+      type: text-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的 API Base
+        en_US: Enter your API Base
 provider_credential_schema:
   credential_form_schemas:
-  - variable: openai_api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: openai_organization
-    label:
+    - variable: openai_api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: openai_organization
+      label:
         zh_Hans: 组织 ID
         en_US: Organization
-    type: text-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的组织 ID
-      en_US: Enter your Organization ID
-  - variable: openai_api_base
-    label:
-      zh_Hans: API Base
-      en_US: API Base
-    type: text-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的 API Base
-      en_US: Enter your API Base
+      type: text-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的组织 ID
+        en_US: Enter your Organization ID
+    - variable: openai_api_base
+      label:
+        zh_Hans: API Base
+        en_US: API Base
+      type: text-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的 API Base
+        en_US: Enter your API Base

+ 1 - 1
api/core/model_runtime/model_providers/openai/speech2text/whisper-1.yaml

@@ -2,4 +2,4 @@ model: whisper-1
 model_type: speech2text
 model_properties:
   file_upload_limit: 25
-  supported_file_extensions: mp3,mp4,mpeg,mpga,m4a,wav,webm
+  supported_file_extensions: mp3,mp4,mpeg,mpga,m4a,wav,webm

+ 1 - 1
api/core/model_runtime/model_providers/openai/text_embedding/text-embedidng-ada-002.yaml

@@ -6,4 +6,4 @@ model_properties:
 pricing:
   input: '0.0001'
   unit: '0.001'
-  currency: USD
+  currency: USD

+ 61 - 61
api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml

@@ -5,73 +5,73 @@ description:
   en_US: Model providers compatible with OpenAI's API standard, such as LM Studio.
   zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
       en_US: Model Name
       zh_Hans: 模型名称
     placeholder:
-      en_US: Enter full model name   
+      en_US: Enter full model name
       zh_Hans: 输入模型全称
   credential_form_schemas:
-  - variable: api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: false
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: endpoint_url
-    label:
-      zh_Hans: API endpoint URL
-      en_US: API endpoint URL
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: Base URL, eg. https://api.openai.com/v1
-      en_US: Base URL, eg. https://api.openai.com/v1
-  - variable: mode
-    show_on:
-      - variable: __model_type
-        value: llm
-    label:
-      en_US: Completion mode
-    type: select
-    required: false
-    default: chat
-    placeholder:
-      zh_Hans: 选择对话类型
-      en_US: Select completion mode
-    options:
-      - value: completion
-        label:
-          en_US: Completion
-          zh_Hans: 补全
-      - value: chat
-        label:
-          en_US: Chat
-          zh_Hans: 对话
-  - variable: context_size
-    label:
-      zh_Hans: 模型上下文长度
-      en_US: Model context size
-    required: true
-    type: text-input
-    default: '4096'
-    placeholder:
-      zh_Hans: 在此输入您的模型上下文长度
-      en_US: Enter your Model context size
-  - variable: max_tokens_to_sample
-    label:
-      zh_Hans: 最大 token 上限
-      en_US: Upper bound for max tokens
-    show_on:
-    - variable: __model_type
-      value: llm
-    default: '4096'
-    type: text-input
+    - variable: api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: false
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: endpoint_url
+      label:
+        zh_Hans: API endpoint URL
+        en_US: API endpoint URL
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: Base URL, eg. https://api.openai.com/v1
+        en_US: Base URL, eg. https://api.openai.com/v1
+    - variable: mode
+      show_on:
+        - variable: __model_type
+          value: llm
+      label:
+        en_US: Completion mode
+      type: select
+      required: false
+      default: chat
+      placeholder:
+        zh_Hans: 选择对话类型
+        en_US: Select completion mode
+      options:
+        - value: completion
+          label:
+            en_US: Completion
+            zh_Hans: 补全
+        - value: chat
+          label:
+            en_US: Chat
+            zh_Hans: 对话
+    - variable: context_size
+      label:
+        zh_Hans: 模型上下文长度
+        en_US: Model context size
+      required: true
+      type: text-input
+      default: '4096'
+      placeholder:
+        zh_Hans: 在此输入您的模型上下文长度
+        en_US: Enter your Model context size
+    - variable: max_tokens_to_sample
+      label:
+        zh_Hans: 最大 token 上限
+        en_US: Upper bound for max tokens
+      show_on:
+        - variable: __model_type
+          value: llm
+      default: '4096'
+      type: text-input

+ 12 - 12
api/core/model_runtime/model_providers/openllm/openllm.yaml

@@ -13,10 +13,10 @@ help:
   url:
     en_US: https://github.com/bentoml/OpenLLM
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
@@ -26,12 +26,12 @@ model_credential_schema:
       en_US: Enter your model name
       zh_Hans: 输入模型名称
   credential_form_schemas:
-  - variable: server_url
-    label:
-      zh_Hans: 服务器URL
-      en_US: Server url
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入OpenLLM的服务器地址,如 https://example.com/xxx
-      en_US: Enter the url of your OpenLLM, for example https://example.com/xxx
+    - variable: server_url
+      label:
+        zh_Hans: 服务器URL
+        en_US: Server url
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入OpenLLM的服务器地址,如 https://example.com/xxx
+        en_US: Enter the url of your OpenLLM, for example https://example.com/xxx

+ 19 - 19
api/core/model_runtime/model_providers/replicate/replicate.yaml

@@ -13,29 +13,29 @@ help:
   url:
     en_US: https://replicate.com/account/api-tokens
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
       en_US: Model Name
       zh_Hans: 模型名称
   credential_form_schemas:
-  - variable: replicate_api_token
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 Replicate API Key
-      en_US: Enter your Replicate API Key
-  - variable: model_version
-    label:
-      en_US: Model Version
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的模型版本
-      en_US: Enter your model version
+    - variable: replicate_api_token
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 Replicate API Key
+        en_US: Enter your Replicate API Key
+    - variable: model_version
+      label:
+        en_US: Model Version
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的模型版本
+        en_US: Enter your model version

+ 26 - 26
api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml

@@ -5,29 +5,29 @@ model_type: llm
 model_properties:
   mode: chat
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  default: 0.5
-  help:
-    zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-    en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-- name: max_tokens
-  use_template: max_tokens
-  default: 512
-  min: 1
-  max: 4096
-  help:
-    zh_Hans: 模型回答的tokens的最大长度。
-    en_US: 模型回答的tokens的最大长度。
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  default: 4
-  min: 1
-  max: 6
-  help:
-    zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
-    en_US: Randomly select one from k candidates (non-equal probability).
-  required: false
+  - name: temperature
+    use_template: temperature
+    default: 0.5
+    help:
+      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
+      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
+    help:
+      zh_Hans: 模型回答的tokens的最大长度。
+      en_US: Maximum length of tokens for the model response.
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    default: 4
+    min: 1
+    max: 6
+    help:
+      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
+      en_US: Randomly select one from k candidates (non-equal probability).
+    required: false

+ 26 - 26
api/core/model_runtime/model_providers/spark/llm/spark-2.yaml

@@ -6,29 +6,29 @@ model_type: llm
 model_properties:
   mode: chat
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  default: 0.5
-  help:
-    zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-    en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-- name: max_tokens
-  use_template: max_tokens
-  default: 2048
-  min: 1
-  max: 8192
-  help:
-    zh_Hans: 模型回答的tokens的最大长度。
-    en_US: 模型回答的tokens的最大长度。
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  default: 4
-  min: 1
-  max: 6
-  help:
-    zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
-    en_US: Randomly select one from k candidates (non-equal probability).
-  required: false
+  - name: temperature
+    use_template: temperature
+    default: 0.5
+    help:
+      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
+      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
+  - name: max_tokens
+    use_template: max_tokens
+    default: 2048
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 模型回答的tokens的最大长度。
+      en_US: Maximum length of tokens for the model response.
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    default: 4
+    min: 1
+    max: 6
+    help:
+      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
+      en_US: Randomly select one from k candidates (non-equal probability).
+    required: false

+ 26 - 26
api/core/model_runtime/model_providers/spark/llm/spark-3.yaml

@@ -5,29 +5,29 @@ model_type: llm
 model_properties:
   mode: chat
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  default: 0.5
-  help:
-    zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-    en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-- name: max_tokens
-  use_template: max_tokens
-  default: 2048
-  min: 1
-  max: 8192
-  help:
-    zh_Hans: 模型回答的tokens的最大长度。
-    en_US: 模型回答的tokens的最大长度。
-- name: top_k
-  label:
-    zh_Hans: 取样数量
-    en_US: Top k
-  type: int
-  default: 4
-  min: 1
-  max: 6
-  help:
-    zh_Hans: 从 k 个候选中随机选择⼀个(⾮等概率)。
-    en_US: Randomly select one from k candidates (non-equal probability).
-  required: false
+  - name: temperature
+    use_template: temperature
+    default: 0.5
+    help:
+      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
+      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
+  - name: max_tokens
+    use_template: max_tokens
+    default: 2048
+    min: 1
+    max: 8192
+    help:
+      zh_Hans: 模型回答的tokens的最大长度。
+      en_US: Maximum length of tokens for the model response.
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    default: 4
+    min: 1
+    max: 6
+    help:
+      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
+      en_US: Randomly select one from k candidates (non-equal probability).
+    required: false

+ 26 - 26
api/core/model_runtime/model_providers/spark/spark.yaml

@@ -15,32 +15,32 @@ help:
   url:
     en_US: https://www.xfyun.cn/solutions/xinghuoAPI
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: app_id
-    label:
-      en_US: APPID
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 APPID
-      en_US: Enter your APPID
-  - variable: api_secret
-    label:
-      en_US: APISecret
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 APISecret
-      en_US: Enter your APISecret
-  - variable: api_key
-    label:
-      en_US: APIKey
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 APIKey
-      en_US: Enter your APIKey
+    - variable: app_id
+      label:
+        en_US: APPID
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 APPID
+        en_US: Enter your APPID
+    - variable: api_secret
+      label:
+        en_US: APISecret
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 APISecret
+        en_US: Enter your APISecret
+    - variable: api_key
+      label:
+        en_US: APIKey
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 APIKey
+        en_US: Enter your APIKey

+ 53 - 53
api/core/model_runtime/model_providers/togetherai/togetherai.yaml

@@ -2,9 +2,9 @@ provider: togetherai
 label:
   en_US: together.ai
 icon_small:
-    en_US: togetherai_square.svg
+  en_US: togetherai_square.svg
 icon_large:
-    en_US: togetherai.svg
+  en_US: togetherai.svg
 background: "#F1EFED"
 help:
   title:
@@ -13,63 +13,63 @@ help:
   url:
     en_US: https://api.together.xyz/
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
       en_US: Model Name
       zh_Hans: 模型名称
     placeholder:
-      en_US: Enter full model name   
+      en_US: Enter full model name
       zh_Hans: 输入模型全称
   credential_form_schemas:
-  - variable: api_key
-    required: true
-    label:
-      en_US: API Key
-    type: secret-input
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: mode
-    show_on:
-      - variable: __model_type
-        value: llm
-    label:
-      en_US: Completion mode
-    type: select
-    required: false
-    default: chat
-    placeholder:
-      zh_Hans: 选择对话类型
-      en_US: Select completion mode
-    options:
-      - value: completion
-        label:
-          en_US: Completion
-          zh_Hans: 补全
-      - value: chat
-        label:
-          en_US: Chat
-          zh_Hans: 对话
-  - variable: context_size
-    label:
-      zh_Hans: 模型上下文长度
-      en_US: Model context size
-    required: true
-    type: text-input
-    default: '4096'
-    placeholder:
-      zh_Hans: 在此输入您的模型上下文长度
-      en_US: Enter your Model context size
-  - variable: max_tokens_to_sample
-    label:
-      zh_Hans: 最大 token 上限
-      en_US: Upper bound for max tokens
-    show_on:
-    - variable: __model_type
-      value: llm
-    default: '4096'
-    type: text-input
+    - variable: api_key
+      required: true
+      label:
+        en_US: API Key
+      type: secret-input
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: mode
+      show_on:
+        - variable: __model_type
+          value: llm
+      label:
+        en_US: Completion mode
+      type: select
+      required: false
+      default: chat
+      placeholder:
+        zh_Hans: 选择对话类型
+        en_US: Select completion mode
+      options:
+        - value: completion
+          label:
+            en_US: Completion
+            zh_Hans: 补全
+        - value: chat
+          label:
+            en_US: Chat
+            zh_Hans: 对话
+    - variable: context_size
+      label:
+        zh_Hans: 模型上下文长度
+        en_US: Model context size
+      required: true
+      type: text-input
+      default: '4096'
+      placeholder:
+        zh_Hans: 在此输入您的模型上下文长度
+        en_US: Enter your Model context size
+    - variable: max_tokens_to_sample
+      label:
+        zh_Hans: 最大 token 上限
+        en_US: Upper bound for max tokens
+      show_on:
+        - variable: __model_type
+          value: llm
+      default: '4096'
+      type: text-input

Fichier diff supprimé car celui-ci est trop grand
+ 49 - 49
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml


Fichier diff supprimé car celui-ci est trop grand
+ 50 - 50
api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml


+ 10 - 10
api/core/model_runtime/model_providers/tongyi/tongyi.yaml

@@ -15,16 +15,16 @@ help:
   url:
     en_US: https://dashscope.console.aliyun.com/api-key_management
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: dashscope_api_key
-    label:
-      en_US: APIKey
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 APIKey
-      en_US: Enter your APIKey
+    - variable: dashscope_api_key
+      label:
+        en_US: APIKey
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 APIKey
+        en_US: Enter your APIKey

+ 27 - 27
api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-4.yaml

@@ -3,34 +3,34 @@ label:
   en_US: Ernie Bot 4
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 4800
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  min: 0.1
-  max: 1.0
-  default: 0.8 
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 256
-  min: 1
-  max: 4800
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: disable_search
-  label:
-    zh_Hans: 禁用搜索
-    en_US: Disable Search
-  type: boolean
-  help:
-    zh_Hans: 禁用模型自行进行外部搜索。
-    en_US: Disable the model to perform external search.
-  required: false
+  - name: temperature
+    use_template: temperature
+    min: 0.1
+    max: 1.0
+    default: 0.8
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 256
+    min: 1
+    max: 4800
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: disable_search
+    label:
+      zh_Hans: 禁用搜索
+      en_US: Disable Search
+    type: boolean
+    help:
+      zh_Hans: 禁用模型自行进行外部搜索。
+      en_US: Disable the model to perform external search.
+    required: false

+ 27 - 27
api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-8k.yaml

@@ -3,34 +3,34 @@ label:
   en_US: Ernie Bot 8k
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 8000
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  min: 0.1
-  max: 1.0
-  default: 0.8 
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 1024
-  min: 1
-  max: 8000
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: disable_search
-  label:
-    zh_Hans: 禁用搜索
-    en_US: Disable Search
-  type: boolean
-  help:
-    zh_Hans: 禁用模型自行进行外部搜索。
-    en_US: Disable the model to perform external search.
-  required: false
+  - name: temperature
+    use_template: temperature
+    min: 0.1
+    max: 1.0
+    default: 0.8
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 1024
+    min: 1
+    max: 8000
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: disable_search
+    label:
+      zh_Hans: 禁用搜索
+      en_US: Disable Search
+    type: boolean
+    help:
+      zh_Hans: 禁用模型自行进行外部搜索。
+      en_US: Disable the model to perform external search.
+    required: false

+ 18 - 18
api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-turbo.yaml

@@ -3,25 +3,25 @@ label:
   en_US: Ernie Bot Turbo
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 11200
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  min: 0.1
-  max: 1.0
-  default: 0.8 
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 1024
-  min: 1
-  max: 11200
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
+  - name: temperature
+    use_template: temperature
+    min: 0.1
+    max: 1.0
+    default: 0.8
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 1024
+    min: 1
+    max: 11200
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty

+ 27 - 27
api/core/model_runtime/model_providers/wenxin/llm/ernie-bot.yaml

@@ -3,34 +3,34 @@ label:
   en_US: Ernie Bot
 model_type: llm
 features:
-- agent-thought
+  - agent-thought
 model_properties:
   mode: chat
   context_size: 4800
 parameter_rules:
-- name: temperature
-  use_template: temperature
-  min: 0.1
-  max: 1.0
-  default: 0.8 
-- name: top_p
-  use_template: top_p
-- name: max_tokens
-  use_template: max_tokens
-  required: true
-  default: 256
-  min: 1
-  max: 4800
-- name: presence_penalty
-  use_template: presence_penalty
-- name: frequency_penalty
-  use_template: frequency_penalty
-- name: disable_search
-  label:
-    zh_Hans: 禁用搜索
-    en_US: Disable Search
-  type: boolean
-  help:
-    zh_Hans: 禁用模型自行进行外部搜索。
-    en_US: Disable the model to perform external search.
-  required: false
+  - name: temperature
+    use_template: temperature
+    min: 0.1
+    max: 1.0
+    default: 0.8
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    required: true
+    default: 256
+    min: 1
+    max: 4800
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: disable_search
+    label:
+      zh_Hans: 禁用搜索
+      en_US: Disable Search
+    type: boolean
+    help:
+      zh_Hans: 禁用模型自行进行外部搜索。
+      en_US: Disable the model to perform external search.
+    required: false

+ 18 - 18
api/core/model_runtime/model_providers/wenxin/wenxin.yaml

@@ -16,24 +16,24 @@ help:
   url:
     en_US: https://cloud.baidu.com/wenxin.html
 supported_model_types:
-- llm
+  - llm
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_key
-    label:
-      en_US: API Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 API Key
-      en_US: Enter your API Key
-  - variable: secret_key
-    label:
-      en_US: Secret Key
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 Secret Key
-      en_US: Enter your Secret Key
+    - variable: api_key
+      label:
+        en_US: API Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 API Key
+        en_US: Enter your API Key
+    - variable: secret_key
+      label:
+        en_US: Secret Key
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 Secret Key
+        en_US: Enter your Secret Key

+ 22 - 22
api/core/model_runtime/model_providers/xinference/xinference.yaml

@@ -13,11 +13,11 @@ help:
   url:
     en_US: https://github.com/xorbitsai/inference
 supported_model_types:
-- llm
-- text-embedding
-- rerank
+  - llm
+  - text-embedding
+  - rerank
 configurate_methods:
-- customizable-model
+  - customizable-model
 model_credential_schema:
   model:
     label:
@@ -27,21 +27,21 @@ model_credential_schema:
       en_US: Enter your model name
       zh_Hans: 输入模型名称
   credential_form_schemas:
-  - variable: server_url
-    label:
-      zh_Hans: 服务器URL
-      en_US: Server url
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入Xinference的服务器地址,如 https://example.com/xxx
-      en_US: Enter the url of your Xinference, for example https://example.com/xxx
-  - variable: model_uid
-    label:
-      zh_Hans: 模型UID
-      en_US: Model uid
-    type: text-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的Model UID
-      en_US: Enter the model uid
+    - variable: server_url
+      label:
+        zh_Hans: 服务器URL
+        en_US: Server url
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入Xinference的服务器地址,如 https://example.com/xxx
+        en_US: Enter the url of your Xinference, for example https://example.com/xxx
+    - variable: model_uid
+      label:
+        zh_Hans: 模型UID
+        en_US: Model uid
+      type: text-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的Model UID
+        en_US: Enter the model uid

Fichier diff supprimé car celui-ci est trop grand
+ 15 - 15
api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite.yaml


Fichier diff supprimé car celui-ci est trop grand
+ 15 - 15
api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite_32k.yaml


Fichier diff supprimé car celui-ci est trop grand
+ 15 - 15
api/core/model_runtime/model_providers/zhipuai/llm/chatglm_pro.yaml


Fichier diff supprimé car celui-ci est trop grand
+ 15 - 15
api/core/model_runtime/model_providers/zhipuai/llm/chatglm_std.yaml


Fichier diff supprimé car celui-ci est trop grand
+ 35 - 35
api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml


+ 1 - 1
api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.yaml

@@ -1,4 +1,4 @@
 model: text_embedding
 model_type: text-embedding
 model_properties:
-  context_size: 512
+  context_size: 512

+ 11 - 11
api/core/model_runtime/model_providers/zhipuai/zhipuai.yaml

@@ -15,17 +15,17 @@ help:
   url:
     en_US: https://open.bigmodel.cn/usercenter/apikeys
 supported_model_types:
-- llm
-- text-embedding
+  - llm
+  - text-embedding
 configurate_methods:
-- predefined-model
+  - predefined-model
 provider_credential_schema:
   credential_form_schemas:
-  - variable: api_key
-    label:
-      en_US: APIKey
-    type: secret-input
-    required: true
-    placeholder:
-      zh_Hans: 在此输入您的 APIKey
-      en_US: Enter your APIKey
+    - variable: api_key
+      label:
+        en_US: APIKey
+      type: secret-input
+      required: true
+      placeholder:
+        zh_Hans: 在此输入您的 APIKey
+        en_US: Enter your APIKey

+ 1 - 1
docker/docker-compose.yaml

@@ -236,7 +236,7 @@ services:
     # ports:
     #   - "5432:5432"
     healthcheck:
-      test: ["CMD", "pg_isready"]
+      test: [ "CMD", "pg_isready" ]
       interval: 1s
       timeout: 3s
       retries: 30