
Merge main

Yeuoly, 10 months ago
Parent
Current commit
4eaba3049a
100 changed files with 3,827 additions and 454 deletions
  1. + 9 - 0  .gitignore
  2. + 2 - 2  CONTRIBUTING_CN.md
  3. + 4 - 7  api/core/embedding/cached_embedding.py
  4. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml
  5. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml
  6. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml
  7. + 4 - 4  api/core/model_runtime/model_providers/bedrock/llm/llm.py
  8. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml
  9. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml
  10. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml
  11. + 1 - 1  api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml
  12. + 2 - 2  api/core/model_runtime/model_providers/jina/jina.py
  13. + 9 - 0  api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v3.yaml
  14. + 3 - 0  api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
  15. + 1 - 0  api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml
  16. + 1 - 0  api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml
  17. + 2 - 2  api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml
  18. + 2 - 2  api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml
  19. + 81 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
  20. + 1 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
  21. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
  22. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
  23. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
  24. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
  25. + 1 - 1  api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
  26. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
  27. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
  28. + 47 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml
  29. + 47 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
  30. + 1 - 1  api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
  31. + 47 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
  32. + 1 - 1  api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
  33. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
  34. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
  35. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
  36. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
  37. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
  38. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
  39. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
  40. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
  41. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
  42. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
  43. + 79 - 0  api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
  44. + 2 - 2  api/core/model_runtime/model_providers/tongyi/tongyi.yaml
  45. + 1 - 1  api/core/rag/retrieval/dataset_retrieval.py
  46. + 8 - 0  api/core/tools/provider/builtin/brave/brave.yaml
  47. + 20 - 5  api/core/tools/provider/builtin/brave/tools/brave_search.py
  48. + 12 - 0  api/core/tools/provider/builtin/brave/tools/brave_search.yaml
  49. Binary  api/core/tools/provider/builtin/comfyui/_assets/icon.png
  50. + 17 - 0  api/core/tools/provider/builtin/comfyui/comfyui.py
  51. + 42 - 0  api/core/tools/provider/builtin/comfyui/comfyui.yaml
  52. + 475 - 0  api/core/tools/provider/builtin/comfyui/tools/comfyui_stable_diffusion.py
  53. + 212 - 0  api/core/tools/provider/builtin/comfyui/tools/comfyui_stable_diffusion.yaml
  54. + 107 - 0  api/core/tools/provider/builtin/comfyui/tools/txt2img.json
  55. + 3 - 1  api/core/tools/provider/builtin/feishu_document/tools/get_document_raw_content.py
  56. + 49 - 0  api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml
  57. + 0 - 23  api/core/tools/provider/builtin/feishu_document/tools/get_document_raw_content.yaml
  58. + 0 - 48  api/core/tools/provider/builtin/feishu_document/tools/list_document_block.yaml
  59. + 1 - 1  api/core/tools/provider/builtin/feishu_document/tools/list_document_block.py
  60. + 74 - 0  api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml
  61. + 18 - 15  api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml
  62. + 9 - 2  api/core/tools/provider/builtin/siliconflow/tools/flux.py
  63. + 18 - 3  api/core/tools/provider/builtin/siliconflow/tools/flux.yaml
  64. + 1 - 1  api/core/tools/provider/tool_provider.py
  65. + 5 - 2  api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py
  66. + 7 - 5  api/core/tools/utils/feishu_api_utils.py
  67. + 2 - 14  api/core/workflow/graph_engine/entities/graph.py
  68. + 17 - 12  api/core/workflow/graph_engine/graph_engine.py
  69. + 1 - 0  api/core/workflow/nodes/answer/answer_stream_generate_router.py
  70. + 82 - 117  api/core/workflow/nodes/iteration/iteration_node.py
  71. + 9 - 1  api/extensions/ext_sentry.py
  72. + 39 - 10  api/extensions/ext_storage.py
  73. + 14 - 34  api/extensions/storage/aliyun_storage.py
  74. + 13 - 13  api/poetry.lock
  75. + 1 - 1  api/pyproject.toml
  76. + 7 - 0  docker/.env.example
  77. + 1 - 0  docker/docker-compose.middleware.yaml
  78. + 2 - 1  docker/docker-compose.yaml
  79. + 282 - 0  sdks/python-client/dify_client/client.py
  80. + 1 - 1  sdks/python-client/setup.py
  81. + 148 - 1  sdks/python-client/tests/test_client.py
  82. + 3 - 0  web/.env.example
  83. + 27 - 0  web/.husky/pre-commit
  84. + 5 - 1  web/README.md
  85. + 7 - 2  web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx
  86. + 1 - 3  web/app/components/app/configuration/toolbox/annotation/annotation-ctrl-btn/index.tsx
  87. + 2 - 2  web/app/components/app/log/list.tsx
  88. + 3 - 2  web/app/components/app/overview/settings/index.tsx
  89. + 17 - 36  web/app/components/base/chat/chat/answer/index.tsx
  90. + 1 - 1  web/app/components/base/chat/chat/answer/operation.tsx
  91. + 2 - 2  web/app/components/base/chat/chat/index.tsx
  92. + 229 - 23  web/app/components/base/image-uploader/image-preview.tsx
  93. + 20 - 17  web/app/components/base/markdown.tsx
  94. + 79 - 0  web/app/components/base/svg-gallery/index.tsx
  95. + 18 - 0  web/app/components/datasets/create/step-two/escape.ts
  96. + 27 - 10  web/app/components/datasets/create/step-two/index.tsx
  97. + 54 - 0  web/app/components/datasets/create/step-two/unescape.ts
  98. + 9 - 3  web/app/components/share/text-generation/result/index.tsx
  99. + 8 - 9  web/app/components/share/text-generation/run-once/index.tsx
  100. + 0 - 0  web/app/components/tools/add-tool-modal/tools.tsx

+ 9 - 0
.gitignore

@@ -153,6 +153,9 @@ docker-legacy/volumes/etcd/*
 docker-legacy/volumes/minio/*
 docker-legacy/volumes/milvus/*
 docker-legacy/volumes/chroma/*
+docker-legacy/volumes/opensearch/data/*
+docker-legacy/volumes/pgvectors/data/*
+docker-legacy/volumes/pgvector/data/*
 
 docker/volumes/app/storage/*
 docker/volumes/certbot/*
@@ -164,6 +167,12 @@ docker/volumes/etcd/*
 docker/volumes/minio/*
 docker/volumes/milvus/*
 docker/volumes/chroma/*
+docker/volumes/opensearch/data/*
+docker/volumes/myscale/data/*
+docker/volumes/myscale/log/*
+docker/volumes/unstructured/*
+docker/volumes/pgvector/data/*
+docker/volumes/pgvecto_rs/data/*
 
 docker/nginx/conf.d/default.conf
 docker/middleware.env

+ 2 - 2
CONTRIBUTING_CN.md

@@ -36,7 +36,7 @@
   | 被团队成员标记为高优先级的功能    | 高优先级   |
   | 在 [community feedback board](https://github.com/langgenius/dify/discussions/categories/feedbacks) 内反馈的常见功能请求 | 中等优先级 |
   | 非核心功能和小幅改进                     | 低优先级    |
-  | 有价值不紧急                                   | 未来功能  |
+  | 有价值不紧急                                   | 未来功能  |
 
 ### 其他任何事情(例如 bug 报告、性能优化、拼写错误更正):
 * 立即开始编码。
@@ -138,7 +138,7 @@ Dify 的后端使用 Python 编写,使用 [Flask](https://flask.palletsproject
 ├── models                // 描述数据模型和 API 响应的形状
 ├── public                // 如 favicon 等元资源
 ├── service               // 定义 API 操作的形状
-├── test                  
+├── test
 ├── types                 // 函数参数和返回值的描述
 └── utils                 // 共享的实用函数
 ```

+ 4 - 7
api/core/embedding/cached_embedding.py

@@ -65,7 +65,7 @@ class CacheEmbedding(Embeddings):
                         except IntegrityError:
                             db.session.rollback()
                         except Exception as e:
-                            logging.exception("Failed transform embedding: ", e)
+                            logging.exception("Failed transform embedding: %s", e)
                 cache_embeddings = []
                 try:
                     for i, embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
@@ -85,7 +85,7 @@ class CacheEmbedding(Embeddings):
                     db.session.rollback()
             except Exception as ex:
                 db.session.rollback()
-                logger.error("Failed to embed documents: ", ex)
+                logger.error("Failed to embed documents: %s", ex)
                 raise ex
 
         return text_embeddings
@@ -116,10 +116,7 @@ class CacheEmbedding(Embeddings):
             # Transform to string
             encoded_str = encoded_vector.decode("utf-8")
             redis_client.setex(embedding_cache_key, 600, encoded_str)
-
-        except IntegrityError:
-            db.session.rollback()
-        except:
-            logging.exception("Failed to add embedding to redis")
+        except Exception as ex:
+            logging.exception("Failed to add embedding to redis %s", ex)
 
         return embedding_results
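
The hunks above move the error logs to lazy %-style formatting. A minimal illustration (the failure below is a made-up placeholder, not taken from the codebase): the logging module only interpolates extra positional arguments into the message when a matching placeholder such as %s is present, so the old trailing ", e" argument never made it into the logged message.

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        raise ValueError("embedding dimension mismatch")  # hypothetical failure
    except Exception as e:
        # New form: %s is filled with e when the record is emitted.
        logger.exception("Failed transform embedding: %s", e)
        # Old form (for contrast): an extra argument with no placeholder makes
        # the logging module report "--- Logging error ---" instead of the message.
        # logger.exception("Failed transform embedding: ", e)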

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml

@@ -1,6 +1,6 @@
 model: eu.anthropic.claude-3-haiku-20240307-v1:0
 label:
-  en_US: Claude 3 Haiku(Cross Region Inference)
+  en_US: Claude 3 Haiku(EU.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml

@@ -1,6 +1,6 @@
 model: eu.anthropic.claude-3-5-sonnet-20240620-v1:0
 label:
-  en_US: Claude 3.5 Sonnet(Cross Region Inference)
+  en_US: Claude 3.5 Sonnet(EU.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml

@@ -1,6 +1,6 @@
 model: eu.anthropic.claude-3-sonnet-20240229-v1:0
 label:
-  en_US: Claude 3 Sonnet(Cross Region Inference)
+  en_US: Claude 3 Sonnet(EU.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 4 - 4
api/core/model_runtime/model_providers/bedrock/llm/llm.py

@@ -1,8 +1,8 @@
 # standard import
 import base64
-import io
 import json
 import logging
+import mimetypes
 from collections.abc import Generator
 from typing import Optional, Union, cast
 
@@ -17,7 +17,6 @@ from botocore.exceptions import (
     ServiceNotInRegionError,
     UnknownServiceError,
 )
-from PIL.Image import Image
 
 # local import
 from core.model_runtime.callbacks.base_callback import Callback
@@ -443,8 +442,9 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                             try:
                                 url = message_content.data
                                 image_content = requests.get(url).content
-                                with Image.open(io.BytesIO(image_content)) as img:
-                                    mime_type = f"image/{img.format.lower()}"
+                                if "?" in url:
+                                    url = url.split("?")[0]
+                                mime_type, _ = mimetypes.guess_type(url)
                                 base64_data = base64.b64encode(image_content).decode("utf-8")
                             except Exception as ex:
                                 raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}")
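
The hunk above replaces Pillow-based format sniffing with a filename-based guess. A small sketch of that behavior with a hypothetical pre-signed URL; stripping the query string first matters because mimetypes.guess_type only looks at the path's extension:

    import mimetypes

    url = "https://example.com/images/photo.png?X-Amz-Expires=3600"  # placeholder URL
    if "?" in url:
        url = url.split("?")[0]              # drop the query string before guessing
    mime_type, _ = mimetypes.guess_type(url)
    print(mime_type)                         # -> image/png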

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml

@@ -1,6 +1,6 @@
 model: us.anthropic.claude-3-haiku-20240307-v1:0
 label:
-  en_US: Claude 3 Haiku(Cross Region Inference)
+  en_US: Claude 3 Haiku(US.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml

@@ -1,6 +1,6 @@
 model: us.anthropic.claude-3-opus-20240229-v1:0
 label:
-  en_US: Claude 3 Opus(Cross Region Inference)
+  en_US: Claude 3 Opus(US.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml

@@ -1,6 +1,6 @@
 model: us.anthropic.claude-3-5-sonnet-20240620-v1:0
 label:
-  en_US: Claude 3.5 Sonnet(Cross Region Inference)
+  en_US: Claude 3.5 Sonnet(US.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 1 - 1
api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml

@@ -1,6 +1,6 @@
 model: us.anthropic.claude-3-sonnet-20240229-v1:0
 label:
-  en_US: Claude 3 Sonnet(Cross Region Inference)
+  en_US: Claude 3 Sonnet(US.Cross Region Inference)
 model_type: llm
 features:
   - agent-thought

+ 2 - 2
api/core/model_runtime/model_providers/jina/jina.py

@@ -18,9 +18,9 @@ class JinaProvider(ModelProvider):
         try:
             model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING)
 
-            # Use `jina-embeddings-v2-base-en` model for validate,
+            # Use `jina-embeddings-v3` model for validate,
             # no matter what model you pass in, text completion model or chat model
-            model_instance.validate_credentials(model="jina-embeddings-v2-base-en", credentials=credentials)
+            model_instance.validate_credentials(model="jina-embeddings-v3", credentials=credentials)
         except CredentialsValidateFailedError as ex:
             raise ex
         except Exception as ex:

+ 9 - 0
api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v3.yaml

@@ -0,0 +1,9 @@
+model: jina-embeddings-v3
+model_type: text-embedding
+model_properties:
+  context_size: 8192
+  max_chunks: 2048
+pricing:
+  input: '0.001'
+  unit: '0.001'
+  currency: USD

+ 3 - 0
api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py

@@ -56,6 +56,9 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
 
         data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]}
 
+        if model == "jina-embeddings-v3":
+            data["task"] = "text-matching"
+
         try:
             response = post(url, headers=headers, data=dumps(data))
         except Exception as e:
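
Per the hunk above, requests for jina-embeddings-v3 now carry an extra task field. A rough sketch of the resulting payload; the texts are placeholders and the per-model input transform from the real code is omitted:

    from json import dumps

    model = "jina-embeddings-v3"
    texts = ["first passage", "second passage"]

    data = {"model": model, "input": texts}
    if model == "jina-embeddings-v3":
        data["task"] = "text-matching"  # task hint only sent for the v3 model

    print(dumps(data))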

+ 1 - 0
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml

@@ -31,3 +31,4 @@ pricing:
   output: '0.002'
   unit: '0.001'
   currency: USD
+deprecated: true

+ 1 - 0
api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml

@@ -31,3 +31,4 @@ pricing:
   output: '0.004'
   unit: '0.001'
   currency: USD
+deprecated: true

+ 2 - 2
api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml

@@ -11,9 +11,9 @@ model_properties:
 parameter_rules:
   - name: max_tokens
     use_template: max_tokens
-    default: 65563
+    default: 65536
     min: 1
-    max: 65563
+    max: 65536
   - name: response_format
     label:
       zh_Hans: 回复格式

+ 2 - 2
api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml

@@ -11,9 +11,9 @@ model_properties:
 parameter_rules:
   - name: max_tokens
     use_template: max_tokens
-    default: 65563
+    default: 65536
     min: 1
-    max: 65563
+    max: 65536
   - name: response_format
     label:
       zh_Hans: 回复格式
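
(The corrected limit is 2^16 = 65,536, matching the documented output-token cap for o1-mini; the earlier 65,563 looks like a typo.)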

File diff suppressed because it is too large
+ 81 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml


+ 1 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml

@@ -79,3 +79,4 @@ pricing:
   output: '0.12'
   unit: '0.001'
   currency: RMB
+deprecated: true

File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml


+ 1 - 1
api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml

@@ -6,7 +6,7 @@ features:
   - agent-thought
 model_properties:
   mode: completion
-  context_size: 32768
+  context_size: 131072
 parameter_rules:
   - name: temperature
     use_template: temperature
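
(131,072 = 128 × 1,024, i.e. qwen-plus is now declared with a 128K-token context window.)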

File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml


+ 47 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml

@@ -0,0 +1,47 @@
+model: qwen-vl-max-0201
+label:
+  en_US: qwen-vl-max-0201
+model_type: llm
+features:
+  - vision
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 8192
+parameter_rules:
+  - name: top_p
+    use_template: top_p
+    type: float
+    default: 0.8
+    min: 0.1
+    max: 0.9
+    help:
+      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
+      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
+  - name: top_k
+    type: int
+    min: 0
+    max: 99
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    help:
+      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
+      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
+  - name: seed
+    required: false
+    type: int
+    default: 1234
+    label:
+      zh_Hans: 随机种子
+      en_US: Random seed
+    help:
+      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
+      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
+  - name: response_format
+    use_template: response_format
+pricing:
+  input: '0.02'
+  output: '0.02'
+  unit: '0.001'
+  currency: RMB

+ 47 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml

@@ -0,0 +1,47 @@
+model: qwen-vl-max-0809
+label:
+  en_US: qwen-vl-max-0809
+model_type: llm
+features:
+  - vision
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: top_p
+    use_template: top_p
+    type: float
+    default: 0.8
+    min: 0.1
+    max: 0.9
+    help:
+      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
+      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
+  - name: top_k
+    type: int
+    min: 0
+    max: 99
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    help:
+      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
+      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
+  - name: seed
+    required: false
+    type: int
+    default: 1234
+    label:
+      zh_Hans: 随机种子
+      en_US: Random seed
+    help:
+      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
+      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
+  - name: response_format
+    use_template: response_format
+pricing:
+  input: '0.02'
+  output: '0.02'
+  unit: '0.001'
+  currency: RMB

+ 1 - 1
api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml

@@ -7,7 +7,7 @@ features:
   - agent-thought
 model_properties:
   mode: chat
-  context_size: 8192
+  context_size: 32768
 parameter_rules:
   - name: top_p
     use_template: top_p

+ 47 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml

@@ -0,0 +1,47 @@
+model: qwen-vl-plus-0809
+label:
+  en_US: qwen-vl-plus-0809
+model_type: llm
+features:
+  - vision
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: top_p
+    use_template: top_p
+    type: float
+    default: 0.8
+    min: 0.1
+    max: 0.9
+    help:
+      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
+      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
+  - name: top_k
+    type: int
+    min: 0
+    max: 99
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    help:
+      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
+      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
+  - name: seed
+    required: false
+    type: int
+    default: 1234
+    label:
+      zh_Hans: 随机种子
+      en_US: Random seed
+    help:
+      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
+      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
+  - name: response_format
+    use_template: response_format
+pricing:
+  input: '0.008'
+  output: '0.008'
+  unit: '0.001'
+  currency: RMB

+ 1 - 1
api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml

@@ -7,7 +7,7 @@ features:
   - agent-thought
 model_properties:
   mode: chat
-  context_size: 32768
+  context_size: 8192
 parameter_rules:
   - name: top_p
     use_template: top_p

File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml


File diff suppressed because it is too large
+ 79 - 0
api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml


+ 2 - 2
api/core/model_runtime/model_providers/tongyi/tongyi.yaml

@@ -11,9 +11,9 @@ background: "#EFF1FE"
 help:
   title:
     en_US: Get your API key from AliCloud
-    zh_Hans: 从阿里云获取 API Key
+    zh_Hans: 从阿里云百炼获取 API Key
   url:
-    en_US: https://dashscope.console.aliyun.com/api-key_management
+    en_US: https://bailian.console.aliyun.com/?apiKey=1#/api-key
 supported_model_types:
   - llm
   - tts

+ 1 - 1
api/core/rag/retrieval/dataset_retrieval.py

@@ -426,7 +426,7 @@ class DatasetRetrieval:
                         retrieval_method=retrieval_model["search_method"],
                         dataset_id=dataset.id,
                         query=query,
-                        top_k=top_k,
+                        top_k=retrieval_model.get("top_k") or 2,
                         score_threshold=retrieval_model.get("score_threshold", 0.0)
                         if retrieval_model["score_threshold_enabled"]
                         else 0.0,
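
The one-line change above takes top_k from the dataset's own retrieval settings, with a fallback of 2. Note the choice of `get(...) or 2` over `get(..., 2)`: the former also falls back when the stored value is None, not only when the key is missing. A small sketch with made-up values:

    retrieval_model = {"search_method": "semantic_search", "top_k": None}

    print(retrieval_model.get("top_k", 2))    # -> None (key exists, default unused)
    print(retrieval_model.get("top_k") or 2)  # -> 2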

+ 8 - 0
api/core/tools/provider/builtin/brave/brave.yaml

@@ -29,3 +29,11 @@ credentials_for_provider:
       zh_Hans: 从 Brave 获取您的 Brave Search API key
       pt_BR: Get your Brave Search API key from Brave
     url: https://brave.com/search/api/
+  base_url:
+    type: text-input
+    required: false
+    label:
+      en_US: Brave server's Base URL
+      zh_Hans: Brave服务器的API URL
+    placeholder:
+      en_US: https://api.search.brave.com/res/v1/web/search

+ 20 - 5
api/core/tools/provider/builtin/brave/tools/brave_search.py

@@ -7,6 +7,8 @@ from pydantic import BaseModel, Field
 from core.tools.entities.tool_entities import ToolInvokeMessage
 from core.tools.tool.builtin_tool import BuiltinTool
 
+BRAVE_BASE_URL = "https://api.search.brave.com/res/v1/web/search"
+
 
 class BraveSearchWrapper(BaseModel):
     """Wrapper around the Brave search engine."""
@@ -15,8 +17,10 @@ class BraveSearchWrapper(BaseModel):
     """The API key to use for the Brave search engine."""
     search_kwargs: dict = Field(default_factory=dict)
     """Additional keyword arguments to pass to the search request."""
-    base_url: str = "https://api.search.brave.com/res/v1/web/search"
+    base_url: str = BRAVE_BASE_URL
     """The base URL for the Brave search engine."""
+    ensure_ascii: bool = True
+    """Ensure the JSON output is ASCII encoded."""
 
     def run(self, query: str) -> str:
         """Query the Brave search engine and return the results as a JSON string.
@@ -36,7 +40,7 @@ class BraveSearchWrapper(BaseModel):
             }
             for item in web_search_results
         ]
-        return json.dumps(final_results)
+        return json.dumps(final_results, ensure_ascii=self.ensure_ascii)
 
     def _search_request(self, query: str) -> list[dict]:
         headers = {
@@ -68,7 +72,9 @@ class BraveSearch(BaseModel):
     search_wrapper: BraveSearchWrapper
 
     @classmethod
-    def from_api_key(cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any) -> "BraveSearch":
+    def from_api_key(
+        cls, api_key: str, base_url: str, search_kwargs: Optional[dict] = None, ensure_ascii: bool = True, **kwargs: Any
+    ) -> "BraveSearch":
         """Create a tool from an api key.
 
         Args:
@@ -79,7 +85,9 @@ class BraveSearch(BaseModel):
         Returns:
             A tool.
         """
-        wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {})
+        wrapper = BraveSearchWrapper(
+            api_key=api_key, base_url=base_url, search_kwargs=search_kwargs or {}, ensure_ascii=ensure_ascii
+        )
         return cls(search_wrapper=wrapper, **kwargs)
 
     def _run(
@@ -109,11 +117,18 @@ class BraveSearchTool(BuiltinTool):
         query = tool_parameters.get("query", "")
         count = tool_parameters.get("count", 3)
         api_key = self.runtime.credentials["brave_search_api_key"]
+        base_url = self.runtime.credentials.get("base_url", BRAVE_BASE_URL)
+        ensure_ascii = tool_parameters.get("ensure_ascii", True)
+
+        if len(base_url) == 0:
+            base_url = BRAVE_BASE_URL
 
         if not query:
             return self.create_text_message("Please input query")
 
-        tool = BraveSearch.from_api_key(api_key=api_key, search_kwargs={"count": count})
+        tool = BraveSearch.from_api_key(
+            api_key=api_key, base_url=base_url, search_kwargs={"count": count}, ensure_ascii=ensure_ascii
+        )
 
         results = tool._run(query)
 

+ 12 - 0
api/core/tools/provider/builtin/brave/tools/brave_search.yaml

@@ -39,3 +39,15 @@ parameters:
       pt_BR: O número de resultados de pesquisa a serem retornados, permitindo que os usuários controlem a amplitude de sua saída de pesquisa.
     llm_description: Specifies the amount of search results to be displayed, offering users the ability to adjust the scope of their search findings.
     form: llm
+  - name: ensure_ascii
+    type: boolean
+    default: true
+    label:
+      en_US: Ensure ASCII
+      zh_Hans: 确保 ASCII
+      pt_BR: Ensure ASCII
+    human_description:
+      en_US: Ensure the JSON output is ASCII encoded
+      zh_Hans: 确保输出的 JSON 是 ASCII 编码
+      pt_BR: Ensure the JSON output is ASCII encoded
+    form: form

Binary
api/core/tools/provider/builtin/comfyui/_assets/icon.png


+ 17 - 0
api/core/tools/provider/builtin/comfyui/comfyui.py

@@ -0,0 +1,17 @@
+from typing import Any
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.comfyui.tools.comfyui_stable_diffusion import ComfyuiStableDiffusionTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class ComfyUIProvider(BuiltinToolProviderController):
+    def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+        try:
+            ComfyuiStableDiffusionTool().fork_tool_runtime(
+                runtime={
+                    "credentials": credentials,
+                }
+            ).validate_models()
+        except Exception as e:
+            raise ToolProviderCredentialValidationError(str(e))

+ 42 - 0
api/core/tools/provider/builtin/comfyui/comfyui.yaml

@@ -0,0 +1,42 @@
+identity:
+  author: Qun
+  name: comfyui
+  label:
+    en_US: ComfyUI
+    zh_Hans: ComfyUI
+    pt_BR: ComfyUI
+  description:
+    en_US: ComfyUI is a tool for generating images which can be deployed locally.
+    zh_Hans: ComfyUI 是一个可以在本地部署的图片生成的工具。
+    pt_BR: ComfyUI is a tool for generating images which can be deployed locally.
+  icon: icon.png
+  tags:
+    - image
+credentials_for_provider:
+  base_url:
+    type: text-input
+    required: true
+    label:
+      en_US: Base URL
+      zh_Hans: ComfyUI服务器的Base URL
+      pt_BR: Base URL
+    placeholder:
+      en_US: Please input your ComfyUI server's Base URL
+      zh_Hans: 请输入你的 ComfyUI 服务器的 Base URL
+      pt_BR: Please input your ComfyUI server's Base URL
+  model:
+    type: text-input
+    required: true
+    label:
+      en_US: Model with suffix
+      zh_Hans: 模型, 需要带后缀
+      pt_BR: Model with suffix
+    placeholder:
+      en_US: Please input your model
+      zh_Hans: 请输入你的模型名称
+      pt_BR: Please input your model
+    help:
+      en_US: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors
+      zh_Hans: ComfyUI服务器的模型名称, 比如 xxx.safetensors
+      pt_BR: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors
+    url: https://docs.dify.ai/tutorials/tool-configuration/comfyui

+ 475 - 0
api/core/tools/provider/builtin/comfyui/tools/comfyui_stable_diffusion.py

@@ -0,0 +1,475 @@
+import json
+import os
+import random
+import uuid
+from copy import deepcopy
+from enum import Enum
+from typing import Any, Union
+
+import websocket
+from httpx import get, post
+from yarl import URL
+
+from core.tools.entities.common_entities import I18nObject
+from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolParameterOption
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.tool.builtin_tool import BuiltinTool
+
+SD_TXT2IMG_OPTIONS = {}
+LORA_NODE = {
+    "inputs": {"lora_name": "", "strength_model": 1, "strength_clip": 1, "model": ["11", 0], "clip": ["11", 1]},
+    "class_type": "LoraLoader",
+    "_meta": {"title": "Load LoRA"},
+}
+FluxGuidanceNode = {
+    "inputs": {"guidance": 3.5, "conditioning": ["6", 0]},
+    "class_type": "FluxGuidance",
+    "_meta": {"title": "FluxGuidance"},
+}
+
+
+class ModelType(Enum):
+    SD15 = 1
+    SDXL = 2
+    SD3 = 3
+    FLUX = 4
+
+
+class ComfyuiStableDiffusionTool(BuiltinTool):
+    def _invoke(
+        self, user_id: str, tool_parameters: dict[str, Any]
+    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+        """
+        invoke tools
+        """
+        # base url
+        base_url = self.runtime.credentials.get("base_url", "")
+        if not base_url:
+            return self.create_text_message("Please input base_url")
+
+        if tool_parameters.get("model"):
+            self.runtime.credentials["model"] = tool_parameters["model"]
+
+        model = self.runtime.credentials.get("model", None)
+        if not model:
+            return self.create_text_message("Please input model")
+
+        # prompt
+        prompt = tool_parameters.get("prompt", "")
+        if not prompt:
+            return self.create_text_message("Please input prompt")
+
+        # get negative prompt
+        negative_prompt = tool_parameters.get("negative_prompt", "")
+
+        # get size
+        width = tool_parameters.get("width", 1024)
+        height = tool_parameters.get("height", 1024)
+
+        # get steps
+        steps = tool_parameters.get("steps", 1)
+
+        # get sampler_name
+        sampler_name = tool_parameters.get("sampler_name", "euler")
+
+        # scheduler
+        scheduler = tool_parameters.get("scheduler", "normal")
+
+        # get cfg
+        cfg = tool_parameters.get("cfg", 7.0)
+
+        # get model type
+        model_type = tool_parameters.get("model_type", ModelType.SD15.name)
+
+        # get lora
+        # supports up to 3 loras
+        lora_list = []
+        lora_strength_list = []
+        if tool_parameters.get("lora_1"):
+            lora_list.append(tool_parameters["lora_1"])
+            lora_strength_list.append(tool_parameters.get("lora_strength_1", 1))
+        if tool_parameters.get("lora_2"):
+            lora_list.append(tool_parameters["lora_2"])
+            lora_strength_list.append(tool_parameters.get("lora_strength_2", 1))
+        if tool_parameters.get("lora_3"):
+            lora_list.append(tool_parameters["lora_3"])
+            lora_strength_list.append(tool_parameters.get("lora_strength_3", 1))
+
+        return self.text2img(
+            base_url=base_url,
+            model=model,
+            model_type=model_type,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height,
+            steps=steps,
+            sampler_name=sampler_name,
+            scheduler=scheduler,
+            cfg=cfg,
+            lora_list=lora_list,
+            lora_strength_list=lora_strength_list,
+        )
+
+    def get_checkpoints(self) -> list[str]:
+        """
+        get checkpoints
+        """
+        try:
+            base_url = self.runtime.credentials.get("base_url", None)
+            if not base_url:
+                return []
+            api_url = str(URL(base_url) / "models" / "checkpoints")
+            response = get(url=api_url, timeout=(2, 10))
+            if response.status_code != 200:
+                return []
+            else:
+                return response.json()
+        except Exception as e:
+            return []
+
+    def get_loras(self) -> list[str]:
+        """
+        get loras
+        """
+        try:
+            base_url = self.runtime.credentials.get("base_url", None)
+            if not base_url:
+                return []
+            api_url = str(URL(base_url) / "models" / "loras")
+            response = get(url=api_url, timeout=(2, 10))
+            if response.status_code != 200:
+                return []
+            else:
+                return response.json()
+        except Exception as e:
+            return []
+
+    def get_sample_methods(self) -> tuple[list[str], list[str]]:
+        """
+        get sample method
+        """
+        try:
+            base_url = self.runtime.credentials.get("base_url", None)
+            if not base_url:
+                return [], []
+            api_url = str(URL(base_url) / "object_info" / "KSampler")
+            response = get(url=api_url, timeout=(2, 10))
+            if response.status_code != 200:
+                return [], []
+            else:
+                data = response.json()["KSampler"]["input"]["required"]
+                return data["sampler_name"][0], data["scheduler"][0]
+        except Exception as e:
+            return [], []
+
+    def validate_models(self) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+        """
+        validate models
+        """
+        try:
+            base_url = self.runtime.credentials.get("base_url", None)
+            if not base_url:
+                raise ToolProviderCredentialValidationError("Please input base_url")
+            model = self.runtime.credentials.get("model", None)
+            if not model:
+                raise ToolProviderCredentialValidationError("Please input model")
+
+            api_url = str(URL(base_url) / "models" / "checkpoints")
+            response = get(url=api_url, timeout=(2, 10))
+            if response.status_code != 200:
+                raise ToolProviderCredentialValidationError("Failed to get models")
+            else:
+                models = response.json()
+                if len([d for d in models if d == model]) > 0:
+                    return self.create_text_message(json.dumps(models))
+                else:
+                    raise ToolProviderCredentialValidationError(f"model {model} does not exist")
+        except Exception as e:
+            raise ToolProviderCredentialValidationError(f"Failed to get models, {e}")
+
+    def get_history(self, base_url, prompt_id):
+        """
+        get history
+        """
+        url = str(URL(base_url) / "history")
+        respond = get(url, params={"prompt_id": prompt_id}, timeout=(2, 10))
+        return respond.json()
+
+    def download_image(self, base_url, filename, subfolder, folder_type):
+        """
+        download image
+        """
+        url = str(URL(base_url) / "view")
+        response = get(url, params={"filename": filename, "subfolder": subfolder, "type": folder_type}, timeout=(2, 10))
+        return response.content
+
+    def queue_prompt_image(self, base_url, client_id, prompt):
+        """
+        send prompt task and rotate
+        """
+        # initiate task execution
+        url = str(URL(base_url) / "prompt")
+        respond = post(url, data=json.dumps({"client_id": client_id, "prompt": prompt}), timeout=(2, 10))
+        prompt_id = respond.json()["prompt_id"]
+
+        ws = websocket.WebSocket()
+        if "https" in base_url:
+            ws_url = base_url.replace("https", "ws")
+        else:
+            ws_url = base_url.replace("http", "ws")
+        ws.connect(str(URL(f"{ws_url}") / "ws") + f"?clientId={client_id}", timeout=120)
+
+        # websocket rotate execution status
+        output_images = {}
+        while True:
+            out = ws.recv()
+            if isinstance(out, str):
+                message = json.loads(out)
+                if message["type"] == "executing":
+                    data = message["data"]
+                    if data["node"] is None and data["prompt_id"] == prompt_id:
+                        break  # Execution is done
+                elif message["type"] == "status":
+                    data = message["data"]
+                    if data["status"]["exec_info"]["queue_remaining"] == 0 and data.get("sid"):
+                        break  # Execution is done
+            else:
+                continue  # previews are binary data
+
+        # download image when execution finished
+        history = self.get_history(base_url, prompt_id)[prompt_id]
+        for o in history["outputs"]:
+            for node_id in history["outputs"]:
+                node_output = history["outputs"][node_id]
+                if "images" in node_output:
+                    images_output = []
+                    for image in node_output["images"]:
+                        image_data = self.download_image(base_url, image["filename"], image["subfolder"], image["type"])
+                        images_output.append(image_data)
+                    output_images[node_id] = images_output
+
+        ws.close()
+
+        return output_images
+
+    def text2img(
+        self,
+        base_url: str,
+        model: str,
+        model_type: str,
+        prompt: str,
+        negative_prompt: str,
+        width: int,
+        height: int,
+        steps: int,
+        sampler_name: str,
+        scheduler: str,
+        cfg: float,
+        lora_list: list,
+        lora_strength_list: list,
+    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+        """
+        generate image
+        """
+        if not SD_TXT2IMG_OPTIONS:
+            current_dir = os.path.dirname(os.path.realpath(__file__))
+            with open(os.path.join(current_dir, "txt2img.json")) as file:
+                SD_TXT2IMG_OPTIONS.update(json.load(file))
+
+        draw_options = deepcopy(SD_TXT2IMG_OPTIONS)
+        draw_options["3"]["inputs"]["steps"] = steps
+        draw_options["3"]["inputs"]["sampler_name"] = sampler_name
+        draw_options["3"]["inputs"]["scheduler"] = scheduler
+        draw_options["3"]["inputs"]["cfg"] = cfg
+        # generate different image when using same prompt next time
+        draw_options["3"]["inputs"]["seed"] = random.randint(0, 100000000)
+        draw_options["4"]["inputs"]["ckpt_name"] = model
+        draw_options["5"]["inputs"]["width"] = width
+        draw_options["5"]["inputs"]["height"] = height
+        draw_options["6"]["inputs"]["text"] = prompt
+        draw_options["7"]["inputs"]["text"] = negative_prompt
+        # if the model is SD3 or FLUX series, the Latent class should be corresponding to SD3 Latent
+        if model_type in {ModelType.SD3.name, ModelType.FLUX.name}:
+            draw_options["5"]["class_type"] = "EmptySD3LatentImage"
+
+        if lora_list:
+            # last Lora node link to KSampler node
+            draw_options["3"]["inputs"]["model"][0] = "10"
+            # last Lora node link to positive and negative Clip node
+            draw_options["6"]["inputs"]["clip"][0] = "10"
+            draw_options["7"]["inputs"]["clip"][0] = "10"
+            # every Lora node link to next Lora node, and Checkpoints node link to first Lora node
+            for i, (lora, strength) in enumerate(zip(lora_list, lora_strength_list), 10):
+                if i - 10 == len(lora_list) - 1:
+                    next_node_id = "4"
+                else:
+                    next_node_id = str(i + 1)
+                lora_node = deepcopy(LORA_NODE)
+                lora_node["inputs"]["lora_name"] = lora
+                lora_node["inputs"]["strength_model"] = strength
+                lora_node["inputs"]["strength_clip"] = strength
+                lora_node["inputs"]["model"][0] = next_node_id
+                lora_node["inputs"]["clip"][0] = next_node_id
+                draw_options[str(i)] = lora_node
+
+        # FLUX need to add FluxGuidance Node
+        if model_type == ModelType.FLUX.name:
+            last_node_id = str(10 + len(lora_list))
+            draw_options[last_node_id] = deepcopy(FluxGuidanceNode)
+            draw_options[last_node_id]["inputs"]["conditioning"][0] = "6"
+            draw_options["3"]["inputs"]["positive"][0] = last_node_id
+
+        try:
+            client_id = str(uuid.uuid4())
+            result = self.queue_prompt_image(base_url, client_id, prompt=draw_options)
+
+            # get first image
+            image = b""
+            for node in result:
+                for img in result[node]:
+                    if img:
+                        image = img
+                        break
+
+            return self.create_blob_message(
+                blob=image, meta={"mime_type": "image/png"}, save_as=self.VARIABLE_KEY.IMAGE.value
+            )
+
+        except Exception as e:
+            return self.create_text_message(f"Failed to generate image: {str(e)}")
+
+    def get_runtime_parameters(self) -> list[ToolParameter]:
+        parameters = [
+            ToolParameter(
+                name="prompt",
+                label=I18nObject(en_US="Prompt", zh_Hans="Prompt"),
+                human_description=I18nObject(
+                    en_US="Image prompt, you can check the official documentation of Stable Diffusion",
+                    zh_Hans="图像提示词,您可以查看 Stable Diffusion 的官方文档",
+                ),
+                type=ToolParameter.ToolParameterType.STRING,
+                form=ToolParameter.ToolParameterForm.LLM,
+                llm_description="Image prompt of Stable Diffusion, you should describe the image "
+                "you want to generate as a list of words as possible as detailed, "
+                "the prompt must be written in English.",
+                required=True,
+            ),
+        ]
+        if self.runtime.credentials:
+            try:
+                models = self.get_checkpoints()
+                if len(models) != 0:
+                    parameters.append(
+                        ToolParameter(
+                            name="model",
+                            label=I18nObject(en_US="Model", zh_Hans="Model"),
+                            human_description=I18nObject(
+                                en_US="Model of Stable Diffusion or FLUX, "
+                                "you can check the official documentation of Stable Diffusion or FLUX",
+                                zh_Hans="Stable Diffusion 或者 FLUX 的模型,您可以查看 Stable Diffusion 的官方文档",
+                            ),
+                            type=ToolParameter.ToolParameterType.SELECT,
+                            form=ToolParameter.ToolParameterForm.FORM,
+                            llm_description="Model of Stable Diffusion or FLUX, "
+                            "you can check the official documentation of Stable Diffusion or FLUX",
+                            required=True,
+                            default=models[0],
+                            options=[
+                                ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i)) for i in models
+                            ],
+                        )
+                    )
+                loras = self.get_loras()
+                if len(loras) != 0:
+                    for n in range(1, 4):
+                        parameters.append(
+                            ToolParameter(
+                                name=f"lora_{n}",
+                                label=I18nObject(en_US=f"Lora {n}", zh_Hans=f"Lora {n}"),
+                                human_description=I18nObject(
+                                    en_US="Lora of Stable Diffusion, "
+                                    "you can check the official documentation of Stable Diffusion",
+                                    zh_Hans="Stable Diffusion 的 Lora 模型,您可以查看 Stable Diffusion 的官方文档",
+                                ),
+                                type=ToolParameter.ToolParameterType.SELECT,
+                                form=ToolParameter.ToolParameterForm.FORM,
+                                llm_description="Lora of Stable Diffusion, "
+                                "you can check the official documentation of "
+                                "Stable Diffusion",
+                                required=False,
+                                options=[
+                                    ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i)) for i in loras
+                                ],
+                            )
+                        )
+                sample_methods, schedulers = self.get_sample_methods()
+                if len(sample_methods) != 0:
+                    parameters.append(
+                        ToolParameter(
+                            name="sampler_name",
+                            label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"),
+                            human_description=I18nObject(
+                                en_US="Sampling method of Stable Diffusion, "
+                                "you can check the official documentation of Stable Diffusion",
+                                zh_Hans="Stable Diffusion 的Sampling method,您可以查看 Stable Diffusion 的官方文档",
+                            ),
+                            type=ToolParameter.ToolParameterType.SELECT,
+                            form=ToolParameter.ToolParameterForm.FORM,
+                            llm_description="Sampling method of Stable Diffusion, "
+                            "you can check the official documentation of Stable Diffusion",
+                            required=True,
+                            default=sample_methods[0],
+                            options=[
+                                ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i))
+                                for i in sample_methods
+                            ],
+                        )
+                    )
+                if len(schedulers) != 0:
+                    parameters.append(
+                        ToolParameter(
+                            name="scheduler",
+                            label=I18nObject(en_US="Scheduler", zh_Hans="Scheduler"),
+                            human_description=I18nObject(
+                                en_US="Scheduler of Stable Diffusion, "
+                                "you can check the official documentation of Stable Diffusion",
+                                zh_Hans="Stable Diffusion 的Scheduler,您可以查看 Stable Diffusion 的官方文档",
+                            ),
+                            type=ToolParameter.ToolParameterType.SELECT,
+                            form=ToolParameter.ToolParameterForm.FORM,
+                            llm_description="Scheduler of Stable Diffusion, "
+                            "you can check the official documentation of Stable Diffusion",
+                            required=True,
+                            default=schedulers[0],
+                            options=[
+                                ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i)) for i in schedulers
+                            ],
+                        )
+                    )
+                parameters.append(
+                    ToolParameter(
+                        name="model_type",
+                        label=I18nObject(en_US="Model Type", zh_Hans="Model Type"),
+                        human_description=I18nObject(
+                            en_US="Model Type of Stable Diffusion or Flux, "
+                            "you can check the official documentation of Stable Diffusion or Flux",
+                            zh_Hans="Stable Diffusion 或 FLUX 的模型类型,"
+                            "您可以查看 Stable Diffusion 或 Flux 的官方文档",
+                        ),
+                        type=ToolParameter.ToolParameterType.SELECT,
+                        form=ToolParameter.ToolParameterForm.FORM,
+                        llm_description="Model Type of Stable Diffusion or Flux, "
+                        "you can check the official documentation of Stable Diffusion or Flux",
+                        required=True,
+                        default=ModelType.SD15.name,
+                        options=[
+                            ToolParameterOption(value=i, label=I18nObject(en_US=i, zh_Hans=i))
+                            for i in ModelType.__members__
+                        ],
+                    )
+                )
+            except:
+                pass
+
+        return parameters
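
For orientation, a condensed standalone sketch of the HTTP + WebSocket round trip this new tool performs against a ComfyUI server: submit the workflow graph to /prompt, wait on /ws until the prompt finishes, then fetch the rendered bytes via /history and /view. The server address and workflow file are placeholders and error handling is omitted; the endpoints and message shapes follow the code above.

    import json
    import uuid

    import websocket
    from httpx import get, post
    from yarl import URL

    base_url = "http://127.0.0.1:8188"            # hypothetical local ComfyUI server
    with open("txt2img.json") as f:
        workflow = json.load(f)                   # the bundled default txt2img graph

    # 1. queue the prompt
    client_id = str(uuid.uuid4())
    prompt_id = post(
        str(URL(base_url) / "prompt"),
        data=json.dumps({"client_id": client_id, "prompt": workflow}),
        timeout=(2, 10),
    ).json()["prompt_id"]

    # 2. wait on the websocket until the graph finishes executing
    ws = websocket.WebSocket()
    ws.connect(str(URL(base_url.replace("http", "ws")) / "ws") + f"?clientId={client_id}", timeout=120)
    while True:
        out = ws.recv()
        if isinstance(out, str):
            msg = json.loads(out)
            if msg["type"] == "executing" and msg["data"]["node"] is None \
                    and msg["data"]["prompt_id"] == prompt_id:
                break
    ws.close()

    # 3. pull the generated images out of the history
    history = get(str(URL(base_url) / "history"), params={"prompt_id": prompt_id}, timeout=(2, 10)).json()[prompt_id]
    for node_output in history["outputs"].values():
        for image in node_output.get("images", []):
            data = get(str(URL(base_url) / "view"), params=image, timeout=(2, 10)).content
            with open(image["filename"], "wb") as out_file:
                out_file.write(data)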

+ 212 - 0
api/core/tools/provider/builtin/comfyui/tools/comfyui_stable_diffusion.yaml

@@ -0,0 +1,212 @@
+identity:
+  name: txt2img workflow
+  author: Qun
+  label:
+    en_US: Txt2Img Workflow
+    zh_Hans: Txt2Img Workflow
+    pt_BR: Txt2Img Workflow
+description:
+  human:
+    en_US: a pre-defined comfyui workflow that can use one model and up to 3 loras to generate images. Support SD1.5, SDXL, SD3 and FLUX which contain text encoders/clip, but does not support models that requires a triple clip loader.
+    zh_Hans: 一个预定义的 ComfyUI 工作流,可以使用一个模型和最多3个loras来生成图像。支持包含文本编码器/clip的SD1.5、SDXL、SD3和FLUX,但不支持需要clip加载器的模型。
+    pt_BR: a pre-defined comfyui workflow that can use one model and up to 3 loras to generate images. Support SD1.5, SDXL, SD3 and FLUX which contain text encoders/clip, but does not support models that requires a triple clip loader.
+  llm: draw the image you want based on your prompt.
+parameters:
+  - name: prompt
+    type: string
+    required: true
+    label:
+      en_US: Prompt
+      zh_Hans: 提示词
+      pt_BR: Prompt
+    human_description:
+      en_US: Image prompt, you can check the official documentation of Stable Diffusion or FLUX
+      zh_Hans: 图像提示词,您可以查看 Stable Diffusion 或者 FLUX 的官方文档
+      pt_BR: Image prompt, you can check the official documentation of Stable Diffusion or FLUX
+    llm_description: Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English.
+    form: llm
+  - name: model
+    type: string
+    required: true
+    label:
+      en_US: Model Name
+      zh_Hans: 模型名称
+      pt_BR: Model Name
+    human_description:
+      en_US: Model Name
+      zh_Hans: 模型名称
+      pt_BR: Model Name
+    form: form
+  - name: model_type
+    type: string
+    required: true
+    label:
+      en_US: Model Type
+      zh_Hans: 模型类型
+      pt_BR: Model Type
+    human_description:
+      en_US: Model Type
+      zh_Hans: 模型类型
+      pt_BR: Model Type
+    form: form
+  - name: lora_1
+    type: string
+    required: false
+    label:
+      en_US: Lora 1
+      zh_Hans: Lora 1
+      pt_BR: Lora 1
+    human_description:
+      en_US: Lora 1
+      zh_Hans: Lora 1
+      pt_BR: Lora 1
+    form: form
+  - name: lora_strength_1
+    type: number
+    required: false
+    label:
+      en_US: Lora Strength 1
+      zh_Hans: Lora Strength 1
+      pt_BR: Lora Strength 1
+    human_description:
+      en_US: Lora Strength 1
+      zh_Hans: Lora模型的权重
+      pt_BR: Lora Strength 1
+    form: form
+  - name: steps
+    type: number
+    required: false
+    label:
+      en_US: Steps
+      zh_Hans: Steps
+      pt_BR: Steps
+    human_description:
+      en_US: Steps
+      zh_Hans: Steps
+      pt_BR: Steps
+    form: form
+    default: 20
+  - name: width
+    type: number
+    required: false
+    label:
+      en_US: Width
+      zh_Hans: Width
+      pt_BR: Width
+    human_description:
+      en_US: Width
+      zh_Hans: Width
+      pt_BR: Width
+    form: form
+    default: 1024
+  - name: height
+    type: number
+    required: false
+    label:
+      en_US: Height
+      zh_Hans: Height
+      pt_BR: Height
+    human_description:
+      en_US: Height
+      zh_Hans: Height
+      pt_BR: Height
+    form: form
+    default: 1024
+  - name: negative_prompt
+    type: string
+    required: false
+    label:
+      en_US: Negative prompt
+      zh_Hans: Negative prompt
+      pt_BR: Negative prompt
+    human_description:
+      en_US: Negative prompt
+      zh_Hans: Negative prompt
+      pt_BR: Negative prompt
+    form: form
+    default: bad art, ugly, deformed, watermark, duplicated, discontinuous lines
+  - name: cfg
+    type: number
+    required: false
+    label:
+      en_US: CFG Scale
+      zh_Hans: CFG Scale
+      pt_BR: CFG Scale
+    human_description:
+      en_US: CFG Scale
+      zh_Hans: 提示词相关性(CFG Scale)
+      pt_BR: CFG Scale
+    form: form
+    default: 7.0
+  - name: sampler_name
+    type: string
+    required: false
+    label:
+      en_US: Sampling method
+      zh_Hans: Sampling method
+      pt_BR: Sampling method
+    human_description:
+      en_US: Sampling method
+      zh_Hans: Sampling method
+      pt_BR: Sampling method
+    form: form
+  - name: scheduler
+    type: string
+    required: false
+    label:
+      en_US: Scheduler
+      zh_Hans: Scheduler
+      pt_BR: Scheduler
+    human_description:
+      en_US: Scheduler
+      zh_Hans: Scheduler
+      pt_BR: Scheduler
+    form: form
+  - name: lora_2
+    type: string
+    required: false
+    label:
+      en_US: Lora 2
+      zh_Hans: Lora 2
+      pt_BR: Lora 2
+    human_description:
+      en_US: Lora 2
+      zh_Hans: Lora 2
+      pt_BR: Lora 2
+    form: form
+  - name: lora_strength_2
+    type: number
+    required: false
+    label:
+      en_US: Lora Strength 2
+      zh_Hans: Lora Strength 2
+      pt_BR: Lora Strength 2
+    human_description:
+      en_US: Lora Strength 2
+      zh_Hans: Lora模型的权重
+      pt_BR: Lora Strength 2
+    form: form
+  - name: lora_3
+    type: string
+    required: false
+    label:
+      en_US: Lora 3
+      zh_Hans: Lora 3
+      pt_BR: Lora 3
+    human_description:
+      en_US: Lora 3
+      zh_Hans: Lora 3
+      pt_BR: Lora 3
+    form: form
+  - name: lora_strength_3
+    type: number
+    required: false
+    label:
+      en_US: Lora Strength 3
+      zh_Hans: Lora Strength 3
+      pt_BR: Lora Strength 3
+    human_description:
+      en_US: Lora Strength 3
+      zh_Hans: Lora模型的权重
+      pt_BR: Lora Strength 3
+    form: form

+ 107 - 0
api/core/tools/provider/builtin/comfyui/tools/txt2img.json

@@ -0,0 +1,107 @@
+{
+  "3": {
+    "inputs": {
+      "seed": 156680208700286,
+      "steps": 20,
+      "cfg": 8,
+      "sampler_name": "euler",
+      "scheduler": "normal",
+      "denoise": 1,
+      "model": [
+        "4",
+        0
+      ],
+      "positive": [
+        "6",
+        0
+      ],
+      "negative": [
+        "7",
+        0
+      ],
+      "latent_image": [
+        "5",
+        0
+      ]
+    },
+    "class_type": "KSampler",
+    "_meta": {
+      "title": "KSampler"
+    }
+  },
+  "4": {
+    "inputs": {
+      "ckpt_name": "3dAnimationDiffusion_v10.safetensors"
+    },
+    "class_type": "CheckpointLoaderSimple",
+    "_meta": {
+      "title": "Load Checkpoint"
+    }
+  },
+  "5": {
+    "inputs": {
+      "width": 512,
+      "height": 512,
+      "batch_size": 1
+    },
+    "class_type": "EmptyLatentImage",
+    "_meta": {
+      "title": "Empty Latent Image"
+    }
+  },
+  "6": {
+    "inputs": {
+      "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "7": {
+    "inputs": {
+      "text": "text, watermark",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "8": {
+    "inputs": {
+      "samples": [
+        "3",
+        0
+      ],
+      "vae": [
+        "4",
+        2
+      ]
+    },
+    "class_type": "VAEDecode",
+    "_meta": {
+      "title": "VAE Decode"
+    }
+  },
+  "9": {
+    "inputs": {
+      "filename_prefix": "ComfyUI",
+      "images": [
+        "8",
+        0
+      ]
+    },
+    "class_type": "SaveImage",
+    "_meta": {
+      "title": "Save Image"
+    }
+  }
+}
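As a rough illustration of how a template like txt2img.json can be driven, the sketch below loads the workflow, overrides the checkpoint, prompts, size and sampler settings from tool parameters, and queues it against a ComfyUI server. The node ids ("3", "4", "5", "6", "7") come from the JSON above; base_url and the parameter values are placeholders, and the actual tool code in this PR may wire things differently.

import json
import random
import requests

def queue_txt2img(base_url: str, params: dict) -> dict:
    # load the bundled workflow template shown above
    with open("txt2img.json") as f:
        workflow = json.load(f)

    # node "4": checkpoint loader, nodes "6"/"7": positive/negative prompts,
    # node "5": latent size, node "3": sampler settings
    workflow["4"]["inputs"]["ckpt_name"] = params["model"]
    workflow["6"]["inputs"]["text"] = params["prompt"]
    workflow["7"]["inputs"]["text"] = params.get("negative_prompt", "")
    workflow["5"]["inputs"]["width"] = params.get("width", 1024)
    workflow["5"]["inputs"]["height"] = params.get("height", 1024)
    workflow["3"]["inputs"]["steps"] = params.get("steps", 20)
    workflow["3"]["inputs"]["cfg"] = params.get("cfg", 7.0)
    workflow["3"]["inputs"]["seed"] = random.randint(0, 2**32 - 1)

    # ComfyUI accepts queued prompts on POST /prompt
    response = requests.post(f"{base_url}/prompt", json={"prompt": workflow})
    response.raise_for_status()
    return response.json()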

+ 3 - 1
api/core/tools/provider/builtin/feishu_document/tools/get_document_raw_content.py

@@ -12,6 +12,8 @@ class GetDocumentRawContentTool(BuiltinTool):
         client = FeishuRequest(app_id, app_secret)
 
         document_id = tool_parameters.get("document_id")
+        mode = tool_parameters.get("mode")
+        lang = tool_parameters.get("lang", 0)
 
-        res = client.get_document_raw_content(document_id)
+        res = client.get_document_content(document_id, mode, lang)
         return self.create_json_message(res)

+ 49 - 0
api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml

@@ -0,0 +1,49 @@
+identity:
+  name: get_document_content
+  author: Doug Lea
+  label:
+    en_US: Get Document Content
+    zh_Hans: 获取飞书云文档的内容
+description:
+  human:
+    en_US: Get document content
+    zh_Hans: 获取飞书云文档的内容
+  llm: A tool for retrieving content from Feishu cloud documents.
+parameters:
+  - name: document_id
+    type: string
+    required: true
+    label:
+      en_US: document_id
+      zh_Hans: 飞书文档的唯一标识
+    human_description:
+      en_US: Unique identifier for a Feishu document. You can also input the document's URL.
+      zh_Hans: 飞书文档的唯一标识,支持输入文档的 URL。
+    llm_description: 飞书文档的唯一标识,支持输入文档的 URL。
+    form: llm
+
+  - name: mode
+    type: string
+    required: false
+    label:
+      en_US: mode
+      zh_Hans: 文档返回格式
+    human_description:
+      en_US: Format of the returned document content. Optional values are text and markdown; can be empty, defaults to markdown.
+      zh_Hans: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。
+    llm_description: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。
+    form: llm
+
+  - name: lang
+    type: number
+    required: false
+    default: 0
+    label:
+      en_US: lang
+      zh_Hans: 指定@用户的语言
+    human_description:
+      en_US: |
+        Specifies the language for MentionUser, optional values are [0, 1]. 0: User's default name, 1: User's English name, default is 0.
+      zh_Hans: 指定返回的 MentionUser,即 @用户 的语言,可选值有 [0,1]。0:该用户的默认名称,1:该用户的英文名称,默认值为 0。
+    llm_description: 指定返回的 MentionUser,即 @用户 的语言,可选值有 [0,1]。0:该用户的默认名称,1:该用户的英文名称,默认值为 0。
+    form: llm

+ 0 - 23
api/core/tools/provider/builtin/feishu_document/tools/get_document_raw_content.yaml

@@ -1,23 +0,0 @@
-identity:
-  name: get_document_raw_content
-  author: Doug Lea
-  label:
-    en_US: Get Document Raw Content
-    zh_Hans: 获取文档纯文本内容
-description:
-  human:
-    en_US: Get document raw content
-    zh_Hans: 获取文档纯文本内容
-  llm: A tool for getting the plain text content of Feishu documents
-parameters:
-  - name: document_id
-    type: string
-    required: true
-    label:
-      en_US: document_id
-      zh_Hans: 飞书文档的唯一标识
-    human_description:
-      en_US: Unique ID of Feishu document document_id
-      zh_Hans: 飞书文档的唯一标识 document_id
-    llm_description: 飞书文档的唯一标识 document_id
-    form: llm

+ 0 - 48
api/core/tools/provider/builtin/feishu_document/tools/list_document_block.yaml

@@ -1,48 +0,0 @@
-identity:
-  name: list_document_block
-  author: Doug Lea
-  label:
-    en_US: List Document Block
-    zh_Hans: 获取飞书文档所有块
-description:
-  human:
-    en_US: List document block
-    zh_Hans: 获取飞书文档所有块的富文本内容并分页返回。
-  llm: A tool to get all blocks of Feishu documents
-parameters:
-  - name: document_id
-    type: string
-    required: true
-    label:
-      en_US: document_id
-      zh_Hans: 飞书文档的唯一标识
-    human_description:
-      en_US: Unique ID of Feishu document document_id
-      zh_Hans: 飞书文档的唯一标识 document_id
-    llm_description: 飞书文档的唯一标识 document_id
-    form: llm
-
-  - name: page_size
-    type: number
-    required: false
-    default: 500
-    label:
-      en_US: page_size
-      zh_Hans: 分页大小
-    human_description:
-      en_US: Paging size, the default and maximum value is 500.
-      zh_Hans: 分页大小, 默认值和最大值为 500。
-    llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。
-    form: llm
-
-  - name: page_token
-    type: string
-    required: false
-    label:
-      en_US: page_token
-      zh_Hans: 分页标记
-    human_description:
-      en_US: Pagination tag, used to paginate query results so that more items can be obtained in the next traversal.
-      zh_Hans: 分页标记,用于分页查询结果,以便下次遍历时获取更多项。
-    llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
-    form: llm

+ 1 - 1
api/core/tools/provider/builtin/feishu_document/tools/list_document_block.py

@@ -15,5 +15,5 @@ class ListDocumentBlockTool(BuiltinTool):
         page_size = tool_parameters.get("page_size", 500)
         page_token = tool_parameters.get("page_token", "")
 
-        res = client.list_document_block(document_id, page_token, page_size)
+        res = client.list_document_blocks(document_id, page_token, page_size)
         return self.create_json_message(res)

+ 74 - 0
api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml

@@ -0,0 +1,74 @@
+identity:
+  name: list_document_blocks
+  author: Doug Lea
+  label:
+    en_US: List Document Blocks
+    zh_Hans: 获取飞书文档所有块
+description:
+  human:
+    en_US: List document blocks
+    zh_Hans: 获取飞书文档所有块的富文本内容并分页返回
+  llm: A tool to get all blocks of Feishu documents
+parameters:
+  - name: document_id
+    type: string
+    required: true
+    label:
+      en_US: document_id
+      zh_Hans: 飞书文档的唯一标识
+    human_description:
+      en_US: Unique identifier for a Feishu document. You can also input the document's URL.
+      zh_Hans: 飞书文档的唯一标识,支持输入文档的 URL。
+    llm_description: 飞书文档的唯一标识,支持输入文档的 URL。
+    form: llm
+
+  - name: user_id_type
+    type: select
+    required: false
+    options:
+      - value: open_id
+        label:
+          en_US: open_id
+          zh_Hans: open_id
+      - value: union_id
+        label:
+          en_US: union_id
+          zh_Hans: union_id
+      - value: user_id
+        label:
+          en_US: user_id
+          zh_Hans: user_id
+    default: "open_id"
+    label:
+      en_US: user_id_type
+      zh_Hans: 用户 ID 类型
+    human_description:
+      en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+      zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+    llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+    form: llm
+
+  - name: page_size
+    type: number
+    required: false
+    default: "500"
+    label:
+      en_US: page_size
+      zh_Hans: 分页大小
+    human_description:
+      en_US: Page size; the default and maximum value is 500.
+      zh_Hans: 分页大小, 默认值和最大值为 500。
+    llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。
+    form: llm
+
+  - name: page_token
+    type: string
+    required: false
+    label:
+      en_US: page_token
+      zh_Hans: 分页标记
+    human_description:
+      en_US: Pagination token used to navigate through query results, allowing retrieval of additional items in subsequent requests.
+      zh_Hans: 分页标记,用于分页查询结果,以便下次遍历时获取更多项。
+    llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+    form: llm

+ 18 - 15
api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml

@@ -17,33 +17,35 @@ parameters:
       en_US: document_id
       zh_Hans: 飞书文档的唯一标识
     human_description:
-      en_US: Unique ID of Feishu document document_id
-      zh_Hans: 飞书文档的唯一标识 document_id
-    llm_description: 飞书文档的唯一标识 document_id
+      en_US: Unique identifier for a Feishu document. You can also input the document's URL.
+      zh_Hans: 飞书文档的唯一标识,支持输入文档的 URL。
+    llm_description: 飞书文档的唯一标识,支持输入文档的 URL。
     form: llm
 
   - name: content
     type: string
     required: true
     label:
-      en_US: document content
-      zh_Hans: 文档内容
+      en_US: Plain text or Markdown content
+      zh_Hans: 纯文本或 Markdown 内容
     human_description:
-      en_US: Document content, supports markdown syntax, can be empty.
-      zh_Hans: 文档内容,支持 markdown 语法,可以为空
-    llm_description:
+      en_US: Plain text or Markdown content. Note that embedded tables in the document should not have merged cells.
+      zh_Hans: 纯文本或 Markdown 内容。注意文档的内嵌套表格不允许有单元格合并
+    llm_description: 纯文本或 Markdown 内容,注意文档的内嵌套表格不允许有单元格合并。
     form: llm
 
   - name: position
-    type: select
-    required: true
-    default: start
+    type: string
+    required: false
     label:
-      en_US: Choose where to add content
-      zh_Hans: 选择添加内容的位置
+      en_US: position
+      zh_Hans: 添加位置
     human_description:
-      en_US: Please fill in start or end to add content at the beginning or end of the document respectively.
-      zh_Hans: 请填入 start 或 end, 分别表示在文档开头(start)或结尾(end)添加内容。
+      en_US: |
+        Enumeration values: start or end. Use 'start' to add content at the beginning of the document, and 'end' to add content at the end. The default value is 'end'.
+      zh_Hans: 枚举值:start 或 end。使用 'start' 在文档开头添加内容,使用 'end' 在文档结尾添加内容,默认值为 'end'。
+    llm_description: |
+      枚举值 start、end,start: 在文档开头添加内容;end: 在文档结尾添加内容,默认值为 end。
     form: llm
     options:
       - value: start
@@ -54,3 +56,4 @@ parameters:
         label:
           en_US: end
           zh_Hans: 在文档结尾添加内容
+    default: end

+ 9 - 2
api/core/tools/provider/builtin/siliconflow/tools/flux.py

@@ -5,7 +5,10 @@ import requests
 from core.tools.entities.tool_entities import ToolInvokeMessage
 from core.tools.tool.builtin_tool import BuiltinTool
 
-FLUX_URL = "https://api.siliconflow.cn/v1/black-forest-labs/FLUX.1-schnell/text-to-image"
+FLUX_URL = {
+    "schnell": "https://api.siliconflow.cn/v1/black-forest-labs/FLUX.1-schnell/text-to-image",
+    "dev": "https://api.siliconflow.cn/v1/image/generations",
+}
 
 
 class FluxTool(BuiltinTool):
@@ -24,8 +27,12 @@ class FluxTool(BuiltinTool):
             "seed": tool_parameters.get("seed"),
             "num_inference_steps": tool_parameters.get("num_inference_steps", 20),
         }
+        model = tool_parameters.get("model", "schnell")
+        url = FLUX_URL.get(model)
+        if model == "dev":
+            payload["model"] = "black-forest-labs/FLUX.1-dev"
 
-        response = requests.post(FLUX_URL, json=payload, headers=headers)
+        response = requests.post(url, json=payload, headers=headers)
         if response.status_code != 200:
             return self.create_text_message(f"Got Error Response:{response.text}")
 

+ 18 - 3
api/core/tools/provider/builtin/siliconflow/tools/flux.yaml

@@ -6,8 +6,8 @@ identity:
   icon: icon.svg
 description:
   human:
-    en_US: Generate image via SiliconFlow's flux schnell.
-  llm: This tool is used to generate image from prompt via SiliconFlow's flux schnell model.
+    en_US: Generate image via SiliconFlow's flux model.
+  llm: This tool is used to generate image from prompt via SiliconFlow's flux model.
 parameters:
   - name: prompt
     type: string
@@ -17,9 +17,24 @@ parameters:
       zh_Hans: 提示词
     human_description:
       en_US: The text prompt used to generate the image.
-      zh_Hans: 用于生成图片的文字提示词
+      zh_Hans: 建议用英文的生成图片提示词以获得更好的生成效果。
     llm_description: this prompt text will be used to generate image.
     form: llm
+  - name: model
+    type: select
+    required: true
+    options:
+      - value: schnell
+        label:
+          en_US: Flux.1-schnell
+      - value: dev
+        label:
+          en_US: Flux.1-dev
+    default: schnell
+    label:
+      en_US: Choose Image Model
+      zh_Hans: 选择生成图片的模型
+    form: form
   - name: image_size
     type: select
     required: true

+ 1 - 1
api/core/tools/provider/tool_provider.py

@@ -67,7 +67,7 @@ class ToolProviderController(BaseModel, ABC):
 
             # check type
             credential_schema = credentials_need_to_validate[credential_name]
-            if credential_schema in {ProviderConfig.Type.SECRET_INPUT, ProviderConfig.Type.TEXT_INPUT}:
+            if credential_schema.type in {ProviderConfig.Type.SECRET_INPUT, ProviderConfig.Type.TEXT_INPUT}:
                 if not isinstance(credentials[credential_name], str):
                     raise ToolProviderCredentialValidationError(f"credential {credential_name} should be string")
 

+ 5 - 2
api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py

@@ -165,7 +165,10 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
             if dataset.indexing_technique == "economy":
                 # use keyword table query
                 documents = RetrievalService.retrieve(
-                    retrieval_method="keyword_search", dataset_id=dataset.id, query=query, top_k=self.top_k
+                    retrieval_method="keyword_search",
+                    dataset_id=dataset.id,
+                    query=query,
+                    top_k=retrieval_model.get("top_k") or 2,
                 )
                 if documents:
                     all_documents.extend(documents)
@@ -176,7 +179,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
                         retrieval_method=retrieval_model["search_method"],
                         dataset_id=dataset.id,
                         query=query,
-                        top_k=self.top_k,
+                        top_k=retrieval_model.get("top_k") or 2,
                         score_threshold=retrieval_model.get("score_threshold", 0.0)
                         if retrieval_model["score_threshold_enabled"]
                         else 0.0,

+ 7 - 5
api/core/tools/utils/feishu_api_utils.py

@@ -76,9 +76,9 @@ class FeishuRequest:
         url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/write_document"
         payload = {"document_id": document_id, "content": content, "position": position}
         res = self._send_request(url, payload=payload)
-        return res.get("data")
+        return res
 
-    def get_document_raw_content(self, document_id: str) -> dict:
+    def get_document_content(self, document_id: str, mode: str, lang: int = 0) -> dict:
         """
         API url: https://open.larkoffice.com/document/server-docs/docs/docs/docx-v1/document/raw_content
         Example Response:
@@ -92,16 +92,18 @@ class FeishuRequest:
         """  # noqa: E501
         params = {
             "document_id": document_id,
+            "mode": mode,
+            "lang": lang,
         }
-        url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/get_document_raw_content"
+        url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/get_document_content"
         res = self._send_request(url, method="get", params=params)
         return res.get("data").get("content")
 
-    def list_document_block(self, document_id: str, page_token: str, page_size: int = 500) -> dict:
+    def list_document_blocks(self, document_id: str, page_token: str, page_size: int = 500) -> dict:
         """
         API url: https://open.larkoffice.com/document/server-docs/docs/docs/docx-v1/document/list
         """
-        url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/list_document_block"
+        url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/list_document_blocks"
         params = {
             "document_id": document_id,
             "page_size": page_size,

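Taken together with the tool changes above, the renamed helpers can be called directly; a minimal usage sketch assuming the FeishuRequest signatures shown in this diff (the credentials and document id are placeholders):

from core.tools.utils.feishu_api_utils import FeishuRequest

client = FeishuRequest("cli_xxx", "app_secret_xxx")  # placeholder credentials

# markdown content of a document, @user mentions rendered with default names (lang=0)
content = client.get_document_content("doccnXXXXXXXX", mode="markdown", lang=0)

# first page of blocks, 500 per page
blocks = client.list_document_blocks("doccnXXXXXXXX", page_token="", page_size=500)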
+ 2 - 14
api/core/workflow/graph_engine/entities/graph.py

@@ -689,23 +689,11 @@ class Graph(BaseModel):
 
                     parallel_start_node_ids[graph_edge.source_node_id].append(branch_node_id)
 
-        parallel_start_node_id = None
-        for p_start_node_id, branch_node_ids in parallel_start_node_ids.items():
+        for _, branch_node_ids in parallel_start_node_ids.items():
             if set(branch_node_ids) == set(routes_node_ids.keys()):
-                parallel_start_node_id = p_start_node_id
                 return True
 
-        if not parallel_start_node_id:
-            raise Exception("Parallel start node id not found")
-
-        for graph_edge in reverse_edge_mapping[start_node_id]:
-            if (
-                graph_edge.source_node_id not in all_routes_node_ids
-                or graph_edge.source_node_id != parallel_start_node_id
-            ):
-                return False
-
-        return True
+        return False
 
     @classmethod
     def _is_node2_after_node1(cls, node1_id: str, node2_id: str, edge_mapping: dict[str, list[GraphEdge]]) -> bool:

+ 17 - 12
api/core/workflow/graph_engine/graph_engine.py

@@ -61,6 +61,9 @@ class GraphEngineThreadPool(ThreadPoolExecutor):
 
         return super().submit(fn, *args, **kwargs)
 
+    def task_done_callback(self, future):
+        self.submit_count -= 1
+
     def check_is_full(self) -> None:
         print(f"submit_count: {self.submit_count}, max_submit_count: {self.max_submit_count}")
         if self.submit_count > self.max_submit_count:
@@ -426,20 +429,22 @@ class GraphEngine:
             ):
                 continue
 
-            futures.append(
-                self.thread_pool.submit(
-                    self._run_parallel_node,
-                    **{
-                        "flask_app": current_app._get_current_object(),  # type: ignore[attr-defined]
-                        "q": q,
-                        "parallel_id": parallel_id,
-                        "parallel_start_node_id": edge.target_node_id,
-                        "parent_parallel_id": in_parallel_id,
-                        "parent_parallel_start_node_id": parallel_start_node_id,
-                    },
-                )
+            future = self.thread_pool.submit(
+                self._run_parallel_node,
+                **{
+                    "flask_app": current_app._get_current_object(),  # type: ignore[attr-defined]
+                    "q": q,
+                    "parallel_id": parallel_id,
+                    "parallel_start_node_id": edge.target_node_id,
+                    "parent_parallel_id": in_parallel_id,
+                    "parent_parallel_start_node_id": parallel_start_node_id,
+                },
             )
 
+            future.add_done_callback(self.thread_pool.task_done_callback)
+
+            futures.append(future)
+
         succeeded_count = 0
         while True:
             try:

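The submit-count bookkeeping above follows a common ThreadPoolExecutor pattern: increment on submit, decrement in a done callback so the counter also drops when a task fails. A standalone sketch of that pattern (names and limits are illustrative, not the engine's actual API):

from concurrent.futures import ThreadPoolExecutor

class CountingThreadPool(ThreadPoolExecutor):
    def __init__(self, max_submit_count: int, **kwargs):
        super().__init__(**kwargs)
        self.max_submit_count = max_submit_count
        self.submit_count = 0

    def submit(self, fn, *args, **kwargs):
        self.submit_count += 1
        if self.submit_count > self.max_submit_count:
            raise ValueError("Max submit count reached")
        future = super().submit(fn, *args, **kwargs)
        # decrement when the task finishes, whether it succeeded or failed
        future.add_done_callback(lambda _: self._task_done())
        return future

    def _task_done(self):
        self.submit_count -= 1

pool = CountingThreadPool(max_submit_count=100, max_workers=10)
print(pool.submit(sum, [1, 2, 3]).result())  # 6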
+ 1 - 0
api/core/workflow/nodes/answer/answer_stream_generate_router.py

@@ -152,6 +152,7 @@ class AnswerStreamGeneratorRouter:
                 NodeType.ANSWER.value,
                 NodeType.IF_ELSE.value,
                 NodeType.QUESTION_CLASSIFIER.value,
+                NodeType.ITERATION.value,
             }:
                 answer_dependencies[answer_node_id].append(source_node_id)
             else:

+ 82 - 117
api/core/workflow/nodes/iteration/iteration_node.py

@@ -20,11 +20,9 @@ from core.workflow.graph_engine.entities.event import (
     NodeRunSucceededEvent,
 )
 from core.workflow.graph_engine.entities.graph import Graph
-from core.workflow.graph_engine.entities.run_condition import RunCondition
 from core.workflow.nodes.base_node import BaseNode
 from core.workflow.nodes.event import RunCompletedEvent, RunEvent
 from core.workflow.nodes.iteration.entities import IterationNodeData
-from core.workflow.utils.condition.entities import Condition
 from models.workflow import WorkflowNodeExecutionStatus
 
 logger = logging.getLogger(__name__)
@@ -68,38 +66,6 @@ class IterationNode(BaseNode):
         if not iteration_graph:
             raise ValueError("iteration graph not found")
 
-        leaf_node_ids = iteration_graph.get_leaf_node_ids()
-        iteration_leaf_node_ids = []
-        for leaf_node_id in leaf_node_ids:
-            node_config = iteration_graph.node_id_config_mapping.get(leaf_node_id)
-            if not node_config:
-                continue
-
-            leaf_node_iteration_id = node_config.get("data", {}).get("iteration_id")
-            if not leaf_node_iteration_id:
-                continue
-
-            if leaf_node_iteration_id != self.node_id:
-                continue
-
-            iteration_leaf_node_ids.append(leaf_node_id)
-
-            # add condition of end nodes to root node
-            iteration_graph.add_extra_edge(
-                source_node_id=leaf_node_id,
-                target_node_id=root_node_id,
-                run_condition=RunCondition(
-                    type="condition",
-                    conditions=[
-                        Condition(
-                            variable_selector=[self.node_id, "index"],
-                            comparison_operator="<",
-                            value=str(len(iterator_list_value)),
-                        )
-                    ],
-                ),
-            )
-
         variable_pool = self.graph_runtime_state.variable_pool
 
         # append iteration variable (item, index) to variable pool
@@ -149,91 +115,90 @@ class IterationNode(BaseNode):
 
         outputs: list[Any] = []
         try:
-            # run workflow
-            rst = graph_engine.run()
-            for event in rst:
-                if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id:
-                    event.in_iteration_id = self.node_id
-
-                if (
-                    isinstance(event, BaseNodeEvent)
-                    and event.node_type == NodeType.ITERATION_START
-                    and not isinstance(event, NodeRunStreamChunkEvent)
-                ):
-                    continue
-
-                if isinstance(event, NodeRunSucceededEvent):
-                    if event.route_node_state.node_run_result:
-                        metadata = event.route_node_state.node_run_result.metadata
-                        if not metadata:
-                            metadata = {}
-
-                        if NodeRunMetadataKey.ITERATION_ID not in metadata:
-                            metadata[NodeRunMetadataKey.ITERATION_ID] = self.node_id
-                            metadata[NodeRunMetadataKey.ITERATION_INDEX] = variable_pool.get_any(
-                                [self.node_id, "index"]
-                            )
-                            event.route_node_state.node_run_result.metadata = metadata
-
-                    yield event
-
-                    # handle iteration run result
-                    if event.route_node_state.node_id in iteration_leaf_node_ids:
-                        # append to iteration output variable list
-                        current_iteration_output = variable_pool.get_any(self.node_data.output_selector)
-                        outputs.append(current_iteration_output)
-
-                        # remove all nodes outputs from variable pool
-                        for node_id in iteration_graph.node_ids:
-                            variable_pool.remove_node(node_id)
-
-                        # move to next iteration
-                        current_index = variable_pool.get([self.node_id, "index"])
-                        if current_index is None:
-                            raise ValueError(f"iteration {self.node_id} current index not found")
-
-                        next_index = int(current_index.to_object()) + 1
-                        variable_pool.add([self.node_id, "index"], next_index)
-
-                        if next_index < len(iterator_list_value):
-                            variable_pool.add([self.node_id, "item"], iterator_list_value[next_index])
-
-                        yield IterationRunNextEvent(
-                            iteration_id=self.id,
-                            iteration_node_id=self.node_id,
-                            iteration_node_type=self.node_type,
-                            iteration_node_data=self.node_data,
-                            index=next_index,
-                            pre_iteration_output=jsonable_encoder(current_iteration_output)
-                            if current_iteration_output
-                            else None,
-                        )
-                elif isinstance(event, BaseGraphEvent):
-                    if isinstance(event, GraphRunFailedEvent):
-                        # iteration run failed
-                        yield IterationRunFailedEvent(
-                            iteration_id=self.id,
-                            iteration_node_id=self.node_id,
-                            iteration_node_type=self.node_type,
-                            iteration_node_data=self.node_data,
-                            start_at=start_at,
-                            inputs=inputs,
-                            outputs={"output": jsonable_encoder(outputs)},
-                            steps=len(iterator_list_value),
-                            metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
-                            error=event.error,
-                        )
-
-                        yield RunCompletedEvent(
-                            run_result=NodeRunResult(
-                                status=WorkflowNodeExecutionStatus.FAILED,
+            for _ in range(len(iterator_list_value)):
+                # run workflow
+                rst = graph_engine.run()
+                for event in rst:
+                    if isinstance(event, (BaseNodeEvent | BaseParallelBranchEvent)) and not event.in_iteration_id:
+                        event.in_iteration_id = self.node_id
+
+                    if (
+                        isinstance(event, BaseNodeEvent)
+                        and event.node_type == NodeType.ITERATION_START
+                        and not isinstance(event, NodeRunStreamChunkEvent)
+                    ):
+                        continue
+
+                    if isinstance(event, NodeRunSucceededEvent):
+                        if event.route_node_state.node_run_result:
+                            metadata = event.route_node_state.node_run_result.metadata
+                            if not metadata:
+                                metadata = {}
+
+                            if NodeRunMetadataKey.ITERATION_ID not in metadata:
+                                metadata[NodeRunMetadataKey.ITERATION_ID] = self.node_id
+                                metadata[NodeRunMetadataKey.ITERATION_INDEX] = variable_pool.get_any(
+                                    [self.node_id, "index"]
+                                )
+                                event.route_node_state.node_run_result.metadata = metadata
+
+                        yield event
+                    elif isinstance(event, BaseGraphEvent):
+                        if isinstance(event, GraphRunFailedEvent):
+                            # iteration run failed
+                            yield IterationRunFailedEvent(
+                                iteration_id=self.id,
+                                iteration_node_id=self.node_id,
+                                iteration_node_type=self.node_type,
+                                iteration_node_data=self.node_data,
+                                start_at=start_at,
+                                inputs=inputs,
+                                outputs={"output": jsonable_encoder(outputs)},
+                                steps=len(iterator_list_value),
+                                metadata={"total_tokens": graph_engine.graph_runtime_state.total_tokens},
                                 error=event.error,
                             )
-                        )
-                        break
-                else:
-                    event = cast(InNodeEvent, event)
-                    yield event
+
+                            yield RunCompletedEvent(
+                                run_result=NodeRunResult(
+                                    status=WorkflowNodeExecutionStatus.FAILED,
+                                    error=event.error,
+                                )
+                            )
+                            return
+                    else:
+                        event = cast(InNodeEvent, event)
+                        yield event
+
+                # append to iteration output variable list
+                current_iteration_output = variable_pool.get_any(self.node_data.output_selector)
+                outputs.append(current_iteration_output)
+
+                # remove all nodes outputs from variable pool
+                for node_id in iteration_graph.node_ids:
+                    variable_pool.remove_node(node_id)
+
+                # move to next iteration
+                current_index = variable_pool.get([self.node_id, "index"])
+                if current_index is None:
+                    raise ValueError(f"iteration {self.node_id} current index not found")
+
+                next_index = int(current_index.to_object()) + 1
+                variable_pool.add([self.node_id, "index"], next_index)
+
+                if next_index < len(iterator_list_value):
+                    variable_pool.add([self.node_id, "item"], iterator_list_value[next_index])
+
+                yield IterationRunNextEvent(
+                    iteration_id=self.id,
+                    iteration_node_id=self.node_id,
+                    iteration_node_type=self.node_type,
+                    iteration_node_data=self.node_data,
+                    index=next_index,
+                    pre_iteration_output=jsonable_encoder(current_iteration_output)
+                    if current_iteration_output
+                    else None,
+                )
 
             yield IterationRunSucceededEvent(
                 iteration_id=self.id,

+ 9 - 1
api/extensions/ext_sentry.py

@@ -5,6 +5,8 @@ from sentry_sdk.integrations.celery import CeleryIntegration
 from sentry_sdk.integrations.flask import FlaskIntegration
 from werkzeug.exceptions import HTTPException
 
+from core.model_runtime.errors.invoke import InvokeRateLimitError
+
 
 def before_send(event, hint):
     if "exc_info" in hint:
@@ -20,7 +22,13 @@ def init_app(app):
         sentry_sdk.init(
             dsn=app.config.get("SENTRY_DSN"),
             integrations=[FlaskIntegration(), CeleryIntegration()],
-            ignore_errors=[HTTPException, ValueError, openai.APIStatusError, parse_error.defaultErrorResponse],
+            ignore_errors=[
+                HTTPException,
+                ValueError,
+                openai.APIStatusError,
+                InvokeRateLimitError,
+                parse_error.defaultErrorResponse,
+            ],
             traces_sample_rate=app.config.get("SENTRY_TRACES_SAMPLE_RATE", 1.0),
             profiles_sample_rate=app.config.get("SENTRY_PROFILES_SAMPLE_RATE", 1.0),
             environment=app.config.get("DEPLOY_ENV"),

+ 39 - 10
api/extensions/ext_storage.py

@@ -1,3 +1,4 @@
+import logging
 from collections.abc import Generator
 from typing import Union
 
@@ -40,28 +41,56 @@ class Storage:
             self.storage_runner = LocalStorage(app=app)
 
     def save(self, filename, data):
-        self.storage_runner.save(filename, data)
+        try:
+            self.storage_runner.save(filename, data)
+        except Exception as e:
+            logging.exception("Failed to save file: %s", e)
+            raise e
 
     def load(self, filename: str, stream: bool = False) -> Union[bytes, Generator]:
-        if stream:
-            return self.load_stream(filename)
-        else:
-            return self.load_once(filename)
+        try:
+            if stream:
+                return self.load_stream(filename)
+            else:
+                return self.load_once(filename)
+        except Exception as e:
+            logging.exception("Failed to load file: %s", e)
+            raise e
 
     def load_once(self, filename: str) -> bytes:
-        return self.storage_runner.load_once(filename)
+        try:
+            return self.storage_runner.load_once(filename)
+        except Exception as e:
+            logging.exception("Failed to load_once file: %s", e)
+            raise e
 
     def load_stream(self, filename: str) -> Generator:
-        return self.storage_runner.load_stream(filename)
+        try:
+            return self.storage_runner.load_stream(filename)
+        except Exception as e:
+            logging.exception("Failed to load_stream file: %s", e)
+            raise e
 
     def download(self, filename, target_filepath):
-        self.storage_runner.download(filename, target_filepath)
+        try:
+            self.storage_runner.download(filename, target_filepath)
+        except Exception as e:
+            logging.exception("Failed to download file: %s", e)
+            raise e
 
     def exists(self, filename):
-        return self.storage_runner.exists(filename)
+        try:
+            return self.storage_runner.exists(filename)
+        except Exception as e:
+            logging.exception("Failed to check file exists: %s", e)
+            raise e
 
     def delete(self, filename):
-        return self.storage_runner.delete(filename)
+        try:
+            return self.storage_runner.delete(filename)
+        except Exception as e:
+            logging.exception("Failed to delete file: %s", e)
+            raise e
 
 
 storage = Storage()

+ 14 - 34
api/extensions/storage/aliyun_storage.py

@@ -31,54 +31,34 @@ class AliyunStorage(BaseStorage):
         )
 
     def save(self, filename, data):
-        if not self.folder or self.folder.endswith("/"):
-            filename = self.folder + filename
-        else:
-            filename = self.folder + "/" + filename
-        self.client.put_object(filename, data)
+        self.client.put_object(self.__wrapper_folder_filename(filename), data)
 
     def load_once(self, filename: str) -> bytes:
-        if not self.folder or self.folder.endswith("/"):
-            filename = self.folder + filename
-        else:
-            filename = self.folder + "/" + filename
-
-        with closing(self.client.get_object(filename)) as obj:
+        with closing(self.client.get_object(self.__wrapper_folder_filename(filename))) as obj:
             data = obj.read()
         return data
 
     def load_stream(self, filename: str) -> Generator:
         def generate(filename: str = filename) -> Generator:
-            if not self.folder or self.folder.endswith("/"):
-                filename = self.folder + filename
-            else:
-                filename = self.folder + "/" + filename
-
-            with closing(self.client.get_object(filename)) as obj:
+            with closing(self.client.get_object(self.__wrapper_folder_filename(filename))) as obj:
                 while chunk := obj.read(4096):
                     yield chunk
 
         return generate()
 
     def download(self, filename, target_filepath):
-        if not self.folder or self.folder.endswith("/"):
-            filename = self.folder + filename
-        else:
-            filename = self.folder + "/" + filename
-
-        self.client.get_object_to_file(filename, target_filepath)
+        self.client.get_object_to_file(self.__wrapper_folder_filename(filename), target_filepath)
 
     def exists(self, filename):
-        if not self.folder or self.folder.endswith("/"):
-            filename = self.folder + filename
-        else:
-            filename = self.folder + "/" + filename
-
-        return self.client.object_exists(filename)
+        return self.client.object_exists(self.__wrapper_folder_filename(filename))
 
     def delete(self, filename):
-        if not self.folder or self.folder.endswith("/"):
-            filename = self.folder + filename
-        else:
-            filename = self.folder + "/" + filename
-        self.client.delete_object(filename)
+        self.client.delete_object(self.__wrapper_folder_filename(filename))
+
+    def __wrapper_folder_filename(self, filename) -> str:
+        if self.folder:
+            if self.folder.endswith("/"):
+                filename = self.folder + filename
+            else:
+                filename = self.folder + "/" + filename
+        return filename

+ 13 - 13
api/poetry.lock

@@ -2296,18 +2296,18 @@ files = [
 
 [[package]]
 name = "duckduckgo-search"
-version = "6.2.11"
+version = "6.2.12"
 description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "duckduckgo_search-6.2.11-py3-none-any.whl", hash = "sha256:6fb7069b79e8928f487001de6859034ade19201bdcd257ec198802430e374bfe"},
-    {file = "duckduckgo_search-6.2.11.tar.gz", hash = "sha256:6b6ef1b552c5e67f23e252025d2504caf6f9fc14f70e86c6dd512200f386c673"},
+    {file = "duckduckgo_search-6.2.12-py3-none-any.whl", hash = "sha256:0d379c1f845b632a41553efb13d571788f19ad289229e641a27b5710d92097a6"},
+    {file = "duckduckgo_search-6.2.12.tar.gz", hash = "sha256:04f9f1459763668d268344c7a32d943173d0e060dad53a5c2df4b4d3ca9a74cf"},
 ]
 
 [package.dependencies]
 click = ">=8.1.7"
-primp = ">=0.6.1"
+primp = ">=0.6.2"
 
 [package.extras]
 dev = ["mypy (>=1.11.1)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.6.1)"]
@@ -6356,19 +6356,19 @@ dill = ["dill (>=0.3.8)"]
 
 [[package]]
 name = "primp"
-version = "0.6.1"
+version = "0.6.2"
 description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "primp-0.6.1-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:60cfe95e0bdf154b0f9036d38acaddc9aef02d6723ed125839b01449672d3946"},
-    {file = "primp-0.6.1-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:e1e92433ecf32639f9e800bc3a5d58b03792bdec99421b7fb06500e2fae63c85"},
-    {file = "primp-0.6.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e02353f13f07fb5a6f91df9e2f4d8ec9f41312de95088744dce1c9729a3865d"},
-    {file = "primp-0.6.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:c5a2ccfdf488b17be225a529a31e2b22724b2e22fba8e1ae168a222f857c2dc0"},
-    {file = "primp-0.6.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f335c2ace907800a23bbb7bc6e15acc7fff659b86a2d5858817f6ed79cea07cf"},
-    {file = "primp-0.6.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5dc15bd9d47ded7bc356fcb5d8321972dcbeba18e7d3b7250e12bb7365447b2b"},
-    {file = "primp-0.6.1-cp38-abi3-win_amd64.whl", hash = "sha256:eebf0412ebba4089547b16b97b765d83f69f1433d811bb02b02cdcdbca20f672"},
-    {file = "primp-0.6.1.tar.gz", hash = "sha256:64b3c12e3d463a887518811c46f3ec37cca02e6af1ddf1287e548342de436301"},
+    {file = "primp-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4a35d441462a55d9a9525bf170e2ffd2fcb3db6039b23e802859fa22c18cdd51"},
+    {file = "primp-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:f67ccade95bdbca3cf9b96b93aa53f9617d85ddbf988da4e9c523aa785fd2d54"},
+    {file = "primp-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8074b93befaf36567e4cf3d4a1a8cd6ab9cc6e4dd4ff710650678daa405aee71"},
+    {file = "primp-0.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7d3e2a3f8c6262e9b883651b79c4ff2b7677a76f47293a139f541c9ea333ce3b"},
+    {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a460ea389371c6d04839b4b50b5805d99da8ebe281a2e8b534d27377c6d44f0e"},
+    {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b6b27e89d3c05c811aff0e4fde7a36d6957b15b3112f4ce28b6b99e8ca1e725"},
+    {file = "primp-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:1006a40a85f88a4c5222094813a1ebc01f85a63e9a33d2c443288c0720bed321"},
+    {file = "primp-0.6.2.tar.gz", hash = "sha256:5a96a6b65195a8a989157e67d23bd171c49be238654e02bdf1b1fda36cbcc068"},
 ]
 
 [package.extras]

+ 1 - 1
api/pyproject.toml

@@ -209,7 +209,7 @@ zhipuai = "1.0.7"
 # Before adding new dependency, consider place it in alphabet order (a-z) and suitable group.
 
 ############################################################
-# Related transparent dependencies with pinned verion
+# Related transparent dependencies with pinned version
 # required by main implementations
 ############################################################
 azure-ai-ml = "^1.19.0"

+ 7 - 0
docker/.env.example

@@ -569,6 +569,13 @@ SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
 SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128
 
 # ------------------------------
+# Environment Variables for web Service
+# ------------------------------
+
+# The timeout for text generation in milliseconds
+TEXT_GENERATION_TIMEOUT_MS=60000
+
+# ------------------------------
 # Environment Variables for db Service
 # ------------------------------
 

+ 1 - 0
docker/docker-compose.middleware.yaml

@@ -89,6 +89,7 @@ services:
   weaviate:
     image: semitechnologies/weaviate:1.19.0
     profiles:
+      - ""
       - weaviate
     restart: always
     volumes:

+ 2 - 1
docker/docker-compose.yaml

@@ -75,7 +75,7 @@ x-shared-env: &shared-api-worker-env
   ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
   ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
   ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
-  ALIYUN_OSS_PATHS: ${ALIYUN_OSS_PATH:-}
+  ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-}
   TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
   TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
   TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
@@ -254,6 +254,7 @@ services:
       APP_API_URL: ${APP_API_URL:-}
       SENTRY_DSN: ${WEB_SENTRY_DSN:-}
       NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
+      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
 
   # The postgres database.
   db:

+ 282 - 0
sdks/python-client/dify_client/client.py

@@ -1,3 +1,4 @@
+import json
 import requests
 
 
@@ -131,3 +132,284 @@ class WorkflowClient(DifyClient):
     def stop(self, task_id, user):
         data = {"user": user}
         return self._send_request("POST", f"/workflows/tasks/{task_id}/stop", data)
+
+    def get_result(self, workflow_run_id):
+        return self._send_request("GET", f"/workflows/run/{workflow_run_id}")
+
+
+class KnowledgeBaseClient(DifyClient):
+
+    def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1', dataset_id: str = None):
+        """
+        Construct a KnowledgeBaseClient object.
+
+        Args:
+            api_key (str): API key of Dify.
+            base_url (str, optional): Base URL of Dify API. Defaults to 'https://api.dify.ai/v1'.
+            dataset_id (str, optional): ID of the dataset. Defaults to None. You don't need this if you only want to
+                create a new dataset or list datasets; otherwise it must be set.
+        """
+        super().__init__(
+            api_key=api_key,
+            base_url=base_url
+        )
+        self.dataset_id = dataset_id
+
+    def _get_dataset_id(self):
+        if self.dataset_id is None:
+            raise ValueError("dataset_id is not set")
+        return self.dataset_id
+
+    def create_dataset(self, name: str, **kwargs):
+        return self._send_request('POST', '/datasets', {'name': name}, **kwargs)
+
+    def list_datasets(self, page: int = 1, page_size: int = 20, **kwargs):
+        return self._send_request('GET', f'/datasets?page={page}&limit={page_size}', **kwargs)
+
+    def create_document_by_text(self, name, text, extra_params: dict = None, **kwargs):
+        """
+        Create a document by text.
+
+        :param name: Name of the document
+        :param text: Text content of the document
+        :param extra_params: extra parameters passed to the API, such as indexing_technique and process_rule (optional)
+            e.g.
+            {
+            'indexing_technique': 'high_quality',
+            'process_rule': {
+                'rules': {
+                    'pre_processing_rules': [
+                        {'id': 'remove_extra_spaces', 'enabled': True},
+                        {'id': 'remove_urls_emails', 'enabled': True}
+                    ],
+                    'segmentation': {
+                        'separator': '\n',
+                        'max_tokens': 500
+                    }
+                },
+                'mode': 'custom'
+            }
+        }
+        :return: Response from the API
+        """
+        data = {
+            'indexing_technique': 'high_quality',
+            'process_rule': {
+                'mode': 'automatic'
+            },
+            'name': name,
+            'text': text
+        }
+        if extra_params is not None and isinstance(extra_params, dict):
+            data.update(extra_params)
+        url = f"/datasets/{self._get_dataset_id()}/document/create_by_text"
+        return self._send_request("POST", url, json=data, **kwargs)
+
+    def update_document_by_text(self, document_id, name, text, extra_params: dict = None, **kwargs):
+        """
+        Update a document by text.
+
+        :param document_id: ID of the document
+        :param name: Name of the document
+        :param text: Text content of the document
+        :param extra_params: extra parameters passed to the API, such as indexing_technique and process_rule (optional)
+            e.g.
+            {
+            'indexing_technique': 'high_quality',
+            'process_rule': {
+                'rules': {
+                    'pre_processing_rules': [
+                        {'id': 'remove_extra_spaces', 'enabled': True},
+                        {'id': 'remove_urls_emails', 'enabled': True}
+                    ],
+                    'segmentation': {
+                        'separator': '\n',
+                        'max_tokens': 500
+                    }
+                },
+                'mode': 'custom'
+            }
+        }
+        :return: Response from the API
+        """
+        data = {
+            'name': name,
+            'text': text
+        }
+        if extra_params is not None and isinstance(extra_params, dict):
+            data.update(extra_params)
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_text"
+        return self._send_request("POST", url, json=data, **kwargs)
+
+    def create_document_by_file(self, file_path, original_document_id=None, extra_params: dict = None):
+        """
+        Create a document by file.
+
+        :param file_path: Path to the file
+        :param original_document_id: pass this ID if you want to replace the original document (optional)
+        :param extra_params: extra parameters passed to the API, such as indexing_technique and process_rule (optional)
+            e.g.
+            {
+            'indexing_technique': 'high_quality',
+            'process_rule': {
+                'rules': {
+                    'pre_processing_rules': [
+                        {'id': 'remove_extra_spaces', 'enabled': True},
+                        {'id': 'remove_urls_emails', 'enabled': True}
+                    ],
+                    'segmentation': {
+                        'separator': '\n',
+                        'max_tokens': 500
+                    }
+                },
+                'mode': 'custom'
+            }
+        }
+        :return: Response from the API
+        """
+        files = {"file": open(file_path, "rb")}
+        data = {
+            'process_rule': {
+                'mode': 'automatic'
+            },
+            'indexing_technique': 'high_quality'
+        }
+        if extra_params is not None and isinstance(extra_params, dict):
+            data.update(extra_params)
+        if original_document_id is not None:
+            data['original_document_id'] = original_document_id
+        url = f"/datasets/{self._get_dataset_id()}/document/create_by_file"
+        return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files)
+
+    def update_document_by_file(self, document_id, file_path, extra_params: dict = None):
+        """
+        Update a document by file.
+
+        :param document_id: ID of the document
+        :param file_path: Path to the file
+        :param extra_params: extra parameters passed to the API, such as indexing_technique and process_rule (optional)
+            e.g.
+            {
+            'indexing_technique': 'high_quality',
+            'process_rule': {
+                'rules': {
+                    'pre_processing_rules': [
+                        {'id': 'remove_extra_spaces', 'enabled': True},
+                        {'id': 'remove_urls_emails', 'enabled': True}
+                    ],
+                    'segmentation': {
+                        'separator': '\n',
+                        'max_tokens': 500
+                    }
+                },
+                'mode': 'custom'
+            }
+        }
+        :return:
+        """
+        files = {"file": open(file_path, "rb")}
+        data = {}
+        if extra_params is not None and isinstance(extra_params, dict):
+            data.update(extra_params)
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_file"
+        return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files)
+
+    def batch_indexing_status(self, batch_id: str, **kwargs):
+        """
+        Get the status of the batch indexing.
+
+        :param batch_id: ID of the batch upload
+        :return: Response from the API
+        """
+        url = f"/datasets/{self._get_dataset_id()}/documents/{batch_id}/indexing-status"
+        return self._send_request("GET", url, **kwargs)
+
+    def delete_dataset(self):
+        """
+        Delete this dataset.
+
+        :return: Response from the API
+        """
+        url = f"/datasets/{self._get_dataset_id()}"
+        return self._send_request("DELETE", url)
+
+    def delete_document(self, document_id):
+        """
+        Delete a document.
+
+        :param document_id: ID of the document
+        :return: Response from the API
+        """
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}"
+        return self._send_request("DELETE", url)
+
+    def list_documents(self, page: int = None, page_size: int = None, keyword: str = None, **kwargs):
+        """
+        Get a list of documents in this dataset.
+
+        :return: Response from the API
+        """
+        params = {}
+        if page is not None:
+            params['page'] = page
+        if page_size is not None:
+            params['limit'] = page_size
+        if keyword is not None:
+            params['keyword'] = keyword
+        url = f"/datasets/{self._get_dataset_id()}/documents"
+        return self._send_request("GET", url, params=params, **kwargs)
+
+    def add_segments(self, document_id, segments, **kwargs):
+        """
+        Add segments to a document.
+
+        :param document_id: ID of the document
+        :param segments: List of segments to add, example: [{"content": "1", "answer": "1", "keyword": ["a"]}]
+        :return: Response from the API
+        """
+        data = {"segments": segments}
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments"
+        return self._send_request("POST", url, json=data, **kwargs)
+
+    def query_segments(self, document_id, keyword: str = None, status: str = None, **kwargs):
+        """
+        Query segments in this document.
+
+        :param document_id: ID of the document
+        :param keyword: query keyword, optional
+        :param status: status of the segment, optional, e.g. completed
+        """
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments"
+        params = {}
+        if keyword is not None:
+            params['keyword'] = keyword
+        if status is not None:
+            params['status'] = status
+        if "params" in kwargs:
+            params.update(kwargs["params"])
+        return self._send_request("GET", url, params=params, **kwargs)
+
+    def delete_document_segment(self, document_id, segment_id):
+        """
+        Delete a segment from a document.
+
+        :param document_id: ID of the document
+        :param segment_id: ID of the segment
+        :return: Response from the API
+        """
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}"
+        return self._send_request("DELETE", url)
+
+    def update_document_segment(self, document_id, segment_id, segment_data, **kwargs):
+        """
+        Update a segment in a document.
+
+        :param document_id: ID of the document
+        :param segment_id: ID of the segment
+        :param segment_data: Data of the segment, example: {"content": "1", "answer": "1", "keyword": ["a"], "enabled": True}
+        :return: Response from the API
+        """
+        data = {"segment": segment_data}
+        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}"
+        return self._send_request("POST", url, json=data, **kwargs)

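The knowledge-base methods above compose into a full dataset → document → segment lifecycle, mirroring the new integration test below. A minimal, hedged sketch of that flow; the API key, base URL, and file path are placeholders, and responses are requests-style objects exposing `.json()` as the tests show:

```python
import time

from dify_client.client import KnowledgeBaseClient

API_KEY = "your-dataset-api-key"      # placeholder, not a real key
BASE_URL = "https://api.dify.ai/v1"   # default public endpoint

# Create a dataset, then bind a client to it for document/segment operations.
kb = KnowledgeBaseClient(API_KEY, base_url=BASE_URL)
dataset_id = kb.create_dataset(name="my_dataset").json()["id"]
kb = KnowledgeBaseClient(API_KEY, base_url=BASE_URL, dataset_id=dataset_id)

# Create a document from text, then replace its content from a local file,
# passing custom processing rules via extra_params (structure documented above).
created = kb.create_document_by_text("my_document", "initial text").json()
document_id = created["document"]["id"]
updated = kb.update_document_by_file(
    document_id,
    "README.md",
    extra_params={
        "indexing_technique": "high_quality",
        "process_rule": {
            "rules": {
                "pre_processing_rules": [{"id": "remove_extra_spaces", "enabled": True}],
                "segmentation": {"separator": "\n", "max_tokens": 500},
            },
            "mode": "custom",
        },
    },
).json()

# Check indexing progress for the upload batch, then manage segments.
time.sleep(1)
print(kb.batch_indexing_status(updated["batch"]).json())
kb.add_segments(document_id, [{"content": "hello", "answer": "world"}])
print(kb.query_segments(document_id, status="completed").json())
print(kb.list_documents(page=1, page_size=10).json())
```

Deleting the document and dataset afterwards (`delete_document`, `delete_dataset`) mirrors the teardown in the test class; the dataset delete returns HTTP 204, which the test asserts directly.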
+ 1 - 1
sdks/python-client/setup.py

@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="dify-client",
-    version="0.1.10",
+    version="0.1.12",
     author="Dify",
     author_email="hello@dify.ai",
     description="A package for interacting with the Dify Service-API",

+ 148 - 1
sdks/python-client/tests/test_client.py

@@ -1,10 +1,157 @@
 import os
+import time
 import unittest
 
-from dify_client.client import ChatClient, CompletionClient, DifyClient
+from dify_client.client import ChatClient, CompletionClient, DifyClient, KnowledgeBaseClient
 
 API_KEY = os.environ.get("API_KEY")
 APP_ID = os.environ.get("APP_ID")
+API_BASE_URL = os.environ.get("API_BASE_URL", "https://api.dify.ai/v1")
+FILE_PATH_BASE = os.path.dirname(__file__)
+
+
+class TestKnowledgeBaseClient(unittest.TestCase):
+    def setUp(self):
+        self.knowledge_base_client = KnowledgeBaseClient(API_KEY, base_url=API_BASE_URL)
+        self.README_FILE_PATH = os.path.abspath(os.path.join(FILE_PATH_BASE, "../README.md"))
+        self.dataset_id = None
+        self.document_id = None
+        self.segment_id = None
+        self.batch_id = None
+
+    def _get_dataset_kb_client(self):
+        self.assertIsNotNone(self.dataset_id)
+        return KnowledgeBaseClient(API_KEY, base_url=API_BASE_URL, dataset_id=self.dataset_id)
+
+    def test_001_create_dataset(self):
+        response = self.knowledge_base_client.create_dataset(name="test_dataset")
+        data = response.json()
+        self.assertIn("id", data)
+        self.dataset_id = data["id"]
+        self.assertEqual("test_dataset", data["name"])
+
+        # the following tests must be executed in order because they reuse
+        # the dataset/document/segment ids created by the previous steps
+        self._test_002_list_datasets()
+        self._test_003_create_document_by_text()
+        time.sleep(1)
+        self._test_004_update_document_by_text()
+        # self._test_005_batch_indexing_status()
+        time.sleep(1)
+        self._test_006_update_document_by_file()
+        time.sleep(1)
+        self._test_007_list_documents()
+        self._test_008_delete_document()
+        self._test_009_create_document_by_file()
+        time.sleep(1)
+        self._test_010_add_segments()
+        self._test_011_query_segments()
+        self._test_012_update_document_segment()
+        self._test_013_delete_document_segment()
+        self._test_014_delete_dataset()
+
+    def _test_002_list_datasets(self):
+        response = self.knowledge_base_client.list_datasets()
+        data = response.json()
+        self.assertIn("data", data)
+        self.assertIn("total", data)
+
+    def _test_003_create_document_by_text(self):
+        client = self._get_dataset_kb_client()
+        response = client.create_document_by_text("test_document", "test_text")
+        data = response.json()
+        self.assertIn("document", data)
+        self.document_id = data["document"]["id"]
+        self.batch_id = data["batch"]
+
+    def _test_004_update_document_by_text(self):
+        client = self._get_dataset_kb_client()
+        self.assertIsNotNone(self.document_id)
+        response = client.update_document_by_text(self.document_id, "test_document_updated", "test_text_updated")
+        data = response.json()
+        self.assertIn("document", data)
+        self.assertIn("batch", data)
+        self.batch_id = data["batch"]
+
+    def _test_005_batch_indexing_status(self):
+        client = self._get_dataset_kb_client()
+        response = client.batch_indexing_status(self.batch_id)
+        data = response.json()
+        self.assertEqual(response.status_code, 200)
+
+    def _test_006_update_document_by_file(self):
+        client = self._get_dataset_kb_client()
+        self.assertIsNotNone(self.document_id)
+        response = client.update_document_by_file(self.document_id, self.README_FILE_PATH)
+        data = response.json()
+        self.assertIn("document", data)
+        self.assertIn("batch", data)
+        self.batch_id = data["batch"]
+
+    def _test_007_list_documents(self):
+        client = self._get_dataset_kb_client()
+        response = client.list_documents()
+        data = response.json()
+        self.assertIn("data", data)
+
+    def _test_008_delete_document(self):
+        client = self._get_dataset_kb_client()
+        self.assertIsNotNone(self.document_id)
+        response = client.delete_document(self.document_id)
+        data = response.json()
+        self.assertIn("result", data)
+        self.assertEqual("success", data["result"])
+
+    def _test_009_create_document_by_file(self):
+        client = self._get_dataset_kb_client()
+        response = client.create_document_by_file(self.README_FILE_PATH)
+        data = response.json()
+        self.assertIn("document", data)
+        self.document_id = data["document"]["id"]
+        self.batch_id = data["batch"]
+
+    def _test_010_add_segments(self):
+        client = self._get_dataset_kb_client()
+        response = client.add_segments(self.document_id, [
+            {"content": "test text segment 1"}
+        ])
+        data = response.json()
+        self.assertIn("data", data)
+        self.assertGreater(len(data["data"]), 0)
+        segment = data["data"][0]
+        self.segment_id = segment["id"]
+
+    def _test_011_query_segments(self):
+        client = self._get_dataset_kb_client()
+        response = client.query_segments(self.document_id)
+        data = response.json()
+        self.assertIn("data", data)
+        self.assertGreater(len(data["data"]), 0)
+
+    def _test_012_update_document_segment(self):
+        client = self._get_dataset_kb_client()
+        self.assertIsNotNone(self.segment_id)
+        response = client.update_document_segment(self.document_id, self.segment_id,
+                                                  {"content": "test text segment 1 updated"}
+                                                  )
+        data = response.json()
+        self.assertIn("data", data)
+        self.assertGreater(len(data["data"]), 0)
+        segment = data["data"]
+        self.assertEqual("test text segment 1 updated", segment["content"])
+
+    def _test_013_delete_document_segment(self):
+        client = self._get_dataset_kb_client()
+        self.assertIsNotNone(self.segment_id)
+        response = client.delete_document_segment(self.document_id, self.segment_id)
+        data = response.json()
+        self.assertIn("result", data)
+        self.assertEqual("success", data["result"])
+
+    def _test_014_delete_dataset(self):
+        client = self._get_dataset_kb_client()
+        response = client.delete_dataset()
+        self.assertEqual(204, response.status_code)
 
 
 class TestChatClient(unittest.TestCase):

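Because the knowledge-base tests share the dataset/document/segment ids created along the way, `test_001_create_dataset` drives the private `_test_00x` steps in a fixed order. A hedged sketch of invoking just that entry point from `sdks/python-client` (placeholder environment values; assumes the `tests` directory is importable and the target instance is reachable):

```python
import os
import unittest

# Placeholders; the test module reads these at import time.
os.environ.setdefault("API_KEY", "your-dataset-api-key")
os.environ.setdefault("API_BASE_URL", "https://api.dify.ai/v1")

# Load only the ordered knowledge-base flow and run it.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "tests.test_client.TestKnowledgeBaseClient.test_001_create_dataset"
)
unittest.TextTestRunner(verbosity=2).run(suite)
```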
+ 3 - 0
web/.env.example

@@ -19,3 +19,6 @@ NEXT_TELEMETRY_DISABLED=1
 
 # Disable Upload Image as WebApp icon default is false
 NEXT_PUBLIC_UPLOAD_IMAGE_AS_ICON=false
+
+# The timeout for text generation, in milliseconds
+NEXT_PUBLIC_TEXT_GENERATION_TIMEOUT_MS=60000

+ 27 - 0
web/.husky/pre-commit

@@ -51,5 +51,32 @@ if $web_modified; then
     echo "Running ESLint on web module"
     cd ./web || exit 1
     npx lint-staged
+
+    echo "Running unit tests check"
+    modified_files=$(git diff --cached --name-only -- utils | grep -v '\.spec\.ts$' || true)
+
+    if [ -n "$modified_files" ]; then
+        for file in $modified_files; do
+            test_file="${file%.*}.spec.ts"
+            echo "Checking for test file: $test_file"
+
+            # check if the test file exists
+            if [ -f "../$test_file" ]; then
+                echo "Detected changes in $file, running corresponding unit tests..."
+                npm run test "../$test_file"
+
+                if [ $? -ne 0 ]; then
+                    echo "Unit tests failed. Please fix the errors before committing."
+                    exit 1
+                fi
+                echo "Unit tests for $file passed."
+            else
+                echo "Warning: $file does not have a corresponding test file."
+            fi
+
+        done
+        echo "All unit tests for modified web/utils files have passed."
+    fi
+
     cd ../
 fi

+ 5 - 1
web/README.md

@@ -18,6 +18,10 @@ yarn install --frozen-lockfile
 
 Then, configure the environment variables. Create a file named `.env.local` in the current directory and copy the contents from `.env.example`. Modify the values of these environment variables according to your requirements:
 
+```bash
+cp .env.example .env.local
+```
+
 ```
 # For production release, change this to PRODUCTION
 NEXT_PUBLIC_DEPLOY_ENV=DEVELOPMENT
@@ -78,7 +82,7 @@ If your IDE is VSCode, rename `web/.vscode/settings.example.json` to `web/.vscod
 
 We start to use [Jest](https://jestjs.io/) and [React Testing Library](https://testing-library.com/docs/react-testing-library/intro/) for Unit Testing.
 
-You can create a test file with a suffix of `.spec` beside the file that to be tested. For example, if you want to test a file named `util.ts`. The test file name should be `util.spec.ts`. 
+You can create a test file with a `.spec` suffix beside the file to be tested. For example, to test a file named `util.ts`, the test file should be named `util.spec.ts`.
 
 Run test:
 

+ 7 - 2
web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx

@@ -109,6 +109,11 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
     setAppDetail()
     fetchAppDetail({ url: '/apps', id: appId }).then((res) => {
       // redirection
+      const canIEditApp = isCurrentWorkspaceEditor
+      if (!canIEditApp && (pathname.endsWith('configuration') || pathname.endsWith('workflow') || pathname.endsWith('logs'))) {
+        router.replace(`/app/${appId}/overview`)
+        return
+      }
       if ((res.mode === 'workflow' || res.mode === 'advanced-chat') && (pathname).endsWith('configuration')) {
         router.replace(`/app/${appId}/workflow`)
       }
@@ -118,7 +123,7 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
       else {
         setAppDetail({ ...res, enable_sso: false })
         setNavigation(getNavigations(appId, isCurrentWorkspaceEditor, res.mode))
-        if (systemFeatures.enable_web_sso_switch_component) {
+        if (systemFeatures.enable_web_sso_switch_component && canIEditApp) {
           fetchAppSSO({ appId }).then((ssoRes) => {
             setAppDetail({ ...res, enable_sso: ssoRes.enabled })
           })
@@ -128,7 +133,7 @@ const AppDetailLayout: FC<IAppDetailLayoutProps> = (props) => {
       if (e.status === 404)
         router.replace('/apps')
     })
-  }, [appId, isCurrentWorkspaceEditor, systemFeatures])
+  }, [appId, isCurrentWorkspaceEditor, systemFeatures, getNavigations, pathname, router, setAppDetail])
 
   useUnmount(() => {
     setAppDetail()

+ 1 - 3
web/app/components/app/configuration/toolbox/annotation/annotation-ctrl-btn/index.tsx

@@ -73,7 +73,7 @@ const CacheCtrlBtn: FC<Props> = ({
     setShowModal(false)
   }
   return (
-    <div className={cn(className, 'inline-block')}>
+    <div className={cn('inline-block', className)}>
       <div className='inline-flex p-0.5 space-x-0.5 rounded-lg bg-white border border-gray-100 shadow-md text-gray-500 cursor-pointer'>
         {cached
           ? (
@@ -101,7 +101,6 @@ const CacheCtrlBtn: FC<Props> = ({
             ? (
               <Tooltip
                 popupContent={t('appDebug.feature.annotation.add')}
-                needsDelay
               >
                 <div
                   className='p-1 rounded-md hover:bg-[#EEF4FF] hover:text-[#444CE7] cursor-pointer'
@@ -115,7 +114,6 @@ const CacheCtrlBtn: FC<Props> = ({
         }
         <Tooltip
           popupContent={t('appDebug.feature.annotation.edit')}
-          needsDelay
         >
           <div
             className='p-1 cursor-pointer rounded-md hover:bg-black/5'

+ 2 - 2
web/app/components/app/log/list.tsx

@@ -338,7 +338,7 @@ function DetailPanel<T extends ChatConversationFullDetailResponse | CompletionCo
   return (
     <div ref={ref} className='rounded-xl border-[0.5px] border-gray-200 h-full flex flex-col overflow-auto'>
       {/* Panel Header */}
-      <div className='border-b border-gray-100 py-4 px-6 flex items-center justify-between'>
+      <div className='border-b border-gray-100 py-4 px-6 flex items-center justify-between bg-components-panel-bg'>
         <div>
           <div className='text-gray-500 text-[10px] leading-[14px]'>{isChatMode ? t('appLog.detail.conversationId') : t('appLog.detail.time')}</div>
           {isChatMode && (
@@ -725,7 +725,7 @@ const ConversationList: FC<IConversationList> = ({ logs, appDetail, onRefresh })
         onClose={onCloseDrawer}
         mask={isMobile}
         footer={null}
-        panelClassname='mt-16 mx-2 sm:mr-2 mb-4 !p-0 !max-w-[640px] rounded-xl'
+        panelClassname='mt-16 mx-2 sm:mr-2 mb-4 !p-0 !max-w-[640px] rounded-xl bg-background-gradient-bg-fill-chat-bg-1'
       >
         <DrawerContext.Provider value={{
           onClose: onCloseDrawer,

+ 3 - 2
web/app/components/app/overview/settings/index.tsx

@@ -16,7 +16,7 @@ import type { AppIconType, AppSSO, Language } from '@/types/app'
 import { useToastContext } from '@/app/components/base/toast'
 import { languages } from '@/i18n/language'
 import Tooltip from '@/app/components/base/tooltip'
-import AppContext from '@/context/app-context'
+import AppContext, { useAppContext } from '@/context/app-context'
 import type { AppIconSelection } from '@/app/components/base/app-icon-picker'
 import AppIconPicker from '@/app/components/base/app-icon-picker'
 
@@ -57,6 +57,7 @@ const SettingsModal: FC<ISettingsModalProps> = ({
   onSave,
 }) => {
   const systemFeatures = useContextSelector(AppContext, state => state.systemFeatures)
+  const { isCurrentWorkspaceEditor } = useAppContext()
   const { notify } = useToastContext()
   const [isShowMore, setIsShowMore] = useState(false)
   const {
@@ -265,7 +266,7 @@ const SettingsModal: FC<ISettingsModalProps> = ({
               }
               asChild={false}
             >
-              <Switch disabled={!systemFeatures.sso_enforced_for_web} defaultValue={systemFeatures.sso_enforced_for_web && inputInfo.enable_sso} onChange={v => setInputInfo({ ...inputInfo, enable_sso: v })}></Switch>
+              <Switch disabled={!systemFeatures.sso_enforced_for_web || !isCurrentWorkspaceEditor} defaultValue={systemFeatures.sso_enforced_for_web && inputInfo.enable_sso} onChange={v => setInputInfo({ ...inputInfo, enable_sso: v })}></Switch>
             </Tooltip>
           </div>
           <p className='body-xs-regular text-gray-500'>{t(`${prefixSettings}.sso.description`)}</p>

+ 17 - 36
web/app/components/base/chat/chat/answer/index.tsx

@@ -8,7 +8,6 @@ import type {
   ChatConfig,
   ChatItem,
 } from '../../types'
-import { useChatContext } from '../context'
 import Operation from './operation'
 import AgentContent from './agent-content'
 import BasicContent from './basic-content'
@@ -16,13 +15,13 @@ import SuggestedQuestions from './suggested-questions'
 import More from './more'
 import WorkflowProcess from './workflow-process'
 import { AnswerTriangle } from '@/app/components/base/icons/src/vender/solid/general'
-import { MessageFast } from '@/app/components/base/icons/src/vender/solid/communication'
 import LoadingAnim from '@/app/components/base/chat/chat/loading-anim'
 import Citation from '@/app/components/base/chat/chat/citation'
 import { EditTitle } from '@/app/components/app/annotation/edit-annotation-modal/edit-item'
 import type { Emoji } from '@/app/components/tools/types'
 import type { AppData } from '@/models/share'
 import AnswerIcon from '@/app/components/base/answer-icon'
+import cn from '@/utils/classnames'
 
 type AnswerProps = {
   item: ChatItem
@@ -61,27 +60,25 @@ const Answer: FC<AnswerProps> = ({
   } = item
   const hasAgentThoughts = !!agent_thoughts?.length
 
-  const [containerWidth] = useState(0)
+  const [containerWidth, setContainerWidth] = useState(0)
   const [contentWidth, setContentWidth] = useState(0)
   const containerRef = useRef<HTMLDivElement>(null)
   const contentRef = useRef<HTMLDivElement>(null)
 
-  const {
-    config: chatContextConfig,
-  } = useChatContext()
+  const getContainerWidth = () => {
+    if (containerRef.current)
+      setContainerWidth(containerRef.current?.clientWidth + 16)
+  }
+  useEffect(() => {
+    getContainerWidth()
+  }, [])
 
-  const voiceRef = useRef(chatContextConfig?.text_to_speech?.voice)
   const getContentWidth = () => {
     if (contentRef.current)
       setContentWidth(contentRef.current?.clientWidth)
   }
 
   useEffect(() => {
-    voiceRef.current = chatContextConfig?.text_to_speech?.voice
-  }
-  , [chatContextConfig?.text_to_speech?.voice])
-
-  useEffect(() => {
     if (!responding)
       getContentWidth()
   }, [responding])
@@ -89,36 +86,20 @@ const Answer: FC<AnswerProps> = ({
   return (
     <div className='flex mb-2 last:mb-0'>
       <div className='shrink-0 relative w-10 h-10'>
-        {
-          answerIcon || <AnswerIcon />
-        }
-        {
-          responding && (
-            <div className='absolute -top-[3px] -left-[3px] pl-[6px] flex items-center w-4 h-4 bg-white rounded-full shadow-xs border-[0.5px] border-gray-50'>
-              <LoadingAnim type='avatar' />
-            </div>
-          )
-        }
+        {answerIcon || <AnswerIcon />}
+        {responding && (
+          <div className='absolute -top-[3px] -left-[3px] pl-[6px] flex items-center w-4 h-4 bg-white rounded-full shadow-xs border-[0.5px] border-gray-50'>
+            <LoadingAnim type='avatar' />
+          </div>
+        )}
       </div>
       <div className='chat-answer-container group grow w-0 ml-4' ref={containerRef}>
-        <div className={`group relative pr-10 ${chatAnswerContainerInner}`}>
+        <div className={cn('group relative pr-10', chatAnswerContainerInner)}>
           <AnswerTriangle className='absolute -left-2 top-0 w-2 h-3 text-gray-100' />
           <div
             ref={contentRef}
-            className={`
-              relative inline-block px-4 py-3 max-w-full bg-gray-100 rounded-b-2xl rounded-tr-2xl text-sm text-gray-900
-              ${workflowProcess && 'w-full'}
-            `}
+            className={cn('relative inline-block px-4 py-3 max-w-full bg-gray-100 rounded-b-2xl rounded-tr-2xl text-sm text-gray-900', workflowProcess && 'w-full')}
           >
-            {annotation?.id && (
-              <div
-                className='absolute -top-3.5 -right-3.5 box-border flex items-center justify-center h-7 w-7 p-0.5 rounded-lg bg-white cursor-pointer text-[#444CE7] shadow-md group-hover:hidden'
-              >
-                <div className='p-1 rounded-lg bg-[#EEF4FF] '>
-                  <MessageFast className='w-4 h-4' />
-                </div>
-              </div>
-            )}
             {
               !responding && (
                 <Operation

+ 1 - 1
web/app/components/base/chat/chat/answer/operation.tsx

@@ -149,7 +149,7 @@ const Operation: FC<OperationProps> = ({
           />
         )}
         {
-          !positionRight && annotation?.id && (
+          annotation?.id && (
             <div
               className='relative box-border flex items-center justify-center h-7 w-7 p-0.5 rounded-lg bg-white cursor-pointer text-[#444CE7] shadow-md group-hover:hidden'
             >

+ 2 - 2
web/app/components/base/chat/chat/index.tsx

@@ -109,9 +109,9 @@ const Chat: FC<ChatProps> = ({
   const userScrolledRef = useRef(false)
 
   const handleScrollToBottom = useCallback(() => {
-    if (chatContainerRef.current && !userScrolledRef.current)
+    if (chatList.length > 1 && chatContainerRef.current && !userScrolledRef.current)
       chatContainerRef.current.scrollTop = chatContainerRef.current.scrollHeight
-  }, [])
+  }, [chatList.length])
 
   const handleWindowResize = useCallback(() => {
     if (chatContainerRef.current)

+ 229 - 23
web/app/components/base/image-uploader/image-preview.tsx

@@ -1,26 +1,42 @@
 import type { FC } from 'react'
-import { useRef } from 'react'
+import React, { useCallback, useEffect, useRef, useState } from 'react'
 import { t } from 'i18next'
 import { createPortal } from 'react-dom'
-import { RiCloseLine, RiExternalLinkLine } from '@remixicon/react'
+import { RiAddBoxLine, RiCloseLine, RiDownloadCloud2Line, RiFileCopyLine, RiZoomInLine, RiZoomOutLine } from '@remixicon/react'
 import Tooltip from '@/app/components/base/tooltip'
-import { randomString } from '@/utils'
+import Toast from '@/app/components/base/toast'
 
 type ImagePreviewProps = {
   url: string
   title: string
   onCancel: () => void
 }
+
+const isBase64 = (str: string): boolean => {
+  try {
+    return btoa(atob(str)) === str
+  }
+  catch (err) {
+    return false
+  }
+}
+
 const ImagePreview: FC<ImagePreviewProps> = ({
   url,
   title,
   onCancel,
 }) => {
-  const selector = useRef(`copy-tooltip-${randomString(4)}`)
+  const [scale, setScale] = useState(1)
+  const [position, setPosition] = useState({ x: 0, y: 0 })
+  const [isDragging, setIsDragging] = useState(false)
+  const imgRef = useRef<HTMLImageElement>(null)
+  const dragStartRef = useRef({ x: 0, y: 0 })
+  const [isCopied, setIsCopied] = useState(false)
+  const containerRef = useRef<HTMLDivElement>(null)
 
   const openInNewTab = () => {
     // Open in a new window, considering the case when the page is inside an iframe
-    if (url.startsWith('http')) {
+    if (url.startsWith('http') || url.startsWith('https')) {
       window.open(url, '_blank')
     }
     else if (url.startsWith('data:image')) {
@@ -29,34 +45,224 @@ const ImagePreview: FC<ImagePreviewProps> = ({
       win?.document.write(`<img src="${url}" alt="${title}" />`)
     }
     else {
-      console.error('Unable to open image', url)
+      Toast.notify({
+        type: 'error',
+        message: `Unable to open image: ${url}`,
+      })
+    }
+  }
+  const downloadImage = () => {
+    // Download the image, handling both remote URLs and base64 data URIs
+    if (url.startsWith('http') || url.startsWith('https')) {
+      const a = document.createElement('a')
+      a.href = url
+      a.download = title
+      a.click()
     }
+    else if (url.startsWith('data:image')) {
+      // Base64 image
+      const a = document.createElement('a')
+      a.href = url
+      a.download = title
+      a.click()
+    }
+    else {
+      Toast.notify({
+        type: 'error',
+        message: `Unable to open image: ${url}`,
+      })
+    }
+  }
+
+  const zoomIn = () => {
+    setScale(prevScale => Math.min(prevScale * 1.2, 15))
   }
 
+  const zoomOut = () => {
+    setScale((prevScale) => {
+      const newScale = Math.max(prevScale / 1.2, 0.5)
+      if (newScale === 1)
+        setPosition({ x: 0, y: 0 }) // Reset position when fully zoomed out
+
+      return newScale
+    })
+  }
+
+  const imageTobase64ToBlob = (base64: string, type = 'image/png'): Blob => {
+    const byteCharacters = atob(base64)
+    const byteArrays = []
+
+    for (let offset = 0; offset < byteCharacters.length; offset += 512) {
+      const slice = byteCharacters.slice(offset, offset + 512)
+      const byteNumbers = new Array(slice.length)
+      for (let i = 0; i < slice.length; i++)
+        byteNumbers[i] = slice.charCodeAt(i)
+
+      const byteArray = new Uint8Array(byteNumbers)
+      byteArrays.push(byteArray)
+    }
+
+    return new Blob(byteArrays, { type })
+  }
+
+  const imageCopy = useCallback(() => {
+    const shareImage = async () => {
+      try {
+        const base64Data = url.split(',')[1]
+        const blob = imageTobase64ToBlob(base64Data, 'image/png')
+
+        await navigator.clipboard.write([
+          new ClipboardItem({
+            [blob.type]: blob,
+          }),
+        ])
+        setIsCopied(true)
+
+        Toast.notify({
+          type: 'success',
+          message: t('common.operation.imageCopied'),
+        })
+      }
+      catch (err) {
+        console.error('Failed to copy image:', err)
+
+        const link = document.createElement('a')
+        link.href = url
+        link.download = `${title}.png`
+        document.body.appendChild(link)
+        link.click()
+        document.body.removeChild(link)
+
+        Toast.notify({
+          type: 'info',
+          message: t('common.operation.imageDownloaded'),
+        })
+      }
+    }
+    shareImage()
+  }, [title, url])
+
+  const handleWheel = useCallback((e: React.WheelEvent<HTMLDivElement>) => {
+    if (e.deltaY < 0)
+      zoomIn()
+    else
+      zoomOut()
+  }, [])
+
+  const handleMouseDown = useCallback((e: React.MouseEvent<HTMLDivElement>) => {
+    if (scale > 1) {
+      setIsDragging(true)
+      dragStartRef.current = { x: e.clientX - position.x, y: e.clientY - position.y }
+    }
+  }, [scale, position])
+
+  const handleMouseMove = useCallback((e: React.MouseEvent<HTMLDivElement>) => {
+    if (isDragging && scale > 1) {
+      const deltaX = e.clientX - dragStartRef.current.x
+      const deltaY = e.clientY - dragStartRef.current.y
+
+      // Calculate boundaries
+      const imgRect = imgRef.current?.getBoundingClientRect()
+      const containerRect = imgRef.current?.parentElement?.getBoundingClientRect()
+
+      if (imgRect && containerRect) {
+        const maxX = (imgRect.width * scale - containerRect.width) / 2
+        const maxY = (imgRect.height * scale - containerRect.height) / 2
+
+        setPosition({
+          x: Math.max(-maxX, Math.min(maxX, deltaX)),
+          y: Math.max(-maxY, Math.min(maxY, deltaY)),
+        })
+      }
+    }
+  }, [isDragging, scale])
+
+  const handleMouseUp = useCallback(() => {
+    setIsDragging(false)
+  }, [])
+
+  useEffect(() => {
+    document.addEventListener('mouseup', handleMouseUp)
+    return () => {
+      document.removeEventListener('mouseup', handleMouseUp)
+    }
+  }, [handleMouseUp])
+
+  useEffect(() => {
+    const handleKeyDown = (event: KeyboardEvent) => {
+      if (event.key === 'Escape')
+        onCancel()
+    }
+
+    window.addEventListener('keydown', handleKeyDown)
+
+    // Set focus to the container element
+    if (containerRef.current)
+      containerRef.current.focus()
+
+    // Cleanup function
+    return () => {
+      window.removeEventListener('keydown', handleKeyDown)
+    }
+  }, [onCancel])
+
   return createPortal(
-    <div className='fixed inset-0 p-8 flex items-center justify-center bg-black/80 z-[1000]' onClick={e => e.stopPropagation()}>
+    <div className='fixed inset-0 p-8 flex items-center justify-center bg-black/80 z-[1000] image-preview-container'
+      onClick={e => e.stopPropagation()}
+      onWheel={handleWheel}
+      onMouseDown={handleMouseDown}
+      onMouseMove={handleMouseMove}
+      onMouseUp={handleMouseUp}
+      style={{ cursor: scale > 1 ? 'move' : 'default' }}
+      tabIndex={-1}>
       {/* eslint-disable-next-line @next/next/no-img-element */}
       <img
+        ref={imgRef}
         alt={title}
-        src={url}
+        src={isBase64(url) ? `data:image/png;base64,${url}` : url}
         className='max-w-full max-h-full'
+        style={{
+          transform: `scale(${scale}) translate(${position.x}px, ${position.y}px)`,
+          transition: isDragging ? 'none' : 'transform 0.2s ease-in-out',
+        }}
       />
-      <div
-        className='absolute top-6 right-6 flex items-center justify-center w-8 h-8 bg-white/8 rounded-lg backdrop-blur-[2px] cursor-pointer'
-        onClick={onCancel}
-      >
-        <RiCloseLine className='w-4 h-4 text-white' />
-      </div>
-      <Tooltip
-        selector={selector.current}
-        content={(t('common.operation.openInNewTab') ?? 'Open in new tab')}
-        className='z-10'
-      >
+      <Tooltip popupContent={t('common.operation.copyImage')}>
+        <div className='absolute top-6 right-48 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
+          onClick={imageCopy}>
+          {isCopied
+            ? <RiFileCopyLine className='w-4 h-4 text-green-500'/>
+            : <RiFileCopyLine className='w-4 h-4 text-gray-500'/>}
+        </div>
+      </Tooltip>
+      <Tooltip popupContent={t('common.operation.zoomOut')}>
+        <div className='absolute top-6 right-40 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
+          onClick={zoomOut}>
+          <RiZoomOutLine className='w-4 h-4 text-gray-500'/>
+        </div>
+      </Tooltip>
+      <Tooltip popupContent={t('common.operation.zoomIn')}>
+        <div className='absolute top-6 right-32 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
+          onClick={zoomIn}>
+          <RiZoomInLine className='w-4 h-4 text-gray-500'/>
+        </div>
+      </Tooltip>
+      <Tooltip popupContent={t('common.operation.download')}>
+        <div className='absolute top-6 right-24 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
+          onClick={downloadImage}>
+          <RiDownloadCloud2Line className='w-4 h-4 text-gray-500'/>
+        </div>
+      </Tooltip>
+      <Tooltip popupContent={t('common.operation.openInNewTab')}>
+        <div className='absolute top-6 right-16 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
+          onClick={openInNewTab}>
+          <RiAddBoxLine className='w-4 h-4 text-gray-500'/>
+        </div>
+      </Tooltip>
+      <Tooltip popupContent={t('common.operation.close')}>
         <div
-          className='absolute top-6 right-16 flex items-center justify-center w-8 h-8 rounded-lg cursor-pointer'
-          onClick={openInNewTab}
-        >
-          <RiExternalLinkLine className='w-4 h-4 text-white' />
+          className='absolute top-6 right-6 flex items-center justify-center w-8 h-8 bg-white/8 rounded-lg backdrop-blur-[2px] cursor-pointer'
+          onClick={onCancel}>
+          <RiCloseLine className='w-4 h-4 text-gray-500'/>
         </div>
       </Tooltip>
     </div>,

+ 20 - 17
web/app/components/base/markdown.tsx

@@ -5,6 +5,7 @@ import RemarkMath from 'remark-math'
 import RemarkBreaks from 'remark-breaks'
 import RehypeKatex from 'rehype-katex'
 import RemarkGfm from 'remark-gfm'
+import RehypeRaw from 'rehype-raw'
 import SyntaxHighlighter from 'react-syntax-highlighter'
 import { atelierHeathLight } from 'react-syntax-highlighter/dist/esm/styles/hljs'
 import type { RefObject } from 'react'
@@ -18,6 +19,7 @@ import ImageGallery from '@/app/components/base/image-gallery'
 import { useChatContext } from '@/app/components/base/chat/chat/context'
 import VideoGallery from '@/app/components/base/video-gallery'
 import AudioGallery from '@/app/components/base/audio-gallery'
+import SVGRenderer from '@/app/components/base/svg-gallery'
 
 // Available language https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD
 const capitalizationLanguageNameMap: Record<string, string> = {
@@ -40,6 +42,7 @@ const capitalizationLanguageNameMap: Record<string, string> = {
   powershell: 'PowerShell',
   json: 'JSON',
   latex: 'Latex',
+  svg: 'SVG',
 }
 const getCorrectCapitalizationLanguageName = (language: string) => {
   if (!language)
@@ -107,6 +110,7 @@ const useLazyLoad = (ref: RefObject<Element>): boolean => {
 // Error: Minified React error 185;
 // visit https://reactjs.org/docs/error-decoder.html?invariant=185 for the full message
 // or use the non-minified dev environment for full errors and additional helpful warnings.
+
 const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }) => {
   const [isSVG, setIsSVG] = useState(true)
   const match = /language-(\w+)/.exec(className || '')
@@ -134,7 +138,7 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
           >
             <div className='text-[13px] text-gray-500 font-normal'>{languageShowName}</div>
             <div style={{ display: 'flex' }}>
-              {language === 'mermaid' && <SVGBtn isSVG={isSVG} setIsSVG={setIsSVG} />}
+              {language === 'mermaid' && <SVGBtn isSVG={isSVG} setIsSVG={setIsSVG}/>}
               <CopyBtn
                 className='mr-1'
                 value={String(children).replace(/\n$/, '')}
@@ -144,12 +148,10 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
           </div>
           {(language === 'mermaid' && isSVG)
             ? (<Flowchart PrimitiveCode={String(children).replace(/\n$/, '')} />)
-            : (
-              (language === 'echarts')
-                ? (<div style={{ minHeight: '250px', minWidth: '250px' }}><ErrorBoundary><ReactEcharts
-                  option={chartData}
-                >
-                </ReactEcharts></ErrorBoundary></div>)
+            : (language === 'echarts'
+              ? (<div style={{ minHeight: '350px', minWidth: '700px' }}><ErrorBoundary><ReactEcharts option={chartData} /></ErrorBoundary></div>)
+              : (language === 'svg'
+                ? (<ErrorBoundary><SVGRenderer content={String(children).replace(/\n$/, '')} /></ErrorBoundary>)
                 : (<SyntaxHighlighter
                   {...props}
                   style={atelierHeathLight}
@@ -162,17 +164,12 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
                   PreTag="div"
                 >
                   {String(children).replace(/\n$/, '')}
-                </SyntaxHighlighter>))}
+                </SyntaxHighlighter>)))}
         </div>
       )
-      : (
-        <code {...props} className={className}>
-          {children}
-        </code>
-      )
+      : (<code {...props} className={className}>{children}</code>)
   }, [chartData, children, className, inline, isSVG, language, languageShowName, match, props])
 })
-
 CodeBlock.displayName = 'CodeBlock'
 
 const VideoBlock: CodeComponent = memo(({ node }) => {
@@ -230,6 +227,7 @@ export function Markdown(props: { content: string; className?: string }) {
         remarkPlugins={[[RemarkGfm, RemarkMath, { singleDollarTextMath: false }], RemarkBreaks]}
         rehypePlugins={[
           RehypeKatex,
+          RehypeRaw as any,
           // The Rehype plug-in is used to remove the ref attribute of an element
           () => {
             return (tree) => {
@@ -244,6 +242,7 @@ export function Markdown(props: { content: string; className?: string }) {
             }
           },
         ]}
+        disallowedElements={['script', 'iframe', 'head', 'html', 'meta', 'link', 'style', 'body']}
         components={{
           code: CodeBlock,
           img: Img,
@@ -266,19 +265,23 @@ export function Markdown(props: { content: string; className?: string }) {
 // This can happen when a component attempts to access an undefined object that references an unregistered map, causing the program to crash.
 
 export default class ErrorBoundary extends Component {
-  constructor(props) {
+  constructor(props: any) {
     super(props)
     this.state = { hasError: false }
   }
 
-  componentDidCatch(error, errorInfo) {
+  componentDidCatch(error: any, errorInfo: any) {
     this.setState({ hasError: true })
     console.error(error, errorInfo)
   }
 
   render() {
+    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+    // @ts-expect-error
     if (this.state.hasError)
-      return <div>Oops! ECharts reported a runtime error. <br />(see the browser console for more information)</div>
+      return <div>Oops! An error occurred. This could be due to an ECharts runtime error or invalid SVG content. <br />(see the browser console for more information)</div>
+    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+    // @ts-expect-error
     return this.props.children
   }
 }

+ 79 - 0
web/app/components/base/svg-gallery/index.tsx

@@ -0,0 +1,79 @@
+import { useEffect, useRef, useState } from 'react'
+import { SVG } from '@svgdotjs/svg.js'
+import ImagePreview from '@/app/components/base/image-uploader/image-preview'
+
+export const SVGRenderer = ({ content }: { content: string }) => {
+  const svgRef = useRef<HTMLDivElement>(null)
+  const [imagePreview, setImagePreview] = useState('')
+  const [windowSize, setWindowSize] = useState({
+    width: typeof window !== 'undefined' ? window.innerWidth : 0,
+    height: typeof window !== 'undefined' ? window.innerHeight : 0,
+  })
+
+  const svgToDataURL = (svgElement: Element): string => {
+    const svgString = new XMLSerializer().serializeToString(svgElement)
+    const base64String = Buffer.from(svgString).toString('base64')
+    return `data:image/svg+xml;base64,${base64String}`
+  }
+
+  useEffect(() => {
+    const handleResize = () => {
+      setWindowSize({ width: window.innerWidth, height: window.innerHeight })
+    }
+
+    window.addEventListener('resize', handleResize)
+    return () => window.removeEventListener('resize', handleResize)
+  }, [])
+
+  useEffect(() => {
+    if (svgRef.current) {
+      try {
+        svgRef.current.innerHTML = ''
+        const draw = SVG().addTo(svgRef.current).size('100%', '100%')
+
+        const parser = new DOMParser()
+        const svgDoc = parser.parseFromString(content, 'image/svg+xml')
+        const svgElement = svgDoc.documentElement
+
+        if (!(svgElement instanceof SVGElement))
+          throw new Error('Invalid SVG content')
+
+        const originalWidth = parseInt(svgElement.getAttribute('width') || '400', 10)
+        const originalHeight = parseInt(svgElement.getAttribute('height') || '600', 10)
+        const scale = Math.min(windowSize.width / originalWidth, windowSize.height / originalHeight, 1)
+        const scaledWidth = originalWidth * scale
+        const scaledHeight = originalHeight * scale
+        draw.size(scaledWidth, scaledHeight)
+
+        const rootElement = draw.svg(content)
+        rootElement.scale(scale)
+
+        rootElement.click(() => {
+          setImagePreview(svgToDataURL(svgElement as Element))
+        })
+      }
+      catch (error) {
+        if (svgRef.current)
+          svgRef.current.innerHTML = 'Error rendering SVG. Please wait for the image content to finish loading.'
+      }
+    }
+  }, [content, windowSize])
+
+  return (
+    <>
+      <div ref={svgRef} style={{
+        width: '100%',
+        height: '100%',
+        minHeight: '300px',
+        maxHeight: '80vh',
+        display: 'flex',
+        justifyContent: 'center',
+        alignItems: 'center',
+        cursor: 'pointer',
+      }} />
+      {imagePreview && (<ImagePreview url={imagePreview} title='Preview' onCancel={() => setImagePreview('')} />)}
+    </>
+  )
+}
+
+export default SVGRenderer

+ 18 - 0
web/app/components/datasets/create/step-two/escape.ts

@@ -0,0 +1,18 @@
+function escape(input: string): string {
+  if (!input || typeof input !== 'string')
+    return ''
+
+  const res = input
+    .replaceAll('\\', '\\\\')
+    .replaceAll('\0', '\\0')
+    .replaceAll('\b', '\\b')
+    .replaceAll('\f', '\\f')
+    .replaceAll('\n', '\\n')
+    .replaceAll('\r', '\\r')
+    .replaceAll('\t', '\\t')
+    .replaceAll('\v', '\\v')
+    .replaceAll('\'', '\\\'')
+  return res
+}
+
+export default escape

+ 27 - 10
web/app/components/datasets/create/step-two/index.tsx

@@ -1,5 +1,5 @@
 'use client'
-import React, { useEffect, useLayoutEffect, useRef, useState } from 'react'
+import React, { useCallback, useEffect, useLayoutEffect, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import { useContext } from 'use-context-selector'
 import { useBoolean } from 'ahooks'
@@ -13,6 +13,8 @@ import { groupBy } from 'lodash-es'
 import PreviewItem, { PreviewType } from './preview-item'
 import LanguageSelect from './language-select'
 import s from './index.module.css'
+import unescape from './unescape'
+import escape from './escape'
 import cn from '@/utils/classnames'
 import type { CrawlOptions, CrawlResultItem, CreateDocumentReq, CustomFile, FileIndexingEstimateResponse, FullDocumentDetail, IndexingEstimateParams, NotionInfo, PreProcessingRule, ProcessRule, Rules, createDocumentResponse } from '@/models/datasets'
 import {
@@ -78,6 +80,8 @@ enum IndexingType {
   ECONOMICAL = 'economy',
 }
 
+const DEFAULT_SEGMENT_IDENTIFIER = '\\n\\n'
+
 const StepTwo = ({
   isSetting,
   documentDetail,
@@ -110,8 +114,11 @@ const StepTwo = ({
   const previewScrollRef = useRef<HTMLDivElement>(null)
   const [previewScrolled, setPreviewScrolled] = useState(false)
   const [segmentationType, setSegmentationType] = useState<SegmentType>(SegmentType.AUTO)
-  const [segmentIdentifier, setSegmentIdentifier] = useState('\\n')
-  const [max, setMax] = useState(5000) // default chunk length
+  const [segmentIdentifier, doSetSegmentIdentifier] = useState(DEFAULT_SEGMENT_IDENTIFIER)
+  const setSegmentIdentifier = useCallback((value: string) => {
+    doSetSegmentIdentifier(value ? escape(value) : DEFAULT_SEGMENT_IDENTIFIER)
+  }, [])
+  const [max, setMax] = useState(4000) // default chunk length
   const [overlap, setOverlap] = useState(50)
   const [rules, setRules] = useState<PreProcessingRule[]>([])
   const [defaultConfig, setDefaultConfig] = useState<Rules>()
@@ -183,7 +190,7 @@ const StepTwo = ({
   }
   const resetRules = () => {
     if (defaultConfig) {
-      setSegmentIdentifier((defaultConfig.segmentation.separator === '\n' ? '\\n' : defaultConfig.segmentation.separator) || '\\n')
+      setSegmentIdentifier(defaultConfig.segmentation.separator)
       setMax(defaultConfig.segmentation.max_tokens)
       setOverlap(defaultConfig.segmentation.chunk_overlap)
       setRules(defaultConfig.pre_processing_rules)
@@ -217,7 +224,7 @@ const StepTwo = ({
       const ruleObj = {
         pre_processing_rules: rules,
         segmentation: {
-          separator: segmentIdentifier === '\\n' ? '\n' : segmentIdentifier,
+          separator: unescape(segmentIdentifier),
           max_tokens: max,
           chunk_overlap: overlap,
         },
@@ -394,7 +401,7 @@ const StepTwo = ({
     try {
       const res = await fetchDefaultProcessRule({ url: '/datasets/process-rule' })
       const separator = res.rules.segmentation.separator
-      setSegmentIdentifier((separator === '\n' ? '\\n' : separator) || '\\n')
+      setSegmentIdentifier(separator)
       setMax(res.rules.segmentation.max_tokens)
       setOverlap(res.rules.segmentation.chunk_overlap)
       setRules(res.rules.pre_processing_rules)
@@ -411,7 +418,7 @@ const StepTwo = ({
       const separator = rules.segmentation.separator
       const max = rules.segmentation.max_tokens
       const overlap = rules.segmentation.chunk_overlap
-      setSegmentIdentifier((separator === '\n' ? '\\n' : separator) || '\\n')
+      setSegmentIdentifier(separator)
       setMax(max)
       setOverlap(overlap)
       setRules(rules.pre_processing_rules)
@@ -616,12 +623,22 @@ const StepTwo = ({
                 <div className={s.typeFormBody}>
                   <div className={s.formRow}>
                     <div className='w-full'>
-                      <div className={s.label}>{t('datasetCreation.stepTwo.separator')}</div>
+                      <div className={s.label}>
+                        {t('datasetCreation.stepTwo.separator')}
+                        <Tooltip
+                          popupContent={
+                            <div className='max-w-[200px]'>
+                              {t('datasetCreation.stepTwo.separatorTip')}
+                            </div>
+                          }
+                        />
+                      </div>
                       <input
                         type="text"
                         className={s.input}
-                        placeholder={t('datasetCreation.stepTwo.separatorPlaceholder') || ''} value={segmentIdentifier}
-                        onChange={e => setSegmentIdentifier(e.target.value)}
+                        placeholder={t('datasetCreation.stepTwo.separatorPlaceholder') || ''}
+                        value={segmentIdentifier}
+                        onChange={e => doSetSegmentIdentifier(e.target.value)}
                       />
                     </div>
                   </div>

+ 54 - 0
web/app/components/datasets/create/step-two/unescape.ts

@@ -0,0 +1,54 @@
+// https://github.com/iamakulov/unescape-js/blob/master/src/index.js
+
+/**
+ * \\ - matches the backslash which indicates the beginning of an escape sequence
+ * (
+ *   u\{([0-9A-Fa-f]+)\} - first alternative; matches the variable-length hexadecimal escape sequence (\u{ABCD0})
+ * |
+ *   u([0-9A-Fa-f]{4}) - second alternative; matches the 4-digit hexadecimal escape sequence (\uABCD)
+ * |
+ *   x([0-9A-Fa-f]{2}) - third alternative; matches the 2-digit hexadecimal escape sequence (\xA5)
+ * |
+ *   ([1-7][0-7]{0,2}|[0-7]{2,3}) - fourth alternative; matches the up-to-3-digit octal escape sequence (\5 or \512)
+ * |
+ *   (['"tbrnfv0\\]) - fifth alternative; matches the special escape characters (\t, \n and so on)
+ * |
+ *   \U([0-9A-Fa-f]+) - sixth alternative; matches the 8-digit hexadecimal escape sequence used by python (\U0001F3B5)
+ * )
+ */
+const jsEscapeRegex = /\\(u\{([0-9A-Fa-f]+)\}|u([0-9A-Fa-f]{4})|x([0-9A-Fa-f]{2})|([1-7][0-7]{0,2}|[0-7]{2,3})|(['"tbrnfv0\\]))|\\U([0-9A-Fa-f]{8})/g
+
+const usualEscapeSequences: Record<string, string> = {
+  '0': '\0',
+  'b': '\b',
+  'f': '\f',
+  'n': '\n',
+  'r': '\r',
+  't': '\t',
+  'v': '\v',
+  '\'': '\'',
+  '"': '"',
+  '\\': '\\',
+}
+
+const fromHex = (str: string) => String.fromCodePoint(parseInt(str, 16))
+const fromOct = (str: string) => String.fromCodePoint(parseInt(str, 8))
+
+const unescape = (str: string) => {
+  return str.replace(jsEscapeRegex, (_, __, varHex, longHex, shortHex, octal, specialCharacter, python) => {
+    if (varHex !== undefined)
+      return fromHex(varHex)
+    else if (longHex !== undefined)
+      return fromHex(longHex)
+    else if (shortHex !== undefined)
+      return fromHex(shortHex)
+    else if (octal !== undefined)
+      return fromOct(octal)
+    else if (python !== undefined)
+      return fromHex(python)
+    else
+      return usualEscapeSequences[specialCharacter]
+  })
+}
+
+export default unescape

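Together with `escape.ts`, this gives step two a display/transport round trip: the separator held in React state stays escaped (hence the new default `\\n\\n`), and is unescaped into real control characters only when it is sent as `process_rule.segmentation.separator`. As a rough illustration of that round trip (using Python's `unicode_escape` codec rather than these helpers, and only covering the common sequences):

```python
# Illustration only: Python's unicode_escape codec approximates the same
# escape/unescape behaviour for common sequences such as \n, \t and \\.
displayed = "\\n\\n"                                      # the escaped form kept in state
separator = displayed.encode().decode("unicode_escape")   # "\n\n", what the API receives

# And back again for display:
assert separator.encode("unicode_escape").decode() == displayed
```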
+ 9 - 3
web/app/components/share/text-generation/result/index.tsx

@@ -269,8 +269,10 @@ const Result: FC<IResultProps> = ({
             }))
           },
           onWorkflowFinished: ({ data }) => {
-            if (isTimeout)
+            if (isTimeout) {
+              notify({ type: 'warning', message: t('appDebug.warningMessage.timeoutExceeded') })
               return
+            }
             if (data.error) {
               notify({ type: 'error', message: data.error })
               setWorkflowProcessData(produce(getWorkflowProcessData()!, (draft) => {
@@ -326,8 +328,10 @@ const Result: FC<IResultProps> = ({
           setCompletionRes(res.join(''))
         },
         onCompleted: () => {
-          if (isTimeout)
+          if (isTimeout) {
+            notify({ type: 'warning', message: t('appDebug.warningMessage.timeoutExceeded') })
             return
+          }
           setRespondingFalse()
           setMessageId(tempMessageId)
           onCompleted(getCompletionRes(), taskId, true)
@@ -338,8 +342,10 @@ const Result: FC<IResultProps> = ({
           setCompletionRes(res.join(''))
         },
         onError() {
-          if (isTimeout)
+          if (isTimeout) {
+            notify({ type: 'warning', message: t('appDebug.warningMessage.timeoutExceeded') })
             return
+          }
           setRespondingFalse()
           onCompleted(getCompletionRes(), taskId, false)
           isEnd = true

+ 8 - 9
web/app/components/share/text-generation/run-once/index.tsx

@@ -1,4 +1,4 @@
-import type { FC } from 'react'
+import type { FC, FormEvent } from 'react'
 import React from 'react'
 import { useTranslation } from 'react-i18next'
 import {
@@ -39,11 +39,16 @@ const RunOnce: FC<IRunOnceProps> = ({
     onInputsChange(newInputs)
   }
 
+  const onSubmit = (e: FormEvent<HTMLFormElement>) => {
+    e.preventDefault()
+    onSend()
+  }
+
   return (
     <div className="">
       <section>
         {/* input form */}
-        <form>
+        <form onSubmit={onSubmit}>
           {promptConfig.prompt_variables.map(item => (
             <div className='w-full mt-4' key={item.key}>
               <label className='text-gray-900 text-sm font-medium'>{item.name}</label>
@@ -65,12 +70,6 @@ const RunOnce: FC<IRunOnceProps> = ({
                     placeholder={`${item.name}${!item.required ? `(${t('appDebug.variableTable.optional')})` : ''}`}
                     value={inputs[item.key]}
                     onChange={(e) => { onInputsChange({ ...inputs, [item.key]: e.target.value }) }}
-                    onKeyDown={(e) => {
-                      if (e.key === 'Enter') {
-                        e.preventDefault()
-                        onSend()
-                      }
-                    }}
                     maxLength={item.max_length || DEFAULT_VALUE_MAX_LEN}
                   />
                 )}
@@ -124,8 +123,8 @@ const RunOnce: FC<IRunOnceProps> = ({
                 <span className='text-[13px]'>{t('common.operation.clear')}</span>
               </Button>
               <Button
+                type='submit'
                 variant="primary"
-                onClick={onSend}
                 disabled={false}
               >
                 <PlayIcon className="shrink-0 w-4 h-4 mr-1" aria-hidden="true" />

+ 0 - 0
web/app/components/tools/add-tool-modal/tools.tsx


Some files were not shown because too many files changed in this diff