Browse Source

feat: support plugin max package size

Yeuoly 7 months ago
parent
commit
7754431a34

+ 1 - 0
api/.env.example

@@ -332,6 +332,7 @@ PLUGIN_API_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi+vRjI/+Xb
 PLUGIN_API_URL=http://127.0.0.1:5002
 PLUGIN_REMOTE_INSTALL_PORT=5003
 PLUGIN_REMOTE_INSTALL_HOST=localhost
+PLUGIN_MAX_PACKAGE_SIZE=15728640
 INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
 
 # Marketplace configuration
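
The default of 15728640 bytes is 15 MiB, matching the hard-coded `15 * 1024 * 1024` limit this commit replaces in the download helpers below; a quick check in Python:

```python
# 15 MiB expressed in bytes -- equals the PLUGIN_MAX_PACKAGE_SIZE default
assert 15 * 1024 * 1024 == 15728640
```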

+ 5 - 0
api/configs/feature/__init__.py

@@ -137,6 +137,11 @@ class PluginConfig(BaseSettings):
         default=5003,
     )
 
+    PLUGIN_MAX_PACKAGE_SIZE: PositiveInt = Field(
+        description="Maximum allowed size for plugin packages in bytes",
+        default=15728640,
+    )
+
 
 class MarketplaceConfig(BaseSettings):
     """

+ 5 - 0
api/controllers/console/workspace/plugin.py

@@ -84,6 +84,11 @@ class PluginUploadFromPkgApi(Resource):
         tenant_id = user.current_tenant_id
 
         file = request.files["pkg"]
+
+        # check file size
+        if file.content_length > dify_config.PLUGIN_MAX_PACKAGE_SIZE:
+            raise ValueError("File size exceeds the maximum allowed size")
+
         content = file.read()
         response = PluginService.upload_pkg(tenant_id, content)
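
One caveat worth noting: Werkzeug's `FileStorage.content_length` comes from the per-part `Content-Length` header and falls back to 0 when the client omits it, as browsers typically do for multipart uploads, so the header check alone may never fire. A possible hardening (my addition, not part of the commit) is to re-check the bytes actually read:

```python
content = file.read()

# Defensive re-check: FileStorage.content_length can be 0 for multipart
# uploads, so validate the real payload size as well.
if len(content) > dify_config.PLUGIN_MAX_PACKAGE_SIZE:
    raise ValueError("File size exceeds the maximum allowed size")
```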
 

+ 4 - 5
api/core/helper/marketplace.py

@@ -5,12 +5,11 @@ from core.helper.download import download_with_size_limit
 
 
 def get_plugin_pkg_url(plugin_unique_identifier: str):
-    return (
-        URL(str(dify_config.MARKETPLACE_API_URL))
-        / "api/v1/plugins/download"
-    ).with_query(unique_identifier=plugin_unique_identifier)
+    return (URL(str(dify_config.MARKETPLACE_API_URL)) / "api/v1/plugins/download").with_query(
+        unique_identifier=plugin_unique_identifier
+    )
 
 
 def download_plugin_pkg(plugin_unique_identifier: str):
     url = str(get_plugin_pkg_url(plugin_unique_identifier))
-    return download_with_size_limit(url, 15 * 1024 * 1024)
+    return download_with_size_limit(url, dify_config.PLUGIN_MAX_PACKAGE_SIZE)
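
The reformatting keeps the behavior identical: yarl's `/` operator appends the path and `with_query` attaches the identifier. For illustration, with a made-up base URL and identifier:

```python
from yarl import URL

url = (URL("https://marketplace.example.com") / "api/v1/plugins/download").with_query(
    unique_identifier="some-plugin-identifier"
)
print(url)
# https://marketplace.example.com/api/v1/plugins/download?unique_identifier=some-plugin-identifier
```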

+ 168 - 0
api/core/model_runtime/model_providers/openai/moderation/moderation.py

@@ -0,0 +1,168 @@
+from collections.abc import Mapping
+from typing import Optional
+
+import openai
+from httpx import Timeout
+from openai import OpenAI
+from openai.types import ModerationCreateResponse
+
+from core.model_runtime.entities.model_entities import ModelPropertyKey
+from core.model_runtime.errors.invoke import (
+    InvokeAuthorizationError,
+    InvokeBadRequestError,
+    InvokeConnectionError,
+    InvokeError,
+    InvokeRateLimitError,
+    InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
+
+
+class OpenAIModerationModel(ModerationModel):
+    """
+    Model class for OpenAI text moderation model.
+    """
+
+    def _invoke(self, model: str, credentials: dict, text: str, user: Optional[str] = None) -> bool:
+        """
+        Invoke moderation model
+
+        :param model: model name
+        :param credentials: model credentials
+        :param text: text to moderate
+        :param user: unique user id
+        :return: false if text is safe, true otherwise
+        """
+        # transform credentials to kwargs for model instance
+        credentials_kwargs = self._to_credential_kwargs(credentials)
+
+        # init model client
+        client = OpenAI(**credentials_kwargs)
+
+        # chars per chunk
+        length = self._get_max_characters_per_chunk(model, credentials)
+        text_chunks = [text[i : i + length] for i in range(0, len(text), length)]
+
+        max_text_chunks = self._get_max_chunks(model, credentials)
+        chunks = [text_chunks[i : i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]
+
+        for text_chunk in chunks:
+            moderation_result = self._moderation_invoke(model=model, client=client, texts=text_chunk)
+
+            for result in moderation_result.results:
+                if result.flagged is True:
+                    return True
+
+        return False
+
+    def validate_credentials(self, model: str, credentials: dict) -> None:
+        """
+        Validate model credentials
+
+        :param model: model name
+        :param credentials: model credentials
+        :return:
+        """
+        try:
+            # transform credentials to kwargs for model instance
+            credentials_kwargs = self._to_credential_kwargs(credentials)
+            client = OpenAI(**credentials_kwargs)
+
+            # call moderation model
+            self._moderation_invoke(
+                model=model,
+                client=client,
+                texts=["ping"],
+            )
+        except Exception as ex:
+            raise CredentialsValidateFailedError(str(ex))
+
+    def _moderation_invoke(self, model: str, client: OpenAI, texts: list[str]) -> ModerationCreateResponse:
+        """
+        Invoke moderation model
+
+        :param model: model name
+        :param client: model client
+        :param texts: texts to moderate
+        :return: moderation create response
+        """
+        # call moderation model
+        moderation_result = client.moderations.create(model=model, input=texts)
+
+        return moderation_result
+
+    def _get_max_characters_per_chunk(self, model: str, credentials: dict) -> int:
+        """
+        Get max characters per chunk
+
+        :param model: model name
+        :param credentials: model credentials
+        :return: max characters per chunk
+        """
+        model_schema = self.get_model_schema(model, credentials)
+
+        if model_schema and ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK in model_schema.model_properties:
+            return model_schema.model_properties[ModelPropertyKey.MAX_CHARACTERS_PER_CHUNK]
+
+        return 2000
+
+    def _get_max_chunks(self, model: str, credentials: dict) -> int:
+        """
+        Get max chunks for given moderation model
+
+        :param model: model name
+        :param credentials: model credentials
+        :return: max chunks
+        """
+        model_schema = self.get_model_schema(model, credentials)
+
+        if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties:
+            return model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
+
+        return 1
+
+    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
+        """
+        Transform credentials to kwargs for model instance
+
+        :param credentials:
+        :return:
+        """
+        credentials_kwargs = {
+            "api_key": credentials["openai_api_key"],
+            "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0),
+            "max_retries": 1,
+        }
+
+        if credentials.get("openai_api_base"):
+            openai_api_base = credentials["openai_api_base"].rstrip("/")
+            credentials_kwargs["base_url"] = openai_api_base + "/v1"
+
+        if "openai_organization" in credentials:
+            credentials_kwargs["organization"] = credentials["openai_organization"]
+
+        return credentials_kwargs
+
+    @property
+    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+        """
+        Map model invoke error to unified error
+        The key is the error type thrown to the caller
+        The value is the error type thrown by the model,
+        which needs to be converted into a unified error type for the caller.
+
+        :return: Invoke error mapping
+        """
+        return {
+            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
+            InvokeServerUnavailableError: [openai.InternalServerError],
+            InvokeRateLimitError: [openai.RateLimitError],
+            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
+            InvokeBadRequestError: [
+                openai.BadRequestError,
+                openai.NotFoundError,
+                openai.UnprocessableEntityError,
+                openai.APIError,
+            ],
+        }
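
`_invoke` slices long input twice: first into character windows sized by `_get_max_characters_per_chunk`, then into batches capped by `_get_max_chunks`, short-circuiting to `True` on the first flagged result. A standalone sketch of the same slicing with the defaults above:

```python
text = "x" * 4500
length = 2000        # default max characters per chunk
max_text_chunks = 1  # default max chunks per request

text_chunks = [text[i : i + length] for i in range(0, len(text), length)]
# three windows: 2000, 2000, and 500 characters

chunks = [text_chunks[i : i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]
# three batches -> three moderation API calls, one window each
```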

+ 1 - 0
api/services/feature_service.py

@@ -43,6 +43,7 @@ class SystemFeatureModel(BaseModel):
     sso_enforced_for_web_protocol: str = ""
     enable_web_sso_switch_component: bool = False
     enable_marketplace: bool = True
+    max_plugin_package_size: int = dify_config.PLUGIN_MAX_PACKAGE_SIZE
 
 
 class FeatureService:
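
Because the default is read from `dify_config` when `SystemFeatureModel` is defined, the configured limit is what the system-features endpoint reports to clients. A reduced illustration with a stand-in constant:

```python
from pydantic import BaseModel

PLUGIN_MAX_PACKAGE_SIZE = 15728640  # stand-in for dify_config


class SystemFeatureModel(BaseModel):
    enable_marketplace: bool = True
    max_plugin_package_size: int = PLUGIN_MAX_PACKAGE_SIZE


print(SystemFeatureModel().model_dump())
# {'enable_marketplace': True, 'max_plugin_package_size': 15728640}
```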

+ 2 - 1
api/services/plugin/plugin_service.py

@@ -1,6 +1,7 @@
 from collections.abc import Sequence
 from mimetypes import guess_type
 
+from configs import dify_config
 from core.helper.download import download_with_size_limit
 from core.helper.marketplace import download_plugin_pkg
 from core.plugin.entities.plugin import PluginDeclaration, PluginEntity, PluginInstallationSource
@@ -84,7 +85,7 @@ class PluginService:
         returns plugin_unique_identifier
         """
         pkg = download_with_size_limit(
-            f"https://github.com/{repo}/releases/download/{version}/{package}", 15 * 1024 * 1024
+            f"https://github.com/{repo}/releases/download/{version}/{package}", dify_config.PLUGIN_MAX_PACKAGE_SIZE
         )
 
         manager = PluginInstallationManager()
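
`download_with_size_limit` itself is not shown in this diff; a plausible reconstruction of a streaming, size-capped download with the same signature (my sketch, assuming `requests`, not the project's actual implementation):

```python
import requests


def download_with_size_limit(url: str, size_limit: int) -> bytes:
    # Hypothetical reconstruction: stream the response and abort as soon as
    # the accumulated payload exceeds size_limit, instead of buffering it all.
    chunks: list[bytes] = []
    total = 0
    with requests.get(url, stream=True, timeout=(5, 300)) as resp:
        resp.raise_for_status()
        for chunk in resp.iter_content(chunk_size=8192):
            total += len(chunk)
            if total > size_limit:
                raise ValueError(f"Download exceeds size limit of {size_limit} bytes")
            chunks.append(chunk)
    return b"".join(chunks)
```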