moderation.py

import logging
import random

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from extensions.ext_hosting_provider import hosting_configuration
from models.provider import ProviderType

logger = logging.getLogger(__name__)


def check_moderation(model_config: ModelConfigWithCredentialsEntity, text: str) -> bool:
    """Return True if a randomly sampled chunk of `text` is flagged by the hosted
    OpenAI moderation model; return False if hosted moderation does not apply
    or the sampled chunk passes."""
    moderation_config = hosting_configuration.moderation_config
    if (
        moderation_config
        and moderation_config.enabled is True
        and "openai" in hosting_configuration.provider_map
        and hosting_configuration.provider_map["openai"].enabled is True
    ):
        using_provider_type = model_config.provider_model_bundle.configuration.using_provider_type
        provider_name = model_config.provider
        # Hosted moderation only applies to system-provided credentials for
        # providers listed in the moderation config.
        if using_provider_type == ProviderType.SYSTEM and provider_name in moderation_config.providers:
            hosting_openai_config = hosting_configuration.provider_map["openai"]

            if hosting_openai_config.credentials is None:
                return False

            # Split the text into chunks of 2,000 characters each.
            length = 2000
            text_chunks = [text[i : i + length] for i in range(0, len(text), length)]

            if len(text_chunks) == 0:
                return True

            # Moderate a single randomly chosen chunk rather than the full text.
            text_chunk = random.choice(text_chunks)

            try:
                model_type_instance = OpenAIModerationModel()
                moderation_result = model_type_instance.invoke(
                    model="text-moderation-stable", credentials=hosting_openai_config.credentials, text=text_chunk
                )

                if moderation_result is True:
                    return True
            except Exception as ex:
                logger.exception(ex)
                raise InvokeBadRequestError("Rate limit exceeded, please try again later.")

    return False
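
The chunk-and-sample strategy used above can be exercised in isolation. The sketch below is a minimal, self-contained illustration of that step only; `sample_chunk` is a hypothetical helper name, not part of this module, and it uses nothing beyond the standard library.

import random

def sample_chunk(text: str, length: int = 2000) -> str | None:
    # Same chunking as check_moderation: fixed-size slices of the input.
    chunks = [text[i : i + length] for i in range(0, len(text), length)]
    # Pick one chunk at random; None signals there was nothing to sample.
    return random.choice(chunks) if chunks else None

if __name__ == "__main__":
    text = "x" * 5000  # 5,000 characters -> three chunks (2000, 2000, 1000)
    chunk = sample_chunk(text)
    print(len(chunk) if chunk else "no text to sample")

Sampling one chunk keeps the cost of the moderation call constant regardless of input length, at the price of only probabilistically catching violations in long texts.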