moderation.py

import logging
import random

import openai

from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.providers.hosted import hosted_config, hosted_model_providers
from models.provider import ProviderType


def check_moderation(model_provider: BaseModelProvider, text: str) -> bool:
    if hosted_config.moderation.enabled is True and hosted_model_providers.openai:
        if model_provider.provider.provider_type == ProviderType.SYSTEM.value \
                and model_provider.provider_name in hosted_config.moderation.providers:
            # split the text into 2000-character chunks and sample one at random
            length = 2000
            text_chunks = [text[i:i + length] for i in range(0, len(text), length)]

            if len(text_chunks) == 0:
                return True

            text_chunk = random.choice(text_chunks)

            try:
                moderation_result = openai.Moderation.create(input=text_chunk,
                                                              api_key=hosted_model_providers.openai.api_key)
            except Exception as ex:
                logging.exception(ex)
                raise LLMBadRequestError('Rate limit exceeded, please try again later.')

            # fail the check if any moderation category is flagged for the sampled chunk
            for result in moderation_result.results:
                if result['flagged'] is True:
                    return False

    return True
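
A minimal usage sketch, not part of the original file: it assumes a concrete BaseModelProvider instance has already been constructed elsewhere in the application, that this module is importable as core.moderation (the path is an assumption), and that the caller wants to fail closed when the moderation call itself errors. The helper name is_input_allowed is hypothetical.

from core.model_providers.error import LLMBadRequestError
from core.moderation import check_moderation  # import path is an assumption


def is_input_allowed(model_provider, user_text: str) -> bool:
    # Hypothetical wrapper: returns False when hosted moderation flags the
    # text, or when the moderation request itself fails.
    try:
        return check_moderation(model_provider, user_text)
    except LLMBadRequestError:
        # The moderation call failed (e.g. rate limited); this sketch fails
        # closed, but a caller could equally choose to fail open.
        return False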