# audio_service.py
  1. import io
  2. import logging
  3. import uuid
  4. from typing import Optional
  5. from werkzeug.datastructures import FileStorage
  6. from core.model_manager import ModelManager
  7. from core.model_runtime.entities.model_entities import ModelType
  8. from models.model import App, AppMode, AppModelConfig, Message
  9. from services.errors.audio import (
  10. AudioTooLargeServiceError,
  11. NoAudioUploadedServiceError,
  12. ProviderNotSupportSpeechToTextServiceError,
  13. ProviderNotSupportTextToSpeechServiceError,
  14. UnsupportedAudioTypeServiceError,
  15. )
# Maximum accepted audio upload size, in megabytes.
FILE_SIZE = 30
# Same limit expressed in bytes, for comparing against raw payload length.
FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
# Audio formats accepted for speech-to-text; compared against the upload's
# MIME type as "audio/<ext>" in transcript_asr.
ALLOWED_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm", "amr"]
# Module-level logger, per standard logging convention.
logger = logging.getLogger(__name__)
  20. class AudioService:
  21. @classmethod
  22. def transcript_asr(cls, app_model: App, file: FileStorage, end_user: Optional[str] = None):
  23. if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
  24. workflow = app_model.workflow
  25. if workflow is None:
  26. raise ValueError("Speech to text is not enabled")
  27. features_dict = workflow.features_dict
  28. if "speech_to_text" not in features_dict or not features_dict["speech_to_text"].get("enabled"):
  29. raise ValueError("Speech to text is not enabled")
  30. else:
  31. app_model_config: AppModelConfig = app_model.app_model_config
  32. if not app_model_config.speech_to_text_dict["enabled"]:
  33. raise ValueError("Speech to text is not enabled")
  34. if file is None:
  35. raise NoAudioUploadedServiceError()
  36. extension = file.mimetype
  37. if extension not in [f"audio/{ext}" for ext in ALLOWED_EXTENSIONS]:
  38. raise UnsupportedAudioTypeServiceError()
  39. file_content = file.read()
  40. file_size = len(file_content)
  41. if file_size > FILE_SIZE_LIMIT:
  42. message = f"Audio size larger than {FILE_SIZE} mb"
  43. raise AudioTooLargeServiceError(message)
  44. model_manager = ModelManager()
  45. model_instance = model_manager.get_default_model_instance(
  46. tenant_id=app_model.tenant_id, model_type=ModelType.SPEECH2TEXT
  47. )
  48. if model_instance is None:
  49. raise ProviderNotSupportSpeechToTextServiceError()
  50. buffer = io.BytesIO(file_content)
  51. buffer.name = "temp.mp3"
  52. return {"text": model_instance.invoke_speech2text(file=buffer, user=end_user)}
  53. @classmethod
  54. def transcript_tts(
  55. cls,
  56. app_model: App,
  57. text: Optional[str] = None,
  58. voice: Optional[str] = None,
  59. end_user: Optional[str] = None,
  60. message_id: Optional[str] = None,
  61. ):
  62. from collections.abc import Generator
  63. from flask import Response, stream_with_context
  64. from app import app
  65. from extensions.ext_database import db
  66. def invoke_tts(text_content: str, app_model: App, voice: Optional[str] = None):
  67. with app.app_context():
  68. if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
  69. workflow = app_model.workflow
  70. if workflow is None:
  71. raise ValueError("TTS is not enabled")
  72. features_dict = workflow.features_dict
  73. if "text_to_speech" not in features_dict or not features_dict["text_to_speech"].get("enabled"):
  74. raise ValueError("TTS is not enabled")
  75. voice = features_dict["text_to_speech"].get("voice") if voice is None else voice
  76. else:
  77. if app_model.app_model_config is None:
  78. raise ValueError("AppModelConfig not found")
  79. text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
  80. if not text_to_speech_dict.get("enabled"):
  81. raise ValueError("TTS is not enabled")
  82. voice = text_to_speech_dict.get("voice") if voice is None else voice
  83. model_manager = ModelManager()
  84. model_instance = model_manager.get_default_model_instance(
  85. tenant_id=app_model.tenant_id, model_type=ModelType.TTS
  86. )
  87. try:
  88. if not voice:
  89. voices = model_instance.get_tts_voices()
  90. if voices:
  91. voice = voices[0].get("value")
  92. if not voice:
  93. raise ValueError("Sorry, no voice available.")
  94. else:
  95. raise ValueError("Sorry, no voice available.")
  96. return model_instance.invoke_tts(
  97. content_text=text_content.strip(), user=end_user, tenant_id=app_model.tenant_id, voice=voice
  98. )
  99. except Exception as e:
  100. raise e
  101. if message_id:
  102. try:
  103. uuid.UUID(message_id)
  104. except ValueError:
  105. return None
  106. message = db.session.query(Message).filter(Message.id == message_id).first()
  107. if message is None:
  108. return None
  109. if message.answer == "" and message.status == "normal":
  110. return None
  111. else:
  112. response = invoke_tts(message.answer, app_model=app_model, voice=voice)
  113. if isinstance(response, Generator):
  114. return Response(stream_with_context(response), content_type="audio/mpeg")
  115. return response
  116. else:
  117. if text is None:
  118. raise ValueError("Text is required")
  119. response = invoke_tts(text, app_model, voice)
  120. if isinstance(response, Generator):
  121. return Response(stream_with_context(response), content_type="audio/mpeg")
  122. return response
  123. @classmethod
  124. def transcript_tts_voices(cls, tenant_id: str, language: str):
  125. model_manager = ModelManager()
  126. model_instance = model_manager.get_default_model_instance(tenant_id=tenant_id, model_type=ModelType.TTS)
  127. if model_instance is None:
  128. raise ProviderNotSupportTextToSpeechServiceError()
  129. try:
  130. return model_instance.get_tts_voices(language)
  131. except Exception as e:
  132. raise e