generate_conversation_summary_task.py

import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.generator.llm_generator import LLMGenerator
from core.model_providers.error import LLMError, ProviderTokenNotInitError
from extensions.ext_database import db
from models.model import Conversation, Message


@shared_task(queue='generation')
def generate_conversation_summary_task(conversation_id: str):
    """
    Async generate conversation summary.
    :param conversation_id:

    Usage: generate_conversation_summary_task.delay(conversation_id)
    """
    logging.info(click.style('Start generate conversation summary: {}'.format(conversation_id), fg='green'))
    start_at = time.perf_counter()

    conversation = db.session.query(Conversation).filter(Conversation.id == conversation_id).first()
    if not conversation:
        raise NotFound('Conversation not found')

    try:
        # Only summarize conversations with enough history and no existing summary.
        history_message_count = conversation.message_count
        if history_message_count >= 5 and not conversation.summary:
            app_model = conversation.app
            if not app_model:
                return

            history_messages = db.session.query(Message).filter(Message.conversation_id == conversation.id) \
                .order_by(Message.created_at.asc()).all()

            conversation.summary = LLMGenerator.generate_conversation_summary(app_model.tenant_id, history_messages)
            db.session.add(conversation)
            db.session.commit()
    except (LLMError, ProviderTokenNotInitError):
        # Provider not configured or LLM call failed: store a placeholder summary.
        conversation.summary = '[No Summary]'
        db.session.commit()
    except Exception as e:
        conversation.summary = '[No Summary]'
        db.session.commit()
        logging.exception(e)

    end_at = time.perf_counter()
    logging.info(
        click.style('Conversation summary generated: {} latency: {}'.format(conversation_id, end_at - start_at),
                    fg='green'))
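
A minimal call-site sketch of the usage noted in the docstring, assuming a Celery worker is consuming the 'generation' queue; the module path and the after_message_created trigger function are illustrative assumptions, not part of the file above.

# Illustrative call site (not part of the file above). Assumes this module is
# importable as tasks.generate_conversation_summary_task (hypothetical path)
# and that a Celery worker is running against the 'generation' queue.
from tasks.generate_conversation_summary_task import generate_conversation_summary_task

def after_message_created(conversation_id: str) -> None:
    # .delay() enqueues the task and returns immediately; the summary is
    # generated asynchronously by the worker process.
    generate_conversation_summary_task.delay(conversation_id)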