logging_callback.py

import json
import logging
import sys
from collections.abc import Sequence
from typing import Optional, cast

from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
from core.model_runtime.model_providers.__base.ai_model import AIModel

logger = logging.getLogger(__name__)


class LoggingCallback(Callback):
    def on_before_invoke(
        self,
        llm_instance: AIModel,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> None:
        """
        Before invoke callback

        :param llm_instance: LLM instance
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_before_invoke]\n", color="blue")
        self.print_text(f"Model: {model}\n", color="blue")
        self.print_text("Parameters:\n", color="blue")
        for key, value in model_parameters.items():
            self.print_text(f"\t{key}: {value}\n", color="blue")

        if stop:
            self.print_text(f"\tstop: {stop}\n", color="blue")

        if tools:
            self.print_text("\tTools:\n", color="blue")
            for tool in tools:
                self.print_text(f"\t\t{tool.name}\n", color="blue")

        self.print_text(f"Stream: {stream}\n", color="blue")

        if user:
            self.print_text(f"User: {user}\n", color="blue")

        self.print_text("Prompt messages:\n", color="blue")
        for prompt_message in prompt_messages:
            if prompt_message.name:
                self.print_text(f"\tname: {prompt_message.name}\n", color="blue")

            self.print_text(f"\trole: {prompt_message.role.value}\n", color="blue")
            self.print_text(f"\tcontent: {prompt_message.content}\n", color="blue")

        if stream:
            self.print_text("\n[on_llm_new_chunk]")

    def on_new_chunk(
        self,
        llm_instance: AIModel,
        chunk: LLMResultChunk,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ):
        """
        On new chunk callback

        :param llm_instance: LLM instance
        :param chunk: chunk
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        # Write each streamed delta straight to stdout so tokens appear as they arrive.
        sys.stdout.write(cast(str, chunk.delta.message.content))
        sys.stdout.flush()

    def on_after_invoke(
        self,
        llm_instance: AIModel,
        result: LLMResult,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> None:
        """
        After invoke callback

        :param llm_instance: LLM instance
        :param result: result
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_after_invoke]\n", color="yellow")
        self.print_text(f"Content: {result.message.content}\n", color="yellow")

        if result.message.tool_calls:
            self.print_text("Tool calls:\n", color="yellow")
            for tool_call in result.message.tool_calls:
                self.print_text(f"\t{tool_call.id}\n", color="yellow")
                self.print_text(f"\t{tool_call.function.name}\n", color="yellow")
                self.print_text(f"\t{json.dumps(tool_call.function.arguments)}\n", color="yellow")

        self.print_text(f"Model: {result.model}\n", color="yellow")
        self.print_text(f"Usage: {result.usage}\n", color="yellow")
        self.print_text(f"System Fingerprint: {result.system_fingerprint}\n", color="yellow")

    def on_invoke_error(
        self,
        llm_instance: AIModel,
        ex: Exception,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[Sequence[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> None:
        """
        Invoke error callback

        :param llm_instance: LLM instance
        :param ex: exception
        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        """
        self.print_text("\n[on_llm_invoke_error]\n", color="red")
        logger.exception(ex)