langfuse_trace.py

import json
import logging
import os
from datetime import datetime, timedelta
from typing import Optional

from langfuse import Langfuse

from core.ops.base_trace_instance import BaseTraceInstance
from core.ops.entities.config_entity import LangfuseConfig
from core.ops.entities.trace_entity import (
    BaseTraceInfo,
    DatasetRetrievalTraceInfo,
    GenerateNameTraceInfo,
    MessageTraceInfo,
    ModerationTraceInfo,
    SuggestedQuestionTraceInfo,
    ToolTraceInfo,
    WorkflowTraceInfo,
)
from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
    GenerationUsage,
    LangfuseGeneration,
    LangfuseSpan,
    LangfuseTrace,
    LevelEnum,
    UnitEnum,
)
from core.ops.utils import filter_none_values
from extensions.ext_database import db
from models.model import EndUser
from models.workflow import WorkflowNodeExecution

logger = logging.getLogger(__name__)

class LangFuseDataTrace(BaseTraceInstance):
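    """Trace backend that forwards app trace data to Langfuse via the Langfuse Python client."""
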
    def __init__(
        self,
        langfuse_config: LangfuseConfig,
    ):
        super().__init__(langfuse_config)
        self.langfuse_client = Langfuse(
            public_key=langfuse_config.public_key,
            secret_key=langfuse_config.secret_key,
            host=langfuse_config.host,
        )
        self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")

    def trace(self, trace_info: BaseTraceInfo):
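        """Dispatch a trace_info object to the handler that matches its concrete type."""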
        if isinstance(trace_info, WorkflowTraceInfo):
            self.workflow_trace(trace_info)
        if isinstance(trace_info, MessageTraceInfo):
            self.message_trace(trace_info)
        if isinstance(trace_info, ModerationTraceInfo):
            self.moderation_trace(trace_info)
        if isinstance(trace_info, SuggestedQuestionTraceInfo):
            self.suggested_question_trace(trace_info)
        if isinstance(trace_info, DatasetRetrievalTraceInfo):
            self.dataset_retrieval_trace(trace_info)
        if isinstance(trace_info, ToolTraceInfo):
            self.tool_trace(trace_info)
        if isinstance(trace_info, GenerateNameTraceInfo):
            self.generate_name_trace(trace_info)

    def workflow_trace(self, trace_info: WorkflowTraceInfo):
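        """Record a workflow run as a Langfuse trace, with one span per executed workflow node."""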
        trace_id = trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id
        user_id = trace_info.metadata.get("user_id")
        if trace_info.message_id:
            trace_id = trace_info.message_id
            name = f"message_{trace_info.message_id}"
            trace_data = LangfuseTrace(
                id=trace_info.message_id,
                user_id=user_id,
                name=name,
                input=trace_info.workflow_run_inputs,
                output=trace_info.workflow_run_outputs,
                metadata=trace_info.metadata,
                session_id=trace_info.conversation_id,
                tags=["message", "workflow"],
            )
            self.add_trace(langfuse_trace_data=trace_data)
            workflow_span_data = LangfuseSpan(
                id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
                name=f"workflow_{trace_info.workflow_app_log_id}"
                if trace_info.workflow_app_log_id
                else f"workflow_{trace_info.workflow_run_id}",
                input=trace_info.workflow_run_inputs,
                output=trace_info.workflow_run_outputs,
                trace_id=trace_id,
                start_time=trace_info.start_time,
                end_time=trace_info.end_time,
                metadata=trace_info.metadata,
                level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR,
                status_message=trace_info.error if trace_info.error else "",
            )
            self.add_span(langfuse_span_data=workflow_span_data)
        else:
            trace_data = LangfuseTrace(
                id=trace_id,
                user_id=user_id,
                name=f"workflow_{trace_info.workflow_app_log_id}"
                if trace_info.workflow_app_log_id
                else f"workflow_{trace_info.workflow_run_id}",
                input=trace_info.workflow_run_inputs,
                output=trace_info.workflow_run_outputs,
                metadata=trace_info.metadata,
                session_id=trace_info.conversation_id,
                tags=["workflow"],
            )
            self.add_trace(langfuse_trace_data=trace_data)

        # fetch all node executions belonging to this workflow run
        workflow_nodes_executions = (
            db.session.query(
                WorkflowNodeExecution.id,
                WorkflowNodeExecution.tenant_id,
                WorkflowNodeExecution.app_id,
                WorkflowNodeExecution.title,
                WorkflowNodeExecution.node_type,
                WorkflowNodeExecution.status,
                WorkflowNodeExecution.inputs,
                WorkflowNodeExecution.outputs,
                WorkflowNodeExecution.created_at,
                WorkflowNodeExecution.elapsed_time,
                WorkflowNodeExecution.process_data,
                WorkflowNodeExecution.execution_metadata,
            )
            .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id)
            .all()
        )

        for node_execution in workflow_nodes_executions:
            node_execution_id = node_execution.id
            tenant_id = node_execution.tenant_id
            app_id = node_execution.app_id
            node_name = node_execution.title
            node_type = node_execution.node_type
            status = node_execution.status
            if node_type == "llm":
                inputs = (
                    json.loads(node_execution.process_data).get("prompts", {}) if node_execution.process_data else {}
                )
            else:
                inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
            outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
            created_at = node_execution.created_at if node_execution.created_at else datetime.now()
            elapsed_time = node_execution.elapsed_time
            finished_at = created_at + timedelta(seconds=elapsed_time)

            metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
            metadata.update(
                {
                    "workflow_run_id": trace_info.workflow_run_id,
                    "node_execution_id": node_execution_id,
                    "tenant_id": tenant_id,
                    "app_id": app_id,
                    "node_name": node_name,
                    "node_type": node_type,
                    "status": status,
                }
            )

            # add span
            if trace_info.message_id:
                span_data = LangfuseSpan(
                    id=node_execution_id,
                    name=f"{node_name}_{node_execution_id}",
                    input=inputs,
                    output=outputs,
                    trace_id=trace_id,
                    start_time=created_at,
                    end_time=finished_at,
                    metadata=metadata,
                    level=LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR,
                    status_message=trace_info.error if trace_info.error else "",
                    parent_observation_id=trace_info.workflow_app_log_id
                    if trace_info.workflow_app_log_id
                    else trace_info.workflow_run_id,
                )
            else:
                span_data = LangfuseSpan(
                    id=node_execution_id,
                    name=f"{node_name}_{node_execution_id}",
                    input=inputs,
                    output=outputs,
                    trace_id=trace_id,
                    start_time=created_at,
                    end_time=finished_at,
                    metadata=metadata,
                    level=LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR,
                    status_message=trace_info.error if trace_info.error else "",
                )

            self.add_span(langfuse_span_data=span_data)

            process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
            if process_data and process_data.get("model_mode") == "chat":
                total_token = metadata.get("total_tokens", 0)
                # add generation
                generation_usage = GenerationUsage(
                    totalTokens=total_token,
                )
                node_generation_data = LangfuseGeneration(
                    name=f"generation_{node_execution_id}",
                    trace_id=trace_id,
                    parent_observation_id=node_execution_id,
                    start_time=created_at,
                    end_time=finished_at,
                    input=inputs,
                    output=outputs,
                    metadata=metadata,
                    level=LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR,
                    status_message=trace_info.error if trace_info.error else "",
                    usage=generation_usage,
                )
                self.add_generation(langfuse_generation_data=node_generation_data)

    def message_trace(self, trace_info: MessageTraceInfo, **kwargs):
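        """Record a chat message as a Langfuse trace plus a generation carrying token usage and cost."""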
        # get message file data
        file_list = trace_info.file_list
        metadata = trace_info.metadata
        message_data = trace_info.message_data
        message_id = message_data.id

        user_id = message_data.from_account_id
        if message_data.from_end_user_id:
            end_user_data: EndUser = (
                db.session.query(EndUser).filter(EndUser.id == message_data.from_end_user_id).first()
            )
            if end_user_data is not None:
                user_id = end_user_data.session_id
                metadata["user_id"] = user_id

        trace_data = LangfuseTrace(
            id=message_id,
            user_id=user_id,
            name=f"message_{message_id}",
            input={
                "message": trace_info.inputs,
                "files": file_list,
                "message_tokens": trace_info.message_tokens,
                "answer_tokens": trace_info.answer_tokens,
                "total_tokens": trace_info.total_tokens,
                "error": trace_info.error,
                "provider_response_latency": message_data.provider_response_latency,
                "created_at": trace_info.start_time,
            },
            output=trace_info.outputs,
            metadata=metadata,
            session_id=message_data.conversation_id,
            tags=["message", str(trace_info.conversation_mode)],
            version=None,
            release=None,
            public=None,
        )
        self.add_trace(langfuse_trace_data=trace_data)

        # add generation for the model call
        generation_usage = GenerationUsage(
            totalTokens=trace_info.total_tokens,
            input=trace_info.message_tokens,
            output=trace_info.answer_tokens,
            total=trace_info.total_tokens,
            unit=UnitEnum.TOKENS,
            totalCost=message_data.total_price,
        )
        langfuse_generation_data = LangfuseGeneration(
            name=f"generation_{message_id}",
            trace_id=message_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            model=message_data.model_id,
            input=trace_info.inputs,
            output=message_data.answer,
            metadata=metadata,
            level=LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR,
            status_message=message_data.error if message_data.error else "",
            usage=generation_usage,
        )
        self.add_generation(langfuse_generation_data)

    def moderation_trace(self, trace_info: ModerationTraceInfo):
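        """Record a moderation check as a span attached to the message trace."""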
        span_data = LangfuseSpan(
            name="moderation",
            input=trace_info.inputs,
            output={
                "action": trace_info.action,
                "flagged": trace_info.flagged,
                "preset_response": trace_info.preset_response,
                "inputs": trace_info.inputs,
            },
            trace_id=trace_info.message_id,
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.created_at,
            metadata=trace_info.metadata,
        )
        self.add_span(langfuse_span_data=span_data)

    def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
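        """Record suggested-question generation as a Langfuse generation with character-based usage."""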
        message_data = trace_info.message_data
        generation_usage = GenerationUsage(
            totalTokens=len(str(trace_info.suggested_question)),
            input=len(trace_info.inputs),
            output=len(trace_info.suggested_question),
            total=len(trace_info.suggested_question),
            unit=UnitEnum.CHARACTERS,
        )
        generation_data = LangfuseGeneration(
            name="suggested_question",
            input=trace_info.inputs,
            output=str(trace_info.suggested_question),
            trace_id=trace_info.message_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            metadata=trace_info.metadata,
            level=LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR,
            status_message=message_data.error if message_data.error else "",
            usage=generation_usage,
        )
        self.add_generation(langfuse_generation_data=generation_data)

    def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
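        """Record a dataset retrieval step (retrieved documents) as a span on the message trace."""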
        dataset_retrieval_span_data = LangfuseSpan(
            name="dataset_retrieval",
            input=trace_info.inputs,
            output={"documents": trace_info.documents},
            trace_id=trace_info.message_id,
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
            metadata=trace_info.metadata,
        )
        self.add_span(langfuse_span_data=dataset_retrieval_span_data)

    def tool_trace(self, trace_info: ToolTraceInfo):
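        """Record a tool invocation as a span on the message trace."""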
        tool_span_data = LangfuseSpan(
            name=trace_info.tool_name,
            input=trace_info.tool_inputs,
            output=trace_info.tool_outputs,
            trace_id=trace_info.message_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            metadata=trace_info.metadata,
            level=LevelEnum.DEFAULT if trace_info.error == "" or trace_info.error is None else LevelEnum.ERROR,
            status_message=trace_info.error,
        )
        self.add_span(langfuse_span_data=tool_span_data)

    def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
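        """Record conversation-name generation as its own trace and span, keyed by conversation_id."""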
        name_generation_trace_data = LangfuseTrace(
            name="generate_name",
            input=trace_info.inputs,
            output=trace_info.outputs,
            user_id=trace_info.tenant_id,
            metadata=trace_info.metadata,
            session_id=trace_info.conversation_id,
        )
        self.add_trace(langfuse_trace_data=name_generation_trace_data)

        name_generation_span_data = LangfuseSpan(
            name="generate_name",
            input=trace_info.inputs,
            output=trace_info.outputs,
            trace_id=trace_info.conversation_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            metadata=trace_info.metadata,
        )
        self.add_span(langfuse_span_data=name_generation_span_data)

    def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None):
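        """Create a Langfuse trace from the given data, dropping None fields before sending."""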
        format_trace_data = filter_none_values(langfuse_trace_data.model_dump()) if langfuse_trace_data else {}
        try:
            self.langfuse_client.trace(**format_trace_data)
            logger.debug("LangFuse Trace created successfully")
        except Exception as e:
            raise ValueError(f"LangFuse Failed to create trace: {str(e)}")

    def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None):
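        """Create a Langfuse span from the given data, dropping None fields before sending."""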
        format_span_data = filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
        try:
            self.langfuse_client.span(**format_span_data)
            logger.debug("LangFuse Span created successfully")
        except Exception as e:
            raise ValueError(f"LangFuse Failed to create span: {str(e)}")

    def update_span(self, span, langfuse_span_data: Optional[LangfuseSpan] = None):
        format_span_data = filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
        span.end(**format_span_data)

    def add_generation(self, langfuse_generation_data: Optional[LangfuseGeneration] = None):
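        """Create a Langfuse generation from the given data, dropping None fields before sending."""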
        format_generation_data = (
            filter_none_values(langfuse_generation_data.model_dump()) if langfuse_generation_data else {}
        )
        try:
            self.langfuse_client.generation(**format_generation_data)
            logger.debug("LangFuse Generation created successfully")
        except Exception as e:
            raise ValueError(f"LangFuse Failed to create generation: {str(e)}")

    def update_generation(self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None):
        format_generation_data = (
            filter_none_values(langfuse_generation_data.model_dump()) if langfuse_generation_data else {}
        )
        generation.end(**format_generation_data)

    def api_check(self):
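        """Validate the configured Langfuse credentials via the client's auth check."""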
        try:
            return self.langfuse_client.auth_check()
        except Exception as e:
            logger.debug(f"LangFuse API check failed: {str(e)}")
            raise ValueError(f"LangFuse API check failed: {str(e)}")