langsmith_trace.py

import json
import logging
import os
from datetime import datetime, timedelta

from langsmith import Client

from core.ops.base_trace_instance import BaseTraceInstance
from core.ops.entities.config_entity import LangSmithConfig
from core.ops.entities.trace_entity import (
    BaseTraceInfo,
    DatasetRetrievalTraceInfo,
    GenerateNameTraceInfo,
    MessageTraceInfo,
    ModerationTraceInfo,
    SuggestedQuestionTraceInfo,
    ToolTraceInfo,
    WorkflowTraceInfo,
)
from core.ops.langsmith_trace.entities.langsmith_trace_entity import (
    LangSmithRunModel,
    LangSmithRunType,
    LangSmithRunUpdateModel,
)
from core.ops.utils import filter_none_values
from extensions.ext_database import db
from models.model import EndUser, MessageFile
from models.workflow import WorkflowNodeExecution

logger = logging.getLogger(__name__)


class LangSmithDataTrace(BaseTraceInstance):
    def __init__(
        self,
        langsmith_config: LangSmithConfig,
    ):
        super().__init__(langsmith_config)
        self.langsmith_key = langsmith_config.api_key
        self.project_name = langsmith_config.project
        self.project_id = None
        self.langsmith_client = Client(
            api_key=langsmith_config.api_key, api_url=langsmith_config.endpoint
        )
        self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
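
    # trace() fans each BaseTraceInfo out to the handler matching its concrete type;
    # every handler below converts a Dify trace entity into one or more LangSmith runs.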
    def trace(self, trace_info: BaseTraceInfo):
        if isinstance(trace_info, WorkflowTraceInfo):
            self.workflow_trace(trace_info)
        if isinstance(trace_info, MessageTraceInfo):
            self.message_trace(trace_info)
        if isinstance(trace_info, ModerationTraceInfo):
            self.moderation_trace(trace_info)
        if isinstance(trace_info, SuggestedQuestionTraceInfo):
            self.suggested_question_trace(trace_info)
        if isinstance(trace_info, DatasetRetrievalTraceInfo):
            self.dataset_retrieval_trace(trace_info)
        if isinstance(trace_info, ToolTraceInfo):
            self.tool_trace(trace_info)
        if isinstance(trace_info, GenerateNameTraceInfo):
            self.generate_name_trace(trace_info)
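
    # workflow_trace builds a run tree in LangSmith: an optional "message" chain run,
    # a "workflow" run parented to it (or standalone when there is no message), and
    # one child run per WorkflowNodeExecution loaded from the database.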
    def workflow_trace(self, trace_info: WorkflowTraceInfo):
        if trace_info.message_id:
            message_run = LangSmithRunModel(
                id=trace_info.message_id,
                name=f"message_{trace_info.message_id}",
                inputs=trace_info.workflow_run_inputs,
                outputs=trace_info.workflow_run_outputs,
                run_type=LangSmithRunType.chain,
                start_time=trace_info.start_time,
                end_time=trace_info.end_time,
                extra={
                    "metadata": trace_info.metadata,
                },
                tags=["message"],
                error=trace_info.error,
            )
            self.add_run(message_run)

        langsmith_run = LangSmithRunModel(
            file_list=trace_info.file_list,
            total_tokens=trace_info.total_tokens,
            id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
            name=(
                f"workflow_{trace_info.workflow_app_log_id}"
                if trace_info.workflow_app_log_id
                else f"workflow_{trace_info.workflow_run_id}"
            ),
            inputs=trace_info.workflow_run_inputs,
            run_type=LangSmithRunType.tool,
            start_time=trace_info.workflow_data.created_at,
            end_time=trace_info.workflow_data.finished_at,
            outputs=trace_info.workflow_run_outputs,
            extra={
                "metadata": trace_info.metadata,
            },
            error=trace_info.error,
            tags=["workflow"],
            parent_run_id=trace_info.message_id if trace_info.message_id else None,
        )
        self.add_run(langsmith_run)

        # fetch all node executions of this workflow run, newest index first
        workflow_nodes_executions = (
            db.session.query(WorkflowNodeExecution)
            .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id)
            .order_by(WorkflowNodeExecution.index.desc())
            .all()
        )

        for node_execution in workflow_nodes_executions:
            node_execution_id = node_execution.id
            tenant_id = node_execution.tenant_id
            app_id = node_execution.app_id
            node_name = node_execution.title
            node_type = node_execution.node_type
            status = node_execution.status
            if node_type == "llm":
                inputs = (
                    json.loads(node_execution.process_data).get("prompts", {})
                    if node_execution.process_data
                    else {}
                )
            else:
                inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
            outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
            created_at = node_execution.created_at if node_execution.created_at else datetime.now()
            elapsed_time = node_execution.elapsed_time
            finished_at = created_at + timedelta(seconds=elapsed_time)

            execution_metadata = (
                json.loads(node_execution.execution_metadata)
                if node_execution.execution_metadata
                else {}
            )
            node_total_tokens = execution_metadata.get("total_tokens", 0)

            # reuse the parsed execution metadata instead of decoding it a second time
            metadata = dict(execution_metadata)
            metadata.update(
                {
                    "workflow_run_id": trace_info.workflow_run_id,
                    "node_execution_id": node_execution_id,
                    "tenant_id": tenant_id,
                    "app_id": app_id,
                    "app_name": node_name,
                    "node_type": node_type,
                    "status": status,
                }
            )

            process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
            if process_data and process_data.get("model_mode") == "chat":
                run_type = LangSmithRunType.llm
            elif node_type == "knowledge-retrieval":
                run_type = LangSmithRunType.retriever
            else:
                run_type = LangSmithRunType.tool

            langsmith_run = LangSmithRunModel(
                total_tokens=node_total_tokens,
                name=f"{node_name}_{node_execution_id}",
                inputs=inputs,
                run_type=run_type,
                start_time=created_at,
                end_time=finished_at,
                outputs=outputs,
                file_list=trace_info.file_list,
                extra={
                    "metadata": metadata,
                },
                parent_run_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
                tags=["node_execution"],
            )
            self.add_run(langsmith_run)
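
    # message_trace records a "message" chain run plus a child "llm" run, attaching
    # message/file metadata and, for end-user messages, the end user's session id.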
    def message_trace(self, trace_info: MessageTraceInfo):
        # get message file data
        file_list = trace_info.file_list
        message_file_data: MessageFile = trace_info.message_file_data
        file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else ""
        file_list.append(file_url)

        metadata = trace_info.metadata
        message_data = trace_info.message_data
        message_id = message_data.id

        user_id = message_data.from_account_id
        if message_data.from_end_user_id:
            end_user_data: EndUser = (
                db.session.query(EndUser)
                .filter(EndUser.id == message_data.from_end_user_id)
                .first()
            )
            if end_user_data is not None:
                metadata["end_user_id"] = end_user_data.session_id
        metadata["user_id"] = user_id

        message_run = LangSmithRunModel(
            input_tokens=trace_info.message_tokens,
            output_tokens=trace_info.answer_tokens,
            total_tokens=trace_info.total_tokens,
            id=message_id,
            name=f"message_{message_id}",
            inputs=trace_info.inputs,
            run_type=LangSmithRunType.chain,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            outputs=message_data.answer,
            extra={
                "metadata": metadata,
            },
            tags=["message", str(trace_info.conversation_mode)],
            error=trace_info.error,
            file_list=file_list,
        )
        self.add_run(message_run)

        # create llm run parented to the message run
        llm_run = LangSmithRunModel(
            input_tokens=trace_info.message_tokens,
            output_tokens=trace_info.answer_tokens,
            total_tokens=trace_info.total_tokens,
            name=f"llm_{message_id}",
            inputs=trace_info.inputs,
            run_type=LangSmithRunType.llm,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            outputs=message_data.answer,
            extra={
                "metadata": metadata,
            },
            parent_run_id=message_id,
            tags=["llm", str(trace_info.conversation_mode)],
            error=trace_info.error,
            file_list=file_list,
        )
        self.add_run(llm_run)

    def moderation_trace(self, trace_info: ModerationTraceInfo):
        langsmith_run = LangSmithRunModel(
            name="moderation",
            inputs=trace_info.inputs,
            outputs={
                "action": trace_info.action,
                "flagged": trace_info.flagged,
                "preset_response": trace_info.preset_response,
                "inputs": trace_info.inputs,
            },
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["moderation"],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
        )
        self.add_run(langsmith_run)

    def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
        message_data = trace_info.message_data
        suggested_question_run = LangSmithRunModel(
            name="suggested_question",
            inputs=trace_info.inputs,
            outputs=trace_info.suggested_question,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["suggested_question"],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time or message_data.created_at,
            end_time=trace_info.end_time or message_data.updated_at,
        )
        self.add_run(suggested_question_run)

    def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
        dataset_retrieval_run = LangSmithRunModel(
            name="dataset_retrieval",
            inputs=trace_info.inputs,
            outputs={"documents": trace_info.documents},
            run_type=LangSmithRunType.retriever,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["dataset_retrieval"],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time or trace_info.message_data.created_at,
            end_time=trace_info.end_time or trace_info.message_data.updated_at,
        )
        self.add_run(dataset_retrieval_run)

    def tool_trace(self, trace_info: ToolTraceInfo):
        tool_run = LangSmithRunModel(
            name=trace_info.tool_name,
            inputs=trace_info.tool_inputs,
            outputs=trace_info.tool_outputs,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["tool", trace_info.tool_name],
            parent_run_id=trace_info.message_id,
            start_time=trace_info.start_time,
            end_time=trace_info.end_time,
            file_list=[trace_info.file_url],
        )
        self.add_run(tool_run)

    def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
        name_run = LangSmithRunModel(
            name="generate_name",
            inputs=trace_info.inputs,
            outputs=trace_info.outputs,
            run_type=LangSmithRunType.tool,
            extra={
                "metadata": trace_info.metadata,
            },
            tags=["generate_name"],
            start_time=trace_info.start_time or datetime.now(),
            end_time=trace_info.end_time or datetime.now(),
        )
        self.add_run(name_run)
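
    # Runs are attached to a LangSmith project ("session"): by session_id when the
    # project id is already known, otherwise by session_name so the project is
    # resolved by name.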
    def add_run(self, run_data: LangSmithRunModel):
        data = run_data.model_dump()
        if self.project_id:
            data["session_id"] = self.project_id
        elif self.project_name:
            data["session_name"] = self.project_name
        data = filter_none_values(data)
        try:
            self.langsmith_client.create_run(**data)
            logger.debug("LangSmith Run created successfully.")
        except Exception as e:
            raise ValueError(f"LangSmith Failed to create run: {str(e)}")

    def update_run(self, update_run_data: LangSmithRunUpdateModel):
        data = update_run_data.model_dump()
        data = filter_none_values(data)
        try:
            self.langsmith_client.update_run(**data)
            logger.debug("LangSmith Run updated successfully.")
        except Exception as e:
            raise ValueError(f"LangSmith Failed to update run: {str(e)}")
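
    # api_check validates the configured credentials by creating and immediately
    # deleting a throwaway project; failures surface to the caller as ValueError.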
    def api_check(self):
        try:
            random_project_name = f"test_project_{datetime.now().strftime('%Y%m%d%H%M%S')}"
            self.langsmith_client.create_project(project_name=random_project_name)
            self.langsmith_client.delete_project(project_name=random_project_name)
            return True
        except Exception as e:
            logger.debug(f"LangSmith API check failed: {str(e)}")
            raise ValueError(f"LangSmith API check failed: {str(e)}")
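

# Illustrative usage sketch (not part of the module). Field names on LangSmithConfig
# are assumed from the constructor above (api_key, project, endpoint); the endpoint
# shown is the public LangSmith API URL.
#
#   config = LangSmithConfig(
#       api_key="<your-langsmith-api-key>",
#       project="my-dify-project",
#       endpoint="https://api.smith.langchain.com",
#   )
#   tracer = LangSmithDataTrace(config)
#   tracer.api_check()  # raises ValueError if the key or endpoint is invalid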