# node.py

import json
import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageContent,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)
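

# LLMNode turns the node's prompt configuration into PromptMessage objects,
# invokes the configured model through ModelInstance, and re-emits the streamed
# output as workflow events (RunStreamChunkEvent / RunCompletedEvent).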
class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs: Optional[dict[str, Any]] = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
                    query_variable := self.graph_runtime_state.variable_pool.get(
                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
                    )
                ):
                    query = query_variable.text

            prompt_messages, stop = self._fetch_prompt_messages(
                sys_query=query,
                sys_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                    error_type=type(e).__name__,
                )
            )
            # Stop here; otherwise execution would fall through and also emit
            # a SUCCEEDED event after the failure.
            return
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )
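
    # _invoke_llm closes the request-scoped DB session before the (potentially
    # long) streaming call, invokes the model with stream=True, forwards the
    # resulting events, and deducts provider quota once usage is known.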
    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables: dict[str, Any] = {}

        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return str(input_dict["content"])

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")
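
    # _fetch_context yields a RunRetrieverResourceEvent so retriever citations
    # can be surfaced to the caller while the context string is assembled for
    # the prompt.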
    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials are not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} is currently not supported.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)
        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory
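
    # _fetch_prompt_messages assembles the final PromptMessage list: the
    # configured template (chat or completion style), memory history, the
    # current query, and any vision/file contents, then filters out content
    # types the selected model does not support.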
    def _fetch_prompt_messages(
        self,
        *,
        sys_query: str | None = None,
        sys_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        # FIXME: prompt_messages is re-typed several times below; tighten the typing.
        prompt_messages: list[Any] = []

        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                self._handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if sys_query:
                message = LLMNodeChatModelMessage(
                    text=sys_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    self._handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            # For issue #11247 - Check if prompt content is a string or a list
            prompt_content_type = type(prompt_content)
            if prompt_content_type == str:
                if "#histories#" in prompt_content:
                    prompt_content = prompt_content.replace("#histories#", memory_text)
                else:
                    prompt_content = memory_text + "\n" + prompt_content
                prompt_messages[0].content = prompt_content
            elif prompt_content_type == list:
                for content_item in prompt_content:
                    if content_item.type == PromptMessageContentType.TEXT:
                        if "#histories#" in content_item.data:
                            content_item.data = content_item.data.replace("#histories#", memory_text)
                        else:
                            content_item.data = memory_text + "\n" + content_item.data
            else:
                raise ValueError("Invalid prompt content type")

            # Add current query to the prompt message
            if sys_query:
                if prompt_content_type == str:
                    prompt_content = prompt_messages[0].content.replace("#sys.query#", sys_query)
                    prompt_messages[0].content = prompt_content
                elif prompt_content_type == list:
                    for content_item in prompt_content:
                        if content_item.type == PromptMessageContentType.TEXT:
                            content_item.data = sys_query + "\n" + content_item.data
                else:
                    raise ValueError("Invalid prompt content type")
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        # The sys_files will be deprecated later
        if vision_enabled and sys_files:
            file_prompts = []
            for file in sys_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            # If last prompt is a user prompt, add files into its contents,
            # otherwise append a new user prompt
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Remove empty messages and filter unsupported content
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        continue
                    prompt_message_content.append(content_item)
                if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content
            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop
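
    # deduct_llm_quota only applies when the provider runs on the hosted
    # (SYSTEM) quota: token-based quotas deduct total_tokens, credit-based
    # quotas deduct 1 credit per call (20 for gpt-4 models); an unlimited
    # quota (-1) is left untouched.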
    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()
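
    # _extract_variable_selector_to_variable_mapping reports which variable
    # selectors this node reads (prompt variables, memory query, context,
    # files, jinja2 variables); keys are prefixed with the node id.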
    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping: dict[str, Any] = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here are the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }
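
    # _handle_list_messages renders each configured chat message (jinja2 or
    # basic template), resolving {#context#} and variable references, and
    # splits file attachments into their own prompt messages.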
    def _handle_list_messages(
        self,
        *,
        messages: Sequence[LLMNodeChatModelMessage],
        context: Optional[str],
        jinja2_variables: Sequence[VariableSelector],
        variable_pool: VariablePool,
        vision_detail_config: ImagePromptMessageContent.DETAIL,
    ) -> Sequence[PromptMessage]:
        prompt_messages: list[PromptMessage] = []
        for message in messages:
            if message.edition_type == "jinja2":
                result_text = _render_jinja2_message(
                    template=message.jinja2_text or "",
                    jinjia2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=result_text)], role=message.role
                )
                prompt_messages.append(prompt_message)
            else:
                # Get segment group from basic message
                if context:
                    template = message.text.replace("{#context#}", context)
                else:
                    template = message.text
                segment_group = variable_pool.convert_template(template)

                # Process segments for images
                file_contents = []
                for segment in segment_group.value:
                    if isinstance(segment, ArrayFileSegment):
                        for file in segment.value:
                            if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                                file_content = file_manager.to_prompt_message_content(
                                    file, image_detail_config=vision_detail_config
                                )
                                file_contents.append(file_content)
                    elif isinstance(segment, FileSegment):
                        file = segment.value
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)

                # Create message with text from all segments
                plain_text = segment_group.text
                if plain_text:
                    prompt_message = _combine_message_content_with_role(
                        contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                    )
                    prompt_messages.append(prompt_message)

                if file_contents:
                    # Create message with image contents
                    prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                    prompt_messages.append(prompt_message)

        return prompt_messages


def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=contents)
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=contents)
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=contents)
    raise NotImplementedError(f"Role {role} is not supported")


def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text
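

# _calculate_rest_token estimates how many tokens remain for memory history:
# the model's context size minus the configured max_tokens and the tokens
# already used by the prompt. It falls back to 2000 when the model schema does
# not declare a context size.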
def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens


def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages: Sequence[PromptMessage] = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages


def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text


def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []

    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text

    prompt_message = _combine_message_content_with_role(
        contents=[TextPromptMessageContent(data=result_text)], role=PromptMessageRole.USER
    )
    prompt_messages.append(prompt_message)

    return prompt_messages