node.py 43 KB

import json
import logging
from collections.abc import Generator, Mapping, Sequence
from datetime import UTC, datetime
from typing import TYPE_CHECKING, Any, Optional, cast

from configs import dify_config
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageContent,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.entities.plugin import ModelProviderID
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)


class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

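    # _run drives a single LLM node execution: it resolves template variables,
    # files, context, model config and memory, builds the prompt messages,
    # streams the model invocation, and finally emits a RunCompletedEvent
    # (or a failed one if an exception is raised).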
    def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs: Optional[dict[str, Any]] = None
        process_data = None
        result_text = ""
        usage = LLMUsage.empty_usage()
        finish_reason = None
        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)
            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)
            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
                    query_variable := self.graph_runtime_state.variable_pool.get(
                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
                    )
                ):
                    query = query_variable.text

            prompt_messages, stop = self._fetch_prompt_messages(
                sys_query=query,
                sys_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    # deduct quota
                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    break

            outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
                    inputs=node_inputs,
                    process_data=process_data,
                    outputs=outputs,
                    metadata={
                        NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                        NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                        NodeRunMetadataKey.CURRENCY: usage.currency,
                    },
                    llm_usage=usage,
                )
            )
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                    error_type=type(e).__name__,
                )
            )
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )

    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=list(prompt_messages),
            model_parameters=node_data_model.completion_params,
            stop=list(stop or []),
            stream=True,
            user=self.user_id,
        )

        return self._handle_invoke_result(invoke_result=invoke_result)

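    # Normalizes both invocation shapes into node events: a blocking LLMResult
    # yields a single ModelInvokeCompletedEvent, while a streaming generator is
    # relayed chunk by chunk as RunStreamChunkEvents before the completion event.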
    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            content = invoke_result.message.content
            if content is None:
                message_text = ""
            elif isinstance(content, str):
                message_text = content
            elif isinstance(content, list):
                # Assuming the list contains PromptMessageContent objects with a "data" attribute
                message_text = "".join(
                    item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item)
                    for item in content
                )
            else:
                message_text = str(content)

            yield ModelInvokeCompletedEvent(
                text=message_text,
                usage=invoke_result.usage,
                finish_reason=None,
            )
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text
            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

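    # Resolves the configured jinja2 variables from the variable pool and
    # flattens array/object segments into plain strings for template rendering.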
    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables: dict[str, Any] = {}
        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return str(input_dict["content"])

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

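    # Maps a knowledge-retrieval context item (metadata with _source == "knowledge")
    # into the citation/source dict used for retriever resources.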
    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
                "doc_metadata": metadata.get("doc_metadata"),
            }

            return source

        return None

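    # Resolves the model instance for the configured provider/model and validates
    # its status (credentials, permission, quota) before building the runtime
    # model configuration, including completion parameters and stop sequences.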
    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory

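    # Assembles the final prompt: chat templates are expanded message by message,
    # completion templates are rendered into a single user message with histories
    # injected; memory, the current query, and (deprecated) vision files are then
    # merged, and content items unsupported by the model's features are filtered out.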
    def _fetch_prompt_messages(
        self,
        *,
        sys_query: str | None = None,
        sys_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        # FIXME: fix the type error caused by prompt_messages being re-typed a few times
        prompt_messages: list[Any] = []

        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                self._handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if sys_query:
                message = LLMNodeChatModelMessage(
                    text=sys_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    self._handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )

        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            # For issue #11247 - Check if prompt content is a string or a list
            prompt_content_type = type(prompt_content)
            if prompt_content_type == str:
                if "#histories#" in prompt_content:
                    prompt_content = prompt_content.replace("#histories#", memory_text)
                else:
                    prompt_content = memory_text + "\n" + prompt_content
                prompt_messages[0].content = prompt_content
            elif prompt_content_type == list:
                for content_item in prompt_content:
                    if content_item.type == PromptMessageContentType.TEXT:
                        if "#histories#" in content_item.data:
                            content_item.data = content_item.data.replace("#histories#", memory_text)
                        else:
                            content_item.data = memory_text + "\n" + content_item.data
            else:
                raise ValueError("Invalid prompt content type")

            # Add current query to the prompt message
            if sys_query:
                if prompt_content_type == str:
                    prompt_content = prompt_messages[0].content.replace("#sys.query#", sys_query)
                    prompt_messages[0].content = prompt_content
                elif prompt_content_type == list:
                    for content_item in prompt_content:
                        if content_item.type == PromptMessageContentType.TEXT:
                            content_item.data = sys_query + "\n" + content_item.data
                else:
                    raise ValueError("Invalid prompt content type")
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        # The sys_files will be deprecated later
        if vision_enabled and sys_files:
            file_prompts = []
            for file in sys_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            # If last prompt is a user prompt, add files into its contents,
            # otherwise append a new user prompt
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Remove empty messages and filter unsupported content
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        continue
                    prompt_message_content.append(content_item)
                if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content
            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop

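    # Deducts usage from the hosted (system) provider quota, charging tokens,
    # credits, or a single invocation depending on the configured quota unit;
    # custom providers are left untouched.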
    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = dify_config.get_model_credits(model_instance.model)
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                # TODO: Use provider name with prefix after the data migration.
                Provider.provider_name == ModelProviderID(model_instance.provider).provider_name,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update(
                {
                    "quota_used": Provider.quota_used + used_quota,
                    "last_used": datetime.now(tz=UTC).replace(tzinfo=None),
                }
            )
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping: dict[str, Any] = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here are the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }

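    # Expands chat-model message templates: jinja2 messages are rendered via the
    # code executor, basic messages are converted through the variable pool, and
    # any file segments become separate multimodal prompt contents.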
    def _handle_list_messages(
        self,
        *,
        messages: Sequence[LLMNodeChatModelMessage],
        context: Optional[str],
        jinja2_variables: Sequence[VariableSelector],
        variable_pool: VariablePool,
        vision_detail_config: ImagePromptMessageContent.DETAIL,
    ) -> Sequence[PromptMessage]:
        prompt_messages: list[PromptMessage] = []
        for message in messages:
            if message.edition_type == "jinja2":
                result_text = _render_jinja2_message(
                    template=message.jinja2_text or "",
                    jinjia2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=result_text)], role=message.role
                )
                prompt_messages.append(prompt_message)
            else:
                # Get segment group from basic message
                if context:
                    template = message.text.replace("{#context#}", context)
                else:
                    template = message.text
                segment_group = variable_pool.convert_template(template)

                # Process segments for images
                file_contents = []
                for segment in segment_group.value:
                    if isinstance(segment, ArrayFileSegment):
                        for file in segment.value:
                            if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                                file_content = file_manager.to_prompt_message_content(
                                    file, image_detail_config=vision_detail_config
                                )
                                file_contents.append(file_content)
                    elif isinstance(segment, FileSegment):
                        file = segment.value
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)

                # Create message with text from all segments
                plain_text = segment_group.text
                if plain_text:
                    prompt_message = _combine_message_content_with_role(
                        contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                    )
                    prompt_messages.append(prompt_message)

                if file_contents:
                    # Create message with image contents
                    prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                    prompt_messages.append(prompt_message)

        return prompt_messages

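# Module-level helpers shared by the node: role-based message construction,
# jinja2 rendering through the sandboxed code executor, token budgeting, and
# memory/completion template handling.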

def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=contents)
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=contents)
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=contents)
    raise NotImplementedError(f"Role {role} is not supported")


def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text

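# Estimates the remaining token budget for history messages: context size minus
# the configured max_tokens and the tokens already used by the prompt (defaults
# to 2000 when the model does not declare a context size).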
def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens


def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages: Sequence[PromptMessage] = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages


def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text


def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []

    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text

    prompt_message = _combine_message_content_with_role(
        contents=[TextPromptMessageContent(data=result_text)], role=PromptMessageRole.USER
    )
    prompt_messages.append(prompt_message)

    return prompt_messages