import json
import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from configs import dify_config
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageContent,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)


class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs: Optional[dict[str, Any]] = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
                    query_variable := self.graph_runtime_state.variable_pool.get(
                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
                    )
                ):
                    query = query_variable.text

            prompt_messages, stop = self._fetch_prompt_messages(
                sys_query=query,
                sys_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    # deduct quota
                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                    error_type=type(e).__name__,
                )
            )
            # Stop after reporting the failure; do not fall through to the success path below.
            return
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return
        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )

    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=list(prompt_messages),
            model_parameters=node_data_model.completion_params,
            stop=list(stop or []),
            stream=True,
            user=self.user_id,
        )
        return self._handle_invoke_result(invoke_result=invoke_result)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            content = invoke_result.message.content
            if content is None:
                message_text = ""
            elif isinstance(content, str):
                message_text = content
            elif isinstance(content, list):
                # Assuming the list contains PromptMessageContent objects with a "data" attribute
                message_text = "".join(
                    item.data if hasattr(item, "data") and isinstance(item.data, str) else str(item) for item in content
                )
            else:
                message_text = str(content)

            yield ModelInvokeCompletedEvent(
                text=message_text,
                usage=invoke_result.usage,
                finish_reason=None,
            )
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text
            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables: dict[str, Any] = {}
        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return str(input_dict["content"])

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials are not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} is currently not supported.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} does not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory

    def _fetch_prompt_messages(
        self,
        *,
        sys_query: str | None = None,
        sys_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        # FIXME: fix the type error caused by prompt_messages being re-typed several times below
        prompt_messages: list[Any] = []
        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                self._handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if sys_query:
                message = LLMNodeChatModelMessage(
                    text=sys_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    self._handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )

        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            # For issue #11247 - Check if prompt content is a string or a list
            prompt_content_type = type(prompt_content)
            if prompt_content_type == str:
                if "#histories#" in prompt_content:
                    prompt_content = prompt_content.replace("#histories#", memory_text)
                else:
                    prompt_content = memory_text + "\n" + prompt_content
                prompt_messages[0].content = prompt_content
            elif prompt_content_type == list:
                for content_item in prompt_content:
                    if content_item.type == PromptMessageContentType.TEXT:
                        if "#histories#" in content_item.data:
                            content_item.data = content_item.data.replace("#histories#", memory_text)
                        else:
                            content_item.data = memory_text + "\n" + content_item.data
            else:
                raise ValueError("Invalid prompt content type")

            # Add current query to the prompt message
            if sys_query:
                if prompt_content_type == str:
                    prompt_content = prompt_messages[0].content.replace("#sys.query#", sys_query)
                    prompt_messages[0].content = prompt_content
                elif prompt_content_type == list:
                    for content_item in prompt_content:
                        if content_item.type == PromptMessageContentType.TEXT:
                            content_item.data = sys_query + "\n" + content_item.data
                else:
                    raise ValueError("Invalid prompt content type")
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        # The sys_files will be deprecated later
        if vision_enabled and sys_files:
            file_prompts = []
            for file in sys_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            # If last prompt is a user prompt, add files into its contents,
            # otherwise append a new user prompt
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Remove empty messages and filter unsupported content
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        continue
                    prompt_message_content.append(content_item)
                if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content
            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop

    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = dify_config.get_model_credits(model_instance.model)
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping: dict[str, Any] = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here are the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }

    def _handle_list_messages(
        self,
        *,
        messages: Sequence[LLMNodeChatModelMessage],
        context: Optional[str],
        jinja2_variables: Sequence[VariableSelector],
        variable_pool: VariablePool,
        vision_detail_config: ImagePromptMessageContent.DETAIL,
    ) -> Sequence[PromptMessage]:
        prompt_messages: list[PromptMessage] = []
        for message in messages:
            if message.edition_type == "jinja2":
                result_text = _render_jinja2_message(
                    template=message.jinja2_text or "",
                    jinjia2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=result_text)], role=message.role
                )
                prompt_messages.append(prompt_message)
            else:
                # Get segment group from basic message
                if context:
                    template = message.text.replace("{#context#}", context)
                else:
                    template = message.text
                segment_group = variable_pool.convert_template(template)

                # Process segments for images
                file_contents = []
                for segment in segment_group.value:
                    if isinstance(segment, ArrayFileSegment):
                        for file in segment.value:
                            if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                                file_content = file_manager.to_prompt_message_content(
                                    file, image_detail_config=vision_detail_config
                                )
                                file_contents.append(file_content)
                    elif isinstance(segment, FileSegment):
                        file = segment.value
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)

                # Create message with text from all segments
                plain_text = segment_group.text
                if plain_text:
                    prompt_message = _combine_message_content_with_role(
                        contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                    )
                    prompt_messages.append(prompt_message)

                if file_contents:
                    # Create message with image contents
                    prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                    prompt_messages.append(prompt_message)

        return prompt_messages


def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=contents)
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=contents)
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=contents)
    raise NotImplementedError(f"Role {role} is not supported")


def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text


def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens


def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages: Sequence[PromptMessage] = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages


def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text


def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of the LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []
    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text

    prompt_message = _combine_message_content_with_role(
        contents=[TextPromptMessageContent(data=result_text)], role=PromptMessageRole.USER
    )
    prompt_messages.append(prompt_message)

    return prompt_messages