node.py

import json
import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageContent,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)
class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs: Optional[dict[str, Any]] = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
                    query_variable := self.graph_runtime_state.variable_pool.get(
                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
                    )
                ):
                    query = query_variable.text

            prompt_messages, stop = self._fetch_prompt_messages(
                sys_query=query,
                sys_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    # deduct quota
                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                    error_type=type(e).__name__,
                )
            )
            return
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )
    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        return self._handle_invoke_result(invoke_result=invoke_result)
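    # Consumes the streaming invoke result: yields one RunStreamChunkEvent per delta for
    # the node's "text" output, accumulates the full text, and finishes with a single
    # ModelInvokeCompletedEvent carrying the aggregated usage and finish reason. A plain
    # (non-streaming) LLMResult yields nothing here.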
    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)
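    # For prompts edited in jinja2 mode, promote jinja2_text into the text field so the
    # rest of the pipeline only has to read one field. Handles both the chat message
    # list and the completion prompt template.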
    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages
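    # Resolves each configured jinja2 variable selector from the variable pool and
    # flattens the value to a string: array segments are joined line by line, object
    # segments are JSON-encoded (knowledge-context dicts keep only their "content"),
    # and everything else falls back to the segment's text representation.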
    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables: dict[str, Any] = {}

        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return str(input_dict["content"])

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables
    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs
    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")
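    # Generator that yields at most one RunRetrieverResourceEvent. A string context is
    # passed through as-is; an array context (for example, knowledge retrieval output)
    # is joined into one text block, and each dict item is converted into
    # retriever-resource metadata for citation display.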
    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )
    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None
    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )
    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory
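    # Assembles the final prompt. Chat templates become a message list (template
    # messages + memory history + the current query); completion templates become a
    # single user message with the "#histories#" and "#sys.query#" placeholders filled
    # in. Legacy sys_files are appended to the trailing user message (or a new one),
    # then every message is filtered against the model's supported content features
    # (image/document/video/audio) and empty messages are dropped.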
    def _fetch_prompt_messages(
        self,
        *,
        sys_query: str | None = None,
        sys_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        # FIXME: fix the type error caused by prompt_messages being retyped several times below
        prompt_messages: list[Any] = []

        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                self._handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if sys_query:
                message = LLMNodeChatModelMessage(
                    text=sys_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    self._handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )

            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            # For issue #11247 - Check if prompt content is a string or a list
            prompt_content_type = type(prompt_content)
            if prompt_content_type == str:
                if "#histories#" in prompt_content:
                    prompt_content = prompt_content.replace("#histories#", memory_text)
                else:
                    prompt_content = memory_text + "\n" + prompt_content
                prompt_messages[0].content = prompt_content
            elif prompt_content_type == list:
                for content_item in prompt_content:
                    if content_item.type == PromptMessageContentType.TEXT:
                        if "#histories#" in content_item.data:
                            content_item.data = content_item.data.replace("#histories#", memory_text)
                        else:
                            content_item.data = memory_text + "\n" + content_item.data
            else:
                raise ValueError("Invalid prompt content type")

            # Add current query to the prompt message
            if sys_query:
                if prompt_content_type == str:
                    prompt_content = prompt_messages[0].content.replace("#sys.query#", sys_query)
                    prompt_messages[0].content = prompt_content
                elif prompt_content_type == list:
                    for content_item in prompt_content:
                        if content_item.type == PromptMessageContentType.TEXT:
                            content_item.data = sys_query + "\n" + content_item.data
                else:
                    raise ValueError("Invalid prompt content type")
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        # The sys_files will be deprecated later
        if vision_enabled and sys_files:
            file_prompts = []
            for file in sys_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            # If last prompt is a user prompt, add files into its contents,
            # otherwise append a new user prompt
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Remove empty messages and filter unsupported content
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        continue
                    prompt_message_content.append(content_item)
                if (
                    len(prompt_message_content) == 1
                    and prompt_message_content[0].type == PromptMessageContentType.TEXT
                ):
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content
            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop
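    # Deducts usage from the tenant's quota when the request went through a system
    # (hosted) provider. Illustrative arithmetic, values hypothetical: with
    # QuotaUnit.TOKENS and usage.total_tokens == 1530, quota_used grows by 1530; with
    # QuotaUnit.CREDITS it grows by 1 per call, or 20 for models whose name contains
    # "gpt-4". A quota_limit of -1 means unlimited, so nothing is deducted.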
    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()
    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping: dict[str, Any] = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping
    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here are the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }
    def _handle_list_messages(
        self,
        *,
        messages: Sequence[LLMNodeChatModelMessage],
        context: Optional[str],
        jinja2_variables: Sequence[VariableSelector],
        variable_pool: VariablePool,
        vision_detail_config: ImagePromptMessageContent.DETAIL,
    ) -> Sequence[PromptMessage]:
        prompt_messages: list[PromptMessage] = []
        for message in messages:
            if message.edition_type == "jinja2":
                result_text = _render_jinja2_message(
                    template=message.jinja2_text or "",
                    jinjia2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=result_text)], role=message.role
                )
                prompt_messages.append(prompt_message)
            else:
                # Get segment group from basic message
                if context:
                    template = message.text.replace("{#context#}", context)
                else:
                    template = message.text
                segment_group = variable_pool.convert_template(template)

                # Process segments for images
                file_contents = []
                for segment in segment_group.value:
                    if isinstance(segment, ArrayFileSegment):
                        for file in segment.value:
                            if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                                file_content = file_manager.to_prompt_message_content(
                                    file, image_detail_config=vision_detail_config
                                )
                                file_contents.append(file_content)
                    elif isinstance(segment, FileSegment):
                        file = segment.value
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)

                # Create message with text from all segments
                plain_text = segment_group.text
                if plain_text:
                    prompt_message = _combine_message_content_with_role(
                        contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                    )
                    prompt_messages.append(prompt_message)

                if file_contents:
                    # Create message with image contents
                    prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                    prompt_messages.append(prompt_message)

        return prompt_messages
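# The functions below are module-level helpers used by LLMNode: building role-specific
# prompt messages, rendering jinja2 templates through the sandboxed code executor,
# budgeting tokens for memory, and expanding completion-model templates.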
def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=contents)
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=contents)
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=contents)
    raise NotImplementedError(f"Role {role} is not supported")
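# Renders a jinja2 template in the code-executor sandbox, resolving each configured
# variable selector from the variable pool first. Illustrative example (values are
# hypothetical): a template "Hello, {{ name }}!" with a selector mapping "name" to a
# pool variable holding "Alice" renders "Hello, Alice!".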
def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text
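# Estimates how many tokens remain for conversation history: the model's context size
# minus the configured max_tokens and the tokens already used by the current prompt,
# clamped at zero (defaults to 2000 if the model does not report a context size).
# Illustrative numbers: an 8192-token context with max_tokens=1024 and a 500-token
# prompt leaves 6668 tokens for history.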
def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens
def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages: Sequence[PromptMessage] = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages
def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text
def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []
    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text
    prompt_message = _combine_message_content_with_role(
        contents=[TextPromptMessageContent(data=result_text)], role=PromptMessageRole.USER
    )
    prompt_messages.append(prompt_message)
    return prompt_messages