import json
import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    FileTypeNotSupportError,
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)


class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> NodeRunResult | Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            # fetch prompt messages
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
            else:
                query = None

            prompt_messages, stop = self._fetch_prompt_messages(
                user_query=query,
                user_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return
        except Exception as e:
            logger.exception(f"Node {self.node_id} failed to run")
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )

    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
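
    # Streams each chunk as a RunStreamChunkEvent and finishes with a single
    # ModelInvokeCompletedEvent carrying the aggregated text, usage, and finish reason.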
    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables = {}
        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return input_dict["content"]

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )

    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory
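
    # Prompt assembly: render the configured template (chat messages or a single
    # completion prompt), splice in conversation history from memory, append the
    # current user query, attach vision files, then filter message content
    # against the model's declared features.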
    def _fetch_prompt_messages(
        self,
        *,
        user_query: str | None = None,
        user_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        prompt_messages = []

        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                _handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if user_query:
                message = LLMNodeChatModelMessage(
                    text=user_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    _handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )

        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            if "#histories#" in prompt_content:
                prompt_content = prompt_content.replace("#histories#", memory_text)
            else:
                prompt_content = memory_text + "\n" + prompt_content
            prompt_messages[0].content = prompt_content

            # Add current query to the prompt message
            if user_query:
                prompt_content = prompt_messages[0].content.replace("#sys.query#", user_query)
                prompt_messages[0].content = prompt_content
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        if vision_enabled and user_files:
            file_prompts = []
            for file in user_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Filter prompt messages
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        raise FileTypeNotSupportError(type_name=content_item.type)
                    prompt_message_content.append(content_item)
                if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content
            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop
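
    # Quota deduction applies only when the tenant is using a system (hosted)
    # provider; the amount deducted depends on the active quota unit (tokens or credits).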
    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here is the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }
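

# The module-level helpers below are used by LLMNode._fetch_prompt_messages to
# render templates, pull conversation history, and build role-tagged messages.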
def _combine_text_message_with_role(*, text: str, role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=[TextPromptMessageContent(data=text)])
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=[TextPromptMessageContent(data=text)])
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=[TextPromptMessageContent(data=text)])

    raise NotImplementedError(f"Role {role} is not supported")


def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text


def _handle_list_messages(
    *,
    messages: Sequence[LLMNodeChatModelMessage],
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
    vision_detail_config: ImagePromptMessageContent.DETAIL,
) -> Sequence[PromptMessage]:
    prompt_messages = []
    for message in messages:
        if message.edition_type == "jinja2":
            result_text = _render_jinja2_message(
                template=message.jinja2_text or "",
                jinjia2_variables=jinja2_variables,
                variable_pool=variable_pool,
            )
            prompt_message = _combine_text_message_with_role(text=result_text, role=message.role)
            prompt_messages.append(prompt_message)
        else:
            # Get segment group from basic message
            if context:
                template = message.text.replace("{#context#}", context)
            else:
                template = message.text
            segment_group = variable_pool.convert_template(template)

            # Process segments for images
            file_contents = []
            for segment in segment_group.value:
                if isinstance(segment, ArrayFileSegment):
                    for file in segment.value:
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)
                if isinstance(segment, FileSegment):
                    file = segment.value
                    if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                        file_content = file_manager.to_prompt_message_content(
                            file, image_detail_config=vision_detail_config
                        )
                        file_contents.append(file_content)

            # Create message with text from all segments
            plain_text = segment_group.text
            if plain_text:
                prompt_message = _combine_text_message_with_role(text=plain_text, role=message.role)
                prompt_messages.append(prompt_message)

            if file_contents:
                # Create message with image contents
                prompt_message = UserPromptMessage(content=file_contents)
                prompt_messages.append(prompt_message)

    return prompt_messages
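

# Rough estimate of the context window left for history: context size minus the
# configured max_tokens and the tokens already used by the prompt (defaults to
# 2000 when the model does not report a context size).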
def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens


def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages


def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text


def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []

    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text

    prompt_message = _combine_text_message_with_role(text=result_text, role=PromptMessageRole.USER)
    prompt_messages.append(prompt_message)

    return prompt_messages