node.py

import json
import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.file import FileType, file_manager
from core.helper.code_executor import CodeExecutor, CodeLanguage
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities import (
    ImagePromptMessageContent,
    PromptMessage,
    PromptMessageContentType,
    TextPromptMessageContent,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageContent,
    PromptMessageRole,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.variables import (
    ArrayAnySegment,
    ArrayFileSegment,
    ArraySegment,
    FileSegment,
    NoneSegment,
    ObjectSegment,
    StringSegment,
)
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.event import InNodeEvent
from core.workflow.nodes.base import BaseNode
from core.workflow.nodes.enums import NodeType
from core.workflow.nodes.event import (
    ModelInvokeCompletedEvent,
    NodeEvent,
    RunCompletedEvent,
    RunRetrieverResourceEvent,
    RunStreamChunkEvent,
)
from core.workflow.utils.variable_template_parser import VariableTemplateParser
from extensions.ext_database import db
from models.model import Conversation
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus

from .entities import (
    LLMNodeChatModelMessage,
    LLMNodeCompletionModelPromptTemplate,
    LLMNodeData,
    ModelConfig,
)
from .exc import (
    InvalidContextStructureError,
    InvalidVariableTypeError,
    LLMModeRequiredError,
    LLMNodeError,
    MemoryRolePrefixRequiredError,
    ModelNotExistError,
    NoPromptFoundError,
    TemplateTypeNotSupportError,
    VariableNotFoundError,
)

if TYPE_CHECKING:
    from core.file.models import File

logger = logging.getLogger(__name__)
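

# LLMNode drives a single "LLM" step of a workflow graph: it resolves prompt
# variables from the variable pool, assembles prompt messages (template, memory,
# context, files), streams the model response as events, and reports usage and
# metadata when the run completes.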
class LLMNode(BaseNode[LLMNodeData]):
    _node_data_cls = LLMNodeData
    _node_type = NodeType.LLM

    def _run(self) -> NodeRunResult | Generator[NodeEvent | InNodeEvent, None, None]:
        node_inputs = None
        process_data = None

        try:
            # init messages template
            self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template)

            # fetch variables and fetch values from variable pool
            inputs = self._fetch_inputs(node_data=self.node_data)

            # fetch jinja2 inputs
            jinja_inputs = self._fetch_jinja_inputs(node_data=self.node_data)

            # merge inputs
            inputs.update(jinja_inputs)

            node_inputs = {}

            # fetch files
            files = (
                self._fetch_files(selector=self.node_data.vision.configs.variable_selector)
                if self.node_data.vision.enabled
                else []
            )

            if files:
                node_inputs["#files#"] = [file.to_dict() for file in files]

            # fetch context value
            generator = self._fetch_context(node_data=self.node_data)
            context = None
            for event in generator:
                if isinstance(event, RunRetrieverResourceEvent):
                    context = event.context
                    yield event

            if context:
                node_inputs["#context#"] = context

            # fetch model config
            model_instance, model_config = self._fetch_model_config(self.node_data.model)

            # fetch memory
            memory = self._fetch_memory(node_data_memory=self.node_data.memory, model_instance=model_instance)

            query = None
            if self.node_data.memory:
                query = self.node_data.memory.query_prompt_template
                if not query and (
                    query_variable := self.graph_runtime_state.variable_pool.get(
                        (SYSTEM_VARIABLE_NODE_ID, SystemVariableKey.QUERY)
                    )
                ):
                    query = query_variable.text

            prompt_messages, stop = self._fetch_prompt_messages(
                user_query=query,
                user_files=files,
                context=context,
                memory=memory,
                model_config=model_config,
                prompt_template=self.node_data.prompt_template,
                memory_config=self.node_data.memory,
                vision_enabled=self.node_data.vision.enabled,
                vision_detail=self.node_data.vision.configs.detail,
                variable_pool=self.graph_runtime_state.variable_pool,
                jinja2_variables=self.node_data.prompt_config.jinja2_variables,
            )

            process_data = {
                "model_mode": model_config.mode,
                "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                    model_mode=model_config.mode, prompt_messages=prompt_messages
                ),
                "model_provider": model_config.provider,
                "model_name": model_config.model,
            }

            # handle invoke result
            generator = self._invoke_llm(
                node_data_model=self.node_data.model,
                model_instance=model_instance,
                prompt_messages=prompt_messages,
                stop=stop,
            )

            result_text = ""
            usage = LLMUsage.empty_usage()
            finish_reason = None
            for event in generator:
                if isinstance(event, RunStreamChunkEvent):
                    yield event
                elif isinstance(event, ModelInvokeCompletedEvent):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return
        except Exception as e:
            yield RunCompletedEvent(
                run_result=NodeRunResult(
                    status=WorkflowNodeExecutionStatus.FAILED,
                    error=str(e),
                    inputs=node_inputs,
                    process_data=process_data,
                )
            )
            return

        outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason}

        yield RunCompletedEvent(
            run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.SUCCEEDED,
                inputs=node_inputs,
                process_data=process_data,
                outputs=outputs,
                metadata={
                    NodeRunMetadataKey.TOTAL_TOKENS: usage.total_tokens,
                    NodeRunMetadataKey.TOTAL_PRICE: usage.total_price,
                    NodeRunMetadataKey.CURRENCY: usage.currency,
                },
                llm_usage=usage,
            )
        )
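
    # _invoke_llm releases the database session before the potentially long
    # streaming call, re-emits the stream/completion events produced by
    # _handle_invoke_result, and finally deducts hosted-provider quota based on
    # the usage reported in the completion event.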
    def _invoke_llm(
        self,
        node_data_model: ModelConfig,
        model_instance: ModelInstance,
        prompt_messages: Sequence[PromptMessage],
        stop: Optional[Sequence[str]] = None,
    ) -> Generator[NodeEvent, None, None]:
        db.session.close()

        invoke_result = model_instance.invoke_llm(
            prompt_messages=prompt_messages,
            model_parameters=node_data_model.completion_params,
            stop=stop,
            stream=True,
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
            return

        model = None
        prompt_messages: list[PromptMessage] = []
        full_text = ""
        usage = None
        finish_reason = None
        for result in invoke_result:
            text = result.delta.message.content
            full_text += text

            yield RunStreamChunkEvent(chunk_content=text, from_variable_selector=[self.node_id, "text"])

            if not model:
                model = result.model

            if not prompt_messages:
                prompt_messages = result.prompt_messages

            if not usage and result.delta.usage:
                usage = result.delta.usage

            if not finish_reason and result.delta.finish_reason:
                finish_reason = result.delta.finish_reason

        if not usage:
            usage = LLMUsage.empty_usage()

        yield ModelInvokeCompletedEvent(text=full_text, usage=usage, finish_reason=finish_reason)

    def _transform_chat_messages(
        self, messages: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, /
    ) -> Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
        if isinstance(messages, LLMNodeCompletionModelPromptTemplate):
            if messages.edition_type == "jinja2" and messages.jinja2_text:
                messages.text = messages.jinja2_text

            return messages

        for message in messages:
            if message.edition_type == "jinja2" and message.jinja2_text:
                message.text = message.jinja2_text

        return messages

    def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]:
        variables = {}

        if not node_data.prompt_config:
            return variables

        for variable_selector in node_data.prompt_config.jinja2_variables or []:
            variable_name = variable_selector.variable
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")

            def parse_dict(input_dict: Mapping[str, Any]) -> str:
                """
                Parse dict into string
                """
                # check if it's a context structure
                if "metadata" in input_dict and "_source" in input_dict["metadata"] and "content" in input_dict:
                    return input_dict["content"]

                # else, parse the dict
                try:
                    return json.dumps(input_dict, ensure_ascii=False)
                except Exception:
                    return str(input_dict)

            if isinstance(variable, ArraySegment):
                result = ""
                for item in variable.value:
                    if isinstance(item, dict):
                        result += parse_dict(item)
                    else:
                        result += str(item)
                    result += "\n"
                value = result.strip()
            elif isinstance(variable, ObjectSegment):
                value = parse_dict(variable.value)
            else:
                value = variable.text

            variables[variable_name] = value

        return variables

    def _fetch_inputs(self, node_data: LLMNodeData) -> dict[str, Any]:
        inputs = {}
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list):
            for prompt in prompt_template:
                variable_template_parser = VariableTemplateParser(template=prompt.text)
                variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, CompletionModelPromptTemplate):
            variable_template_parser = VariableTemplateParser(template=prompt_template.text)
            variable_selectors = variable_template_parser.extract_variable_selectors()

        for variable_selector in variable_selectors:
            variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
            if variable is None:
                raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
            if isinstance(variable, NoneSegment):
                inputs[variable_selector.variable] = ""
            inputs[variable_selector.variable] = variable.to_object()

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable = self.graph_runtime_state.variable_pool.get(variable_selector.value_selector)
                if variable is None:
                    raise VariableNotFoundError(f"Variable {variable_selector.variable} not found")
                if isinstance(variable, NoneSegment):
                    continue
                inputs[variable_selector.variable] = variable.to_object()

        return inputs

    def _fetch_files(self, *, selector: Sequence[str]) -> Sequence["File"]:
        variable = self.graph_runtime_state.variable_pool.get(selector)
        if variable is None:
            return []
        elif isinstance(variable, FileSegment):
            return [variable.value]
        elif isinstance(variable, ArrayFileSegment):
            return variable.value
        elif isinstance(variable, NoneSegment | ArrayAnySegment):
            return []
        raise InvalidVariableTypeError(f"Invalid variable type: {type(variable)}")

    def _fetch_context(self, node_data: LLMNodeData):
        if not node_data.context.enabled:
            return

        if not node_data.context.variable_selector:
            return

        context_value_variable = self.graph_runtime_state.variable_pool.get(node_data.context.variable_selector)
        if context_value_variable:
            if isinstance(context_value_variable, StringSegment):
                yield RunRetrieverResourceEvent(retriever_resources=[], context=context_value_variable.value)
            elif isinstance(context_value_variable, ArraySegment):
                context_str = ""
                original_retriever_resource = []
                for item in context_value_variable.value:
                    if isinstance(item, str):
                        context_str += item + "\n"
                    else:
                        if "content" not in item:
                            raise InvalidContextStructureError(f"Invalid context structure: {item}")

                        context_str += item["content"] + "\n"

                        retriever_resource = self._convert_to_original_retriever_resource(item)
                        if retriever_resource:
                            original_retriever_resource.append(retriever_resource)

                yield RunRetrieverResourceEvent(
                    retriever_resources=original_retriever_resource, context=context_str.strip()
                )
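
    # Context items produced by a knowledge-retrieval node carry a
    # metadata["_source"] == "knowledge" marker; only those items are converted
    # into citation ("retriever resource") entries below.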
    def _convert_to_original_retriever_resource(self, context_dict: dict) -> Optional[dict]:
        if (
            "metadata" in context_dict
            and "_source" in context_dict["metadata"]
            and context_dict["metadata"]["_source"] == "knowledge"
        ):
            metadata = context_dict.get("metadata", {})

            source = {
                "position": metadata.get("position"),
                "dataset_id": metadata.get("dataset_id"),
                "dataset_name": metadata.get("dataset_name"),
                "document_id": metadata.get("document_id"),
                "document_name": metadata.get("document_name"),
                "data_source_type": metadata.get("document_data_source_type"),
                "segment_id": metadata.get("segment_id"),
                "retriever_from": metadata.get("retriever_from"),
                "score": metadata.get("score"),
                "hit_count": metadata.get("segment_hit_count"),
                "word_count": metadata.get("segment_word_count"),
                "segment_position": metadata.get("segment_position"),
                "index_node_hash": metadata.get("segment_index_node_hash"),
                "content": context_dict.get("content"),
                "page": metadata.get("page"),
            }

            return source

        return None

    def _fetch_model_config(
        self, node_data_model: ModelConfig
    ) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]:
        model_name = node_data_model.name
        provider_name = node_data_model.provider

        model_manager = ModelManager()
        model_instance = model_manager.get_model_instance(
            tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider_name, model=model_name
        )

        provider_model_bundle = model_instance.provider_model_bundle
        model_type_instance = model_instance.model_type_instance
        model_type_instance = cast(LargeLanguageModel, model_type_instance)

        model_credentials = model_instance.credentials

        # check model
        provider_model = provider_model_bundle.configuration.get_provider_model(
            model=model_name, model_type=ModelType.LLM
        )

        if provider_model is None:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        if provider_model.status == ModelStatus.NO_CONFIGURE:
            raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.")
        elif provider_model.status == ModelStatus.NO_PERMISSION:
            raise ModelCurrentlyNotSupportError(f"Dify Hosted OpenAI {model_name} currently not support.")
        elif provider_model.status == ModelStatus.QUOTA_EXCEEDED:
            raise QuotaExceededError(f"Model provider {provider_name} quota exceeded.")

        # model config
        completion_params = node_data_model.completion_params
        stop = []
        if "stop" in completion_params:
            stop = completion_params["stop"]
            del completion_params["stop"]

        # get model mode
        model_mode = node_data_model.mode
        if not model_mode:
            raise LLMModeRequiredError("LLM mode is required.")

        model_schema = model_type_instance.get_model_schema(model_name, model_credentials)

        if not model_schema:
            raise ModelNotExistError(f"Model {model_name} not exist.")

        return model_instance, ModelConfigWithCredentialsEntity(
            provider=provider_name,
            model=model_name,
            model_schema=model_schema,
            mode=model_mode,
            provider_model_bundle=provider_model_bundle,
            credentials=model_credentials,
            parameters=completion_params,
            stop=stop,
        )

    def _fetch_memory(
        self, node_data_memory: Optional[MemoryConfig], model_instance: ModelInstance
    ) -> Optional[TokenBufferMemory]:
        if not node_data_memory:
            return None

        # get conversation id
        conversation_id_variable = self.graph_runtime_state.variable_pool.get(
            ["sys", SystemVariableKey.CONVERSATION_ID.value]
        )
        if not isinstance(conversation_id_variable, StringSegment):
            return None
        conversation_id = conversation_id_variable.value

        # get conversation
        conversation = (
            db.session.query(Conversation)
            .filter(Conversation.app_id == self.app_id, Conversation.id == conversation_id)
            .first()
        )

        if not conversation:
            return None

        memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

        return memory
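
    # Prompt assembly order for chat models: configured template messages, then
    # history pulled from memory, then the current user query. For completion
    # models the memory text is spliced into the single prompt (replacing
    # "#histories#" when present). Vision files are appended afterwards, and a
    # final pass drops content types the model's declared features cannot accept.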
    def _fetch_prompt_messages(
        self,
        *,
        user_query: str | None = None,
        user_files: Sequence["File"],
        context: str | None = None,
        memory: TokenBufferMemory | None = None,
        model_config: ModelConfigWithCredentialsEntity,
        prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate,
        memory_config: MemoryConfig | None = None,
        vision_enabled: bool = False,
        vision_detail: ImagePromptMessageContent.DETAIL,
        variable_pool: VariablePool,
        jinja2_variables: Sequence[VariableSelector],
    ) -> tuple[Sequence[PromptMessage], Optional[Sequence[str]]]:
        prompt_messages = []

        if isinstance(prompt_template, list):
            # For chat model
            prompt_messages.extend(
                _handle_list_messages(
                    messages=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                    vision_detail_config=vision_detail,
                )
            )

            # Get memory messages for chat mode
            memory_messages = _handle_memory_chat_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Extend prompt_messages with memory messages
            prompt_messages.extend(memory_messages)

            # Add current query to the prompt messages
            if user_query:
                message = LLMNodeChatModelMessage(
                    text=user_query,
                    role=PromptMessageRole.USER,
                    edition_type="basic",
                )
                prompt_messages.extend(
                    _handle_list_messages(
                        messages=[message],
                        context="",
                        jinja2_variables=[],
                        variable_pool=variable_pool,
                        vision_detail_config=vision_detail,
                    )
                )

        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            # For completion model
            prompt_messages.extend(
                _handle_completion_template(
                    template=prompt_template,
                    context=context,
                    jinja2_variables=jinja2_variables,
                    variable_pool=variable_pool,
                )
            )

            # Get memory text for completion model
            memory_text = _handle_memory_completion_mode(
                memory=memory,
                memory_config=memory_config,
                model_config=model_config,
            )
            # Insert histories into the prompt
            prompt_content = prompt_messages[0].content
            if "#histories#" in prompt_content:
                prompt_content = prompt_content.replace("#histories#", memory_text)
            else:
                prompt_content = memory_text + "\n" + prompt_content
            prompt_messages[0].content = prompt_content

            # Add current query to the prompt message
            if user_query:
                prompt_content = prompt_messages[0].content.replace("#sys.query#", user_query)
                prompt_messages[0].content = prompt_content
        else:
            raise TemplateTypeNotSupportError(type_name=str(type(prompt_template)))

        if vision_enabled and user_files:
            file_prompts = []
            for file in user_files:
                file_prompt = file_manager.to_prompt_message_content(file, image_detail_config=vision_detail)
                file_prompts.append(file_prompt)
            if (
                len(prompt_messages) > 0
                and isinstance(prompt_messages[-1], UserPromptMessage)
                and isinstance(prompt_messages[-1].content, list)
            ):
                prompt_messages[-1] = UserPromptMessage(content=prompt_messages[-1].content + file_prompts)
            else:
                prompt_messages.append(UserPromptMessage(content=file_prompts))

        # Filter prompt messages
        filtered_prompt_messages = []
        for prompt_message in prompt_messages:
            if isinstance(prompt_message.content, list):
                prompt_message_content = []
                for content_item in prompt_message.content:
                    # Skip content if features are not defined
                    if not model_config.model_schema.features:
                        if content_item.type != PromptMessageContentType.TEXT:
                            continue
                        prompt_message_content.append(content_item)
                        continue

                    # Skip content if corresponding feature is not supported
                    if (
                        (
                            content_item.type == PromptMessageContentType.IMAGE
                            and ModelFeature.VISION not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.DOCUMENT
                            and ModelFeature.DOCUMENT not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.VIDEO
                            and ModelFeature.VIDEO not in model_config.model_schema.features
                        )
                        or (
                            content_item.type == PromptMessageContentType.AUDIO
                            and ModelFeature.AUDIO not in model_config.model_schema.features
                        )
                    ):
                        continue
                    prompt_message_content.append(content_item)
                if len(prompt_message_content) == 1 and prompt_message_content[0].type == PromptMessageContentType.TEXT:
                    prompt_message.content = prompt_message_content[0].data
                else:
                    prompt_message.content = prompt_message_content

            if prompt_message.is_empty():
                continue
            filtered_prompt_messages.append(prompt_message)

        if len(filtered_prompt_messages) == 0:
            raise NoPromptFoundError(
                "No prompt found in the LLM configuration. "
                "Please ensure a prompt is properly configured before proceeding."
            )

        stop = model_config.stop
        return filtered_prompt_messages, stop
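
    # Quota is only deducted for system (hosted) providers. The deducted amount
    # depends on the plan's quota unit: TOKENS deducts usage.total_tokens;
    # CREDITS deducts 1 per call, or 20 for models whose name contains "gpt-4";
    # any other unit deducts 1. A quota_limit of -1 means unlimited, so nothing
    # is deducted.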
    @classmethod
    def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
        provider_model_bundle = model_instance.provider_model_bundle
        provider_configuration = provider_model_bundle.configuration

        if provider_configuration.using_provider_type != ProviderType.SYSTEM:
            return

        system_configuration = provider_configuration.system_configuration

        quota_unit = None
        for quota_configuration in system_configuration.quota_configurations:
            if quota_configuration.quota_type == system_configuration.current_quota_type:
                quota_unit = quota_configuration.quota_unit

                if quota_configuration.quota_limit == -1:
                    return

                break

        used_quota = None
        if quota_unit:
            if quota_unit == QuotaUnit.TOKENS:
                used_quota = usage.total_tokens
            elif quota_unit == QuotaUnit.CREDITS:
                used_quota = 1

                if "gpt-4" in model_instance.model:
                    used_quota = 20
            else:
                used_quota = 1

        if used_quota is not None and system_configuration.current_quota_type is not None:
            db.session.query(Provider).filter(
                Provider.tenant_id == tenant_id,
                Provider.provider_name == model_instance.provider,
                Provider.provider_type == ProviderType.SYSTEM.value,
                Provider.quota_type == system_configuration.current_quota_type.value,
                Provider.quota_limit > Provider.quota_used,
            ).update({"quota_used": Provider.quota_used + used_quota})
            db.session.commit()

    @classmethod
    def _extract_variable_selector_to_variable_mapping(
        cls,
        *,
        graph_config: Mapping[str, Any],
        node_id: str,
        node_data: LLMNodeData,
    ) -> Mapping[str, Sequence[str]]:
        prompt_template = node_data.prompt_template

        variable_selectors = []
        if isinstance(prompt_template, list) and all(
            isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template
        ):
            for prompt in prompt_template:
                if prompt.edition_type != "jinja2":
                    variable_template_parser = VariableTemplateParser(template=prompt.text)
                    variable_selectors.extend(variable_template_parser.extract_variable_selectors())
        elif isinstance(prompt_template, LLMNodeCompletionModelPromptTemplate):
            if prompt_template.edition_type != "jinja2":
                variable_template_parser = VariableTemplateParser(template=prompt_template.text)
                variable_selectors = variable_template_parser.extract_variable_selectors()
        else:
            raise InvalidVariableTypeError(f"Invalid prompt template type: {type(prompt_template)}")

        variable_mapping = {}
        for variable_selector in variable_selectors:
            variable_mapping[variable_selector.variable] = variable_selector.value_selector

        memory = node_data.memory
        if memory and memory.query_prompt_template:
            query_variable_selectors = VariableTemplateParser(
                template=memory.query_prompt_template
            ).extract_variable_selectors()
            for variable_selector in query_variable_selectors:
                variable_mapping[variable_selector.variable] = variable_selector.value_selector

        if node_data.context.enabled:
            variable_mapping["#context#"] = node_data.context.variable_selector

        if node_data.vision.enabled:
            variable_mapping["#files#"] = ["sys", SystemVariableKey.FILES.value]

        if node_data.memory:
            variable_mapping["#sys.query#"] = ["sys", SystemVariableKey.QUERY.value]

        if node_data.prompt_config:
            enable_jinja = False

            if isinstance(prompt_template, list):
                for prompt in prompt_template:
                    if prompt.edition_type == "jinja2":
                        enable_jinja = True
                        break
            else:
                if prompt_template.edition_type == "jinja2":
                    enable_jinja = True

            if enable_jinja:
                for variable_selector in node_data.prompt_config.jinja2_variables or []:
                    variable_mapping[variable_selector.variable] = variable_selector.value_selector

        variable_mapping = {node_id + "." + key: value for key, value in variable_mapping.items()}

        return variable_mapping

    @classmethod
    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
        return {
            "type": "llm",
            "config": {
                "prompt_templates": {
                    "chat_model": {
                        "prompts": [
                            {"role": "system", "text": "You are a helpful AI assistant.", "edition_type": "basic"}
                        ]
                    },
                    "completion_model": {
                        "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
                        "prompt": {
                            "text": "Here are the chat histories between human and assistant, inside "
                            "<histories></histories> XML tags.\n\n<histories>\n{{"
                            "#histories#}}\n</histories>\n\n\nHuman: {{#sys.query#}}\n\nAssistant:",
                            "edition_type": "basic",
                        },
                        "stop": ["Human:"],
                    },
                }
            },
        }
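

# The helpers below are module-level functions shared by the prompt-assembly
# code above; they do not depend on LLMNode instance state.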
def _combine_message_content_with_role(*, contents: Sequence[PromptMessageContent], role: PromptMessageRole):
    match role:
        case PromptMessageRole.USER:
            return UserPromptMessage(content=contents)
        case PromptMessageRole.ASSISTANT:
            return AssistantPromptMessage(content=contents)
        case PromptMessageRole.SYSTEM:
            return SystemPromptMessage(content=contents)
    raise NotImplementedError(f"Role {role} is not supported")


def _render_jinja2_message(
    *,
    template: str,
    jinjia2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
):
    if not template:
        return ""

    jinjia2_inputs = {}
    for jinja2_variable in jinjia2_variables:
        variable = variable_pool.get(jinja2_variable.value_selector)
        jinjia2_inputs[jinja2_variable.variable] = variable.to_object() if variable else ""
    code_execute_resp = CodeExecutor.execute_workflow_code_template(
        language=CodeLanguage.JINJA2,
        code=template,
        inputs=jinjia2_inputs,
    )
    result_text = code_execute_resp["result"]
    return result_text
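

# _handle_list_messages turns each configured chat message into one or two
# PromptMessages: "jinja2" messages are rendered through CodeExecutor, while
# "basic" messages go through variable_pool.convert_template(); any file
# segments found in the template are emitted as separate file-content messages.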
def _handle_list_messages(
    *,
    messages: Sequence[LLMNodeChatModelMessage],
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
    vision_detail_config: ImagePromptMessageContent.DETAIL,
) -> Sequence[PromptMessage]:
    prompt_messages = []
    for message in messages:
        if message.edition_type == "jinja2":
            result_text = _render_jinja2_message(
                template=message.jinja2_text or "",
                jinjia2_variables=jinja2_variables,
                variable_pool=variable_pool,
            )
            prompt_message = _combine_message_content_with_role(
                contents=[TextPromptMessageContent(data=result_text)], role=message.role
            )
            prompt_messages.append(prompt_message)
        else:
            # Get segment group from basic message
            if context:
                template = message.text.replace("{#context#}", context)
            else:
                template = message.text
            segment_group = variable_pool.convert_template(template)

            # Process segments for images
            file_contents = []
            for segment in segment_group.value:
                if isinstance(segment, ArrayFileSegment):
                    for file in segment.value:
                        if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                            file_content = file_manager.to_prompt_message_content(
                                file, image_detail_config=vision_detail_config
                            )
                            file_contents.append(file_content)
                if isinstance(segment, FileSegment):
                    file = segment.value
                    if file.type in {FileType.IMAGE, FileType.VIDEO, FileType.AUDIO, FileType.DOCUMENT}:
                        file_content = file_manager.to_prompt_message_content(
                            file, image_detail_config=vision_detail_config
                        )
                        file_contents.append(file_content)

            # Create message with text from all segments
            plain_text = segment_group.text
            if plain_text:
                prompt_message = _combine_message_content_with_role(
                    contents=[TextPromptMessageContent(data=plain_text)], role=message.role
                )
                prompt_messages.append(prompt_message)

            if file_contents:
                # Create message with image contents
                prompt_message = _combine_message_content_with_role(contents=file_contents, role=message.role)
                prompt_messages.append(prompt_message)

    return prompt_messages


def _calculate_rest_token(
    *, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity
) -> int:
    rest_tokens = 2000

    model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
    if model_context_tokens:
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle, model=model_config.model
        )

        curr_message_tokens = model_instance.get_llm_num_tokens(prompt_messages)

        max_tokens = 0
        for parameter_rule in model_config.model_schema.parameter_rules:
            if parameter_rule.name == "max_tokens" or (
                parameter_rule.use_template and parameter_rule.use_template == "max_tokens"
            ):
                max_tokens = (
                    model_config.parameters.get(parameter_rule.name)
                    or model_config.parameters.get(str(parameter_rule.use_template))
                    or 0
                )

        rest_tokens = model_context_tokens - max_tokens - curr_message_tokens
        rest_tokens = max(rest_tokens, 0)

    return rest_tokens
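

# Illustrative numbers only (not taken from any specific model schema): with a
# declared context size of 8192, max_tokens set to 1024, and a current prompt
# of 500 tokens, the history budget is 8192 - 1024 - 500 = 6668 tokens. If the
# schema declares no context size, the default budget of 2000 is used.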
def _handle_memory_chat_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> Sequence[PromptMessage]:
    memory_messages = []
    # Get messages from memory for chat model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        memory_messages = memory.get_history_prompt_messages(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
        )
    return memory_messages


def _handle_memory_completion_mode(
    *,
    memory: TokenBufferMemory | None,
    memory_config: MemoryConfig | None,
    model_config: ModelConfigWithCredentialsEntity,
) -> str:
    memory_text = ""
    # Get history text from memory for completion model
    if memory and memory_config:
        rest_tokens = _calculate_rest_token(prompt_messages=[], model_config=model_config)
        if not memory_config.role_prefix:
            raise MemoryRolePrefixRequiredError("Memory role prefix is required for completion model.")
        memory_text = memory.get_history_prompt_text(
            max_token_limit=rest_tokens,
            message_limit=memory_config.window.size if memory_config.window.enabled else None,
            human_prefix=memory_config.role_prefix.user,
            ai_prefix=memory_config.role_prefix.assistant,
        )
    return memory_text


def _handle_completion_template(
    *,
    template: LLMNodeCompletionModelPromptTemplate,
    context: Optional[str],
    jinja2_variables: Sequence[VariableSelector],
    variable_pool: VariablePool,
) -> Sequence[PromptMessage]:
    """Handle completion template processing outside of LLMNode class.

    Args:
        template: The completion model prompt template
        context: Optional context string
        jinja2_variables: Variables for jinja2 template rendering
        variable_pool: Variable pool for template conversion

    Returns:
        Sequence of prompt messages
    """
    prompt_messages = []
    if template.edition_type == "jinja2":
        result_text = _render_jinja2_message(
            template=template.jinja2_text or "",
            jinjia2_variables=jinja2_variables,
            variable_pool=variable_pool,
        )
    else:
        if context:
            template_text = template.text.replace("{#context#}", context)
        else:
            template_text = template.text
        result_text = variable_pool.convert_template(template_text).text
    prompt_message = _combine_message_content_with_role(
        contents=[TextPromptMessageContent(data=result_text)], role=PromptMessageRole.USER
    )
    prompt_messages.append(prompt_message)
    return prompt_messages