@@ -7,6 +7,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEnti
 from core.file import File
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
+from core.model_runtime.entities import ImagePromptMessageContent
 from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage
 from core.model_runtime.entities.message_entities import (
     AssistantPromptMessage,
@@ -129,6 +130,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )
         else:
@@ -139,6 +141,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=node_data.vision.configs.detail,
             )

             prompt_message_tools = []
@@ -267,6 +270,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> tuple[list[PromptMessage], list[PromptMessageTool]]:
         """
         Generate function call prompt.
@@ -289,6 +293,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )
@@ -347,6 +352,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate prompt engineering prompt.
@@ -361,6 +367,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         elif model_mode == ModelMode.CHAT:
             return self._generate_prompt_engineering_chat_prompt(
@@ -370,6 +377,7 @@ class ParameterExtractorNode(LLMNode):
                 model_config=model_config,
                 memory=memory,
                 files=files,
+                vision_detail=vision_detail,
             )
         else:
             raise InvalidModelModeError(f"Invalid model mode: {model_mode}")
@@ -382,6 +390,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate completion prompt.
@@ -402,6 +411,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=memory,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )

         return prompt_messages
@@ -414,6 +424,7 @@ class ParameterExtractorNode(LLMNode):
         model_config: ModelConfigWithCredentialsEntity,
         memory: Optional[TokenBufferMemory],
         files: Sequence[File],
+        vision_detail: Optional[ImagePromptMessageContent.DETAIL] = None,
     ) -> list[PromptMessage]:
         """
         Generate chat prompt.
@@ -441,6 +452,7 @@ class ParameterExtractorNode(LLMNode):
             memory_config=node_data.memory,
             memory=None,
             model_config=model_config,
+            image_detail_config=vision_detail,
         )
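
Note: taken together, the hunks above add one optional vision-detail setting and thread it from the node configuration (node_data.vision.configs.detail) through every prompt-construction helper, where it is forwarded to the prompt transform as image_detail_config. Below is a minimal runnable sketch of that pattern; ImageDetail and build_prompt are hypothetical stand-ins for the real ImagePromptMessageContent.DETAIL enum and prompt-transform call, not the Dify API itself.

from enum import Enum
from typing import Optional


class ImageDetail(str, Enum):
    # Stand-in for ImagePromptMessageContent.DETAIL (assumed shape: a small string enum).
    LOW = "low"
    HIGH = "high"


def build_prompt(query: str, vision_detail: Optional[ImageDetail] = None) -> dict:
    # Mirrors the diff: the new kwarg defaults to None, so existing call sites
    # keep working, and the value is forwarded under the image_detail_config key.
    return {"query": query, "image_detail_config": vision_detail}


# Vision-enabled callers pass the configured detail through explicitly.
print(build_prompt("extract the fields", vision_detail=ImageDetail.HIGH))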