from typing import Any, Literal, Optional, Union

from pydantic import BaseModel

from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.workflow.entities.base_node_data_entities import BaseNodeData
from core.workflow.entities.variable_entities import VariableSelector


class ModelConfig(BaseModel):
    """
    Model Config.

    Identifies the LLM to invoke and carries its invocation parameters.
    """
    provider: str  # model provider identifier
    name: str  # model name within that provider
    mode: str  # invocation mode (presumably 'chat' or 'completion' — TODO confirm against callers)
    # NOTE: a mutable {} default is safe here — pydantic copies field
    # defaults per instance, so instances do not share this dict.
    completion_params: dict[str, Any] = {}


class ContextConfig(BaseModel):
    """
    Context Config.

    Controls whether external context is injected and, if so, which
    variable (addressed by its selector path) supplies it.
    """
    enabled: bool  # whether context injection is active for this node
    variable_selector: Optional[list[str]] = None  # selector path of the context variable; None when disabled


class VisionConfig(BaseModel):
    """
    Vision Config.

    Vision (image input) settings for the LLM node.
    """
    class Configs(BaseModel):
        """
        Configs.

        Nested detail options for vision input.
        """
        # image resolution passed to the model; only these two values are accepted
        detail: Literal['low', 'high']

    enabled: bool  # whether vision input is active for this node
    configs: Optional[Configs] = None  # detail options; may be absent even when enabled


class PromptConfig(BaseModel):
    """
    Prompt Config.

    Extra prompt-rendering settings, currently only the variables made
    available to Jinja2 templates.
    """
    jinja2_variables: Optional[list[VariableSelector]] = None  # variables exposed to jinja2 prompt rendering


class LLMNodeChatModelMessage(ChatModelMessage):
    """
    LLM Node Chat Model Message.

    Extends the base chat message with an optional Jinja2 template body.
    """
    jinja2_text: Optional[str] = None  # raw jinja2 template text; None when not using jinja2 rendering


class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate):
    """
    LLM Node Completion Model Prompt Template.

    Extends the base completion prompt template with an optional Jinja2
    template body.
    """
    jinja2_text: Optional[str] = None  # raw jinja2 template text; None when not using jinja2 rendering


class LLMNodeData(BaseNodeData):
    """
    LLM Node Data.

    Aggregate configuration for an LLM workflow node: the model to call,
    the prompt (a message list for chat models or a single template for
    completion models), and optional prompt/memory/context/vision settings.
    """
    model: ModelConfig
    # chat models take a list of messages; completion models take one template
    prompt_template: Union[list[LLMNodeChatModelMessage], LLMNodeCompletionModelPromptTemplate]
    prompt_config: Optional[PromptConfig] = None
    memory: Optional[MemoryConfig] = None  # conversation-memory settings; None when memory is unused
    context: ContextConfig
    vision: VisionConfig