request.py
  1. from collections.abc import Mapping
  2. from typing import Any, Literal, Optional
  3. from pydantic import BaseModel, Field, field_validator
  4. from core.entities.provider_entities import BasicProviderConfig
  5. from core.model_runtime.entities.message_entities import (
  6. AssistantPromptMessage,
  7. PromptMessage,
  8. PromptMessageRole,
  9. PromptMessageTool,
  10. SystemPromptMessage,
  11. ToolPromptMessage,
  12. UserPromptMessage,
  13. )
  14. from core.model_runtime.entities.model_entities import ModelType
  15. from core.workflow.nodes.parameter_extractor.entities import (
  16. ModelConfig as ParameterExtractorModelConfig,
  17. )
  18. from core.workflow.nodes.parameter_extractor.entities import (
  19. ParameterConfig,
  20. )
  21. from core.workflow.nodes.question_classifier.entities import (
  22. ClassConfig,
  23. )
  24. from core.workflow.nodes.question_classifier.entities import (
  25. ModelConfig as QuestionClassifierModelConfig,
  26. )
class RequestInvokeTool(BaseModel):
    """
    Request to invoke a tool
    """
    # NOTE(review): no fields are visible in this view of the file. If the
    # class truly has an empty body, the model accepts no payload — confirm
    # against the full source before relying on that.
class BaseRequestInvokeModel(BaseModel):
    # Shared payload for all model-invocation requests below: identifies the
    # model provider and model name; subclasses pin `model_type` to the
    # specific ModelType they invoke.
    provider: str
    model: str
    model_type: ModelType
  35. class RequestInvokeLLM(BaseRequestInvokeModel):
  36. """
  37. Request to invoke LLM
  38. """
  39. model_type: ModelType = ModelType.LLM
  40. mode: str
  41. model_parameters: dict[str, Any] = Field(default_factory=dict)
  42. prompt_messages: list[PromptMessage] = Field(default_factory=list)
  43. tools: Optional[list[PromptMessageTool]] = Field(default_factory=list)
  44. stop: Optional[list[str]] = Field(default_factory=list)
  45. stream: Optional[bool] = False
  46. @field_validator("prompt_messages", mode="before")
  47. @classmethod
  48. def convert_prompt_messages(cls, v):
  49. if not isinstance(v, list):
  50. raise ValueError("prompt_messages must be a list")
  51. for i in range(len(v)):
  52. if v[i]["role"] == PromptMessageRole.USER.value:
  53. v[i] = UserPromptMessage(**v[i])
  54. elif v[i]["role"] == PromptMessageRole.ASSISTANT.value:
  55. v[i] = AssistantPromptMessage(**v[i])
  56. elif v[i]["role"] == PromptMessageRole.SYSTEM.value:
  57. v[i] = SystemPromptMessage(**v[i])
  58. elif v[i]["role"] == PromptMessageRole.TOOL.value:
  59. v[i] = ToolPromptMessage(**v[i])
  60. else:
  61. v[i] = PromptMessage(**v[i])
  62. return v
class RequestInvokeTextEmbedding(BaseRequestInvokeModel):
    """
    Request to invoke text embedding
    """
    model_type: ModelType = ModelType.TEXT_EMBEDDING
    # Texts to embed; one embedding is presumably produced per entry — confirm
    # against the invoking service.
    texts: list[str]
class RequestInvokeRerank(BaseRequestInvokeModel):
    """
    Request to invoke rerank
    """
    model_type: ModelType = ModelType.RERANK
    # Query to rank the candidate documents against.
    query: str
    # Candidate documents to be reranked.
    docs: list[str]
    # NOTE(review): threshold/top_n semantics are enforced by the rerank
    # implementation, not validated here.
    score_threshold: float
    top_n: int
class RequestInvokeTTS(BaseRequestInvokeModel):
    """
    Request to invoke TTS
    """
    model_type: ModelType = ModelType.TTS
    # Text to synthesize into speech.
    content_text: str
    # Voice identifier; valid values depend on the TTS provider.
    voice: str
  85. class RequestInvokeSpeech2Text(BaseRequestInvokeModel):
  86. """
  87. Request to invoke speech2text
  88. """
  89. model_type: ModelType = ModelType.SPEECH2TEXT
  90. file: bytes
  91. @field_validator("file", mode="before")
  92. @classmethod
  93. def convert_file(cls, v):
  94. # hex string to bytes
  95. if isinstance(v, str):
  96. return bytes.fromhex(v)
  97. else:
  98. raise ValueError("file must be a hex string")
class RequestInvokeModeration(BaseRequestInvokeModel):
    """
    Request to invoke moderation
    """
    model_type: ModelType = ModelType.MODERATION
    # Text to be checked by the moderation model.
    text: str
class RequestInvokeParameterExtractorNode(BaseModel):
    """
    Request to invoke parameter extractor node
    """
    # Parameters the node should extract from the query.
    parameters: list[ParameterConfig]
    # Model configuration used by the extractor node.
    model: ParameterExtractorModelConfig
    # Free-form instruction guiding the extraction.
    instruction: str
    # Input text to extract parameters from.
    query: str
class RequestInvokeQuestionClassifierNode(BaseModel):
    """
    Request to invoke question classifier node
    """
    # Input text to classify.
    query: str
    # Model configuration used by the classifier node.
    model: QuestionClassifierModelConfig
    # Candidate classes the query is classified into.
    classes: list[ClassConfig]
    # Free-form instruction guiding the classification.
    instruction: str
class RequestInvokeApp(BaseModel):
    """
    Request to invoke app
    """
    # Identifier of the app to invoke.
    app_id: str
    # App input variables.
    inputs: dict[str, Any]
    # Optional user query (not all app modes require one).
    query: Optional[str] = None
    # Whether the app responds in one shot or streams chunks.
    response_mode: Literal["blocking", "streaming"]
    # Existing conversation to continue; None starts a new one — TODO confirm
    # against the invoking service.
    conversation_id: Optional[str] = None
    # End-user identifier on whose behalf the app is invoked.
    user: Optional[str] = None
    # File payloads attached to the invocation; schema defined by the app API.
    files: list[dict] = Field(default_factory=list)
  132. class RequestInvokeEncrypt(BaseModel):
  133. """
  134. Request to encryption
  135. """
  136. opt: Literal["encrypt", "decrypt", "clear"]
  137. namespace: Literal["endpoint"]
  138. identity: str
  139. data: dict = Field(default_factory=dict)
  140. config: Mapping[str, BasicProviderConfig] = Field(default_factory=Mapping)