import logging
from collections.abc import Generator
from typing import Optional, Union

from dify_plugin import LargeLanguageModel
from dify_plugin.entities import I18nObject
from dify_plugin.errors.model import (
    CredentialsValidateFailedError,
)
from dify_plugin.entities.model import (
    AIModelEntity,
    FetchFrom,
    ModelType,
)
from dify_plugin.entities.model.llm import (
    LLMResult,
)
from dify_plugin.entities.model.message import (
    PromptMessage,
    PromptMessageTool,
)

logger = logging.getLogger(__name__)


class {{ .PluginName | SnakeToCamel }}LargeLanguageModel(LargeLanguageModel):
- """
- Model class for {{ .PluginName }} large language model.
- """
- def _invoke(
- self,
- model: str,
- credentials: dict,
- prompt_messages: list[PromptMessage],
- model_parameters: dict,
- tools: Optional[list[PromptMessageTool]] = None,
- stop: Optional[list[str]] = None,
- stream: bool = True,
- user: Optional[str] = None,
- ) -> Union[LLMResult, Generator]:
- """
- Invoke large language model
- :param model: model name
- :param credentials: model credentials
- :param prompt_messages: prompt messages
- :param model_parameters: model parameters
- :param tools: tools for tool calling
- :param stop: stop words
- :param stream: is stream response
- :param user: unique user id
- :return: full response or stream response chunk generator result
- """
        pass

    def get_num_tokens(
        self,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        tools: Optional[list[PromptMessageTool]] = None,
    ) -> int:
        """
        Get number of tokens for given prompt messages

        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param tools: tools for tool calling
        :return: number of tokens
        """
        return 0

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return: None
        """
        try:
            pass
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    def get_customizable_model_schema(
        self, model: str, credentials: dict
    ) -> AIModelEntity:
        """
        If your model supports fine-tuning, this method returns the schema of
        the base model, renamed to the fine-tuned model name.

        :param model: model name
        :param credentials: credentials
        :return: model schema
        """
        entity = AIModelEntity(
            model=model,
            label=I18nObject(zh_Hans=model, en_US=model),
            model_type=ModelType.LLM,
            features=[],
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={},
            parameter_rules=[],
        )

        return entity