# llm.py — Dify plugin LLM template (scaffold; placeholders filled in by the plugin generator)

  1. import logging
  2. from collections.abc import Generator
  3. from typing import Optional, Union
  4. from dify_plugin import LargeLanguageModel
  5. from dify_plugin.entities import I18nObject
  6. from dify_plugin.errors.model import (
  7. CredentialsValidateFailedError,
  8. )
  9. from dify_plugin.entities.model import (
  10. AIModelEntity,
  11. FetchFrom,
  12. ModelType,
  13. )
  14. from dify_plugin.entities.model.llm import (
  15. LLMResult,
  16. )
  17. from dify_plugin.entities.model.message import (
  18. PromptMessage,
  19. PromptMessageTool,
  20. )
  21. logger = logging.getLogger(__name__)
  22. class {{ .PluginName | SnakeToCamel }}LargeLanguageModel(LargeLanguageModel):
  23. """
  24. Model class for {{ .PluginName }} large language model.
  25. """
  26. def _invoke(
  27. self,
  28. model: str,
  29. credentials: dict,
  30. prompt_messages: list[PromptMessage],
  31. model_parameters: dict,
  32. tools: Optional[list[PromptMessageTool]] = None,
  33. stop: Optional[list[str]] = None,
  34. stream: bool = True,
  35. user: Optional[str] = None,
  36. ) -> Union[LLMResult, Generator]:
  37. """
  38. Invoke large language model
  39. :param model: model name
  40. :param credentials: model credentials
  41. :param prompt_messages: prompt messages
  42. :param model_parameters: model parameters
  43. :param tools: tools for tool calling
  44. :param stop: stop words
  45. :param stream: is stream response
  46. :param user: unique user id
  47. :return: full response or stream response chunk generator result
  48. """
  49. pass
  50. def get_num_tokens(
  51. self,
  52. model: str,
  53. credentials: dict,
  54. prompt_messages: list[PromptMessage],
  55. tools: Optional[list[PromptMessageTool]] = None,
  56. ) -> int:
  57. """
  58. Get number of tokens for given prompt messages
  59. :param model: model name
  60. :param credentials: model credentials
  61. :param prompt_messages: prompt messages
  62. :param tools: tools for tool calling
  63. :return:
  64. """
  65. return 0
  66. def validate_credentials(self, model: str, credentials: dict) -> None:
  67. """
  68. Validate model credentials
  69. :param model: model name
  70. :param credentials: model credentials
  71. :return:
  72. """
  73. try:
  74. pass
  75. except Exception as ex:
  76. raise CredentialsValidateFailedError(str(ex))
  77. def get_customizable_model_schema(
  78. self, model: str, credentials: dict
  79. ) -> AIModelEntity:
  80. """
  81. If your model supports fine-tuning, this method returns the schema of the base model
  82. but renamed to the fine-tuned model name.
  83. :param model: model name
  84. :param credentials: credentials
  85. :return: model schema
  86. """
  87. entity = AIModelEntity(
  88. model=model,
  89. label=I18nObject(zh_Hans=model, en_US=model),
  90. model_type=ModelType.LLM,
  91. features=[],
  92. fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
  93. model_properties={},
  94. parameter_rules=[],
  95. )
  96. return entity