# test_llm.py — integration tests for the Tongyi (DashScope) LLM provider.
# Requires the TONGYI_DASHSCOPE_API_KEY environment variable and network access.
import os
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.tongyi.llm.llm import TongyiLargeLanguageModel
  9. def test_validate_credentials():
  10. model = TongyiLargeLanguageModel()
  11. with pytest.raises(CredentialsValidateFailedError):
  12. model.validate_credentials(
  13. model='qwen-turbo',
  14. credentials={
  15. 'dashscope_api_key': 'invalid_key'
  16. }
  17. )
  18. model.validate_credentials(
  19. model='qwen-turbo',
  20. credentials={
  21. 'dashscope_api_key': os.environ.get('TONGYI_DASHSCOPE_API_KEY')
  22. }
  23. )
  24. def test_invoke_model():
  25. model = TongyiLargeLanguageModel()
  26. response = model.invoke(
  27. model='qwen-turbo',
  28. credentials={
  29. 'dashscope_api_key': os.environ.get('TONGYI_DASHSCOPE_API_KEY')
  30. },
  31. prompt_messages=[
  32. UserPromptMessage(
  33. content='Who are you?'
  34. )
  35. ],
  36. model_parameters={
  37. 'temperature': 0.5,
  38. 'max_tokens': 10
  39. },
  40. stop=['How'],
  41. stream=False,
  42. user="abc-123"
  43. )
  44. assert isinstance(response, LLMResult)
  45. assert len(response.message.content) > 0
  46. def test_invoke_stream_model():
  47. model = TongyiLargeLanguageModel()
  48. response = model.invoke(
  49. model='qwen-turbo',
  50. credentials={
  51. 'dashscope_api_key': os.environ.get('TONGYI_DASHSCOPE_API_KEY')
  52. },
  53. prompt_messages=[
  54. UserPromptMessage(
  55. content='Hello World!'
  56. )
  57. ],
  58. model_parameters={
  59. 'temperature': 0.5,
  60. 'max_tokens': 100,
  61. 'seed': 1234
  62. },
  63. stream=True,
  64. user="abc-123"
  65. )
  66. assert isinstance(response, Generator)
  67. for chunk in response:
  68. assert isinstance(chunk, LLMResultChunk)
  69. assert isinstance(chunk.delta, LLMResultChunkDelta)
  70. assert isinstance(chunk.delta.message, AssistantPromptMessage)
  71. assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
  72. def test_get_num_tokens():
  73. model = TongyiLargeLanguageModel()
  74. num_tokens = model.get_num_tokens(
  75. model='qwen-turbo',
  76. credentials={
  77. 'dashscope_api_key': os.environ.get('TONGYI_DASHSCOPE_API_KEY')
  78. },
  79. prompt_messages=[
  80. SystemPromptMessage(
  81. content='You are a helpful AI assistant.',
  82. ),
  83. UserPromptMessage(
  84. content='Hello World!'
  85. )
  86. ]
  87. )
  88. assert num_tokens == 12