# test_llm.py — integration tests for the Tongyi large-language-model provider.
import os
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.tongyi.llm.llm import TongyiLargeLanguageModel
  8. def test_validate_credentials():
  9. model = TongyiLargeLanguageModel()
  10. with pytest.raises(CredentialsValidateFailedError):
  11. model.validate_credentials(model="qwen-turbo", credentials={"dashscope_api_key": "invalid_key"})
  12. model.validate_credentials(
  13. model="qwen-turbo", credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")}
  14. )
  15. def test_invoke_model():
  16. model = TongyiLargeLanguageModel()
  17. response = model.invoke(
  18. model="qwen-turbo",
  19. credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
  20. prompt_messages=[UserPromptMessage(content="Who are you?")],
  21. model_parameters={"temperature": 0.5, "max_tokens": 10},
  22. stop=["How"],
  23. stream=False,
  24. user="abc-123",
  25. )
  26. assert isinstance(response, LLMResult)
  27. assert len(response.message.content) > 0
  28. def test_invoke_stream_model():
  29. model = TongyiLargeLanguageModel()
  30. response = model.invoke(
  31. model="qwen-turbo",
  32. credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
  33. prompt_messages=[UserPromptMessage(content="Hello World!")],
  34. model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234},
  35. stream=True,
  36. user="abc-123",
  37. )
  38. assert isinstance(response, Generator)
  39. for chunk in response:
  40. assert isinstance(chunk, LLMResultChunk)
  41. assert isinstance(chunk.delta, LLMResultChunkDelta)
  42. assert isinstance(chunk.delta.message, AssistantPromptMessage)
  43. assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
  44. def test_get_num_tokens():
  45. model = TongyiLargeLanguageModel()
  46. num_tokens = model.get_num_tokens(
  47. model="qwen-turbo",
  48. credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
  49. prompt_messages=[
  50. SystemPromptMessage(
  51. content="You are a helpful AI assistant.",
  52. ),
  53. UserPromptMessage(content="Hello World!"),
  54. ],
  55. )
  56. assert num_tokens == 12