test_llm.py

import os
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.llm.llm import OpenLLMLargeLanguageModel
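

# These tests exercise a live OpenLLM deployment: export OPENLLM_SERVER_URL
# (the base URL of the running server) before invoking them, otherwise the
# credential checks and invocations below will fail.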
def test_validate_credentials_for_chat_model():
    model = OpenLLMLargeLanguageModel()

    # A bogus server_url must be rejected...
    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='NOT IMPORTANT',
            credentials={
                'server_url': 'invalid_key',
            }
        )

    # ...while the real server URL from the environment must validate cleanly.
    model.validate_credentials(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        }
    )
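

# A blocking (stream=False) invocation should come back as one complete
# LLMResult carrying both the generated text and token usage.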
def test_invoke_model():
    model = OpenLLMLargeLanguageModel()

    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        user="abc-123",
        stream=False
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
    assert response.usage.total_tokens > 0
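

# The same call with stream=True should instead return a generator that
# yields LLMResultChunk objects as the model produces them.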
def test_invoke_stream_model():
    model = OpenLLMLargeLanguageModel()

    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)

    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        # Every chunk before the final one must carry some content; the
        # closing chunk (finish_reason set) is allowed to be empty.
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
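

# Token counting needs no generation; the prompt 'Hello World!' is expected
# to count as exactly 3 tokens.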
def test_get_num_tokens():
    model = OpenLLMLargeLanguageModel()

    response = model.get_num_tokens(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        tools=[]
    )

    assert isinstance(response, int)
    assert response == 3
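

# A sketch of how to run this file (the path is assumed from the imports
# above and may differ in your checkout):
#   OPENLLM_SERVER_URL=<server url> pytest tests/integration_tests/model_runtime/openllm/test_llm.py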