test_localai_model.py

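"""Integration tests for the LocalAI text-generation model wrapper.

The database lookup of the provider model and the token decryption helper are
mocked, so the only external dependency is a running LocalAI server whose URL
is taken from the LOCALAI_SERVER_URL environment variable.
"""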
import json
import os
from unittest.mock import patch, MagicMock

from core.model_providers.models.llm.localai_model import LocalAIModel
from core.model_providers.providers.localai_provider import LocalAIProvider
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
from models.provider import Provider, ProviderType, ProviderModel


def get_mock_provider(server_url):
    return Provider(
        id='provider_id',
        tenant_id='tenant_id',
        provider_name='localai',
        provider_type=ProviderType.CUSTOM.value,
        encrypted_config=json.dumps({}),
        is_valid=True,
    )


def get_mock_model(model_name, mocker):
    model_kwargs = ModelKwargs(
        max_tokens=10,
        temperature=0
    )
    server_url = os.environ['LOCALAI_SERVER_URL']

    mock_query = MagicMock()
    mock_query.filter.return_value.first.return_value = ProviderModel(
        provider_name='localai',
        model_name=model_name,
        model_type=ModelType.TEXT_GENERATION.value,
        encrypted_config=json.dumps({
            'server_url': server_url,
            'completion_type': 'completion'
        }),
        is_valid=True,
    )
    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)

    openai_provider = LocalAIProvider(provider=get_mock_provider(server_url))
    return LocalAIModel(
        model_provider=openai_provider,
        name=model_name,
        model_kwargs=model_kwargs
    )


def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
    return encrypted_openai_api_key


@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_get_num_tokens(mock_decrypt, mocker):
    openai_model = get_mock_model('ggml-gpt4all-j', mocker)
    rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
    assert rst > 0


@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
def test_run(mock_decrypt, mocker):
    mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)

    openai_model = get_mock_model('ggml-gpt4all-j', mocker)
    rst = openai_model.run(
        [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
        stop=['\nHuman:'],
    )
    assert len(rst.content) > 0
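

# Running these tests requires pytest plus pytest-mock (for the `mocker` fixture)
# and a reachable LocalAI instance serving the 'ggml-gpt4all-j' model.
# The URL and port below are illustrative, not part of the test itself:
#   LOCALAI_SERVER_URL=http://127.0.0.1:8080 pytest test_localai_model.py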