xinference.py

import os
import re
from typing import Union

import pytest
from _pytest.monkeypatch import MonkeyPatch
from requests import Response
from requests.exceptions import ConnectionError
from requests.sessions import Session
from xinference_client.client.restful.restful_client import (
    Client,
    RESTfulChatModelHandle,
    RESTfulEmbeddingModelHandle,
    RESTfulGenerateModelHandle,
    RESTfulRerankModelHandle,
)
from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage
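
# Stand-in implementations of the parts of the xinference RESTful client that these
# tests exercise. When MOCK_SWITCH=true, the fixture at the bottom of this file patches
# them over the real client classes so the tests can run without a live Xinference server.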
class MockXinferenceClass:
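    # Replacement for Client.get_model: validates the base URL, then returns the handle
    # class that matches the requested model_uid ("generate", "chat", "embedding" or "rerank").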
    def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
        if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url):
            raise RuntimeError("404 Not Found")

        if "generate" == model_uid:
            return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if "chat" == model_uid:
            return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if "embedding" == model_uid:
            return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if "rerank" == model_uid:
            return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={})

        raise RuntimeError("404 Not Found")
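
    # Replacement for requests.Session.get: fabricates the JSON payloads a real Xinference
    # server would return for the /v1/models/<uid> and /v1/cluster/auth endpoints.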
    def get(self: Session, url: str, **kwargs):
        response = Response()
        if "v1/models/" in url:
            # get model uid
            model_uid = url.split("/")[-1] or ""
            if not re.match(
                r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", model_uid
            ) and model_uid not in {"generate", "chat", "embedding", "rerank"}:
                response.status_code = 404
                response._content = b"{}"
                return response

            # check if url is valid
            if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", url):
                response.status_code = 404
                response._content = b"{}"
                return response

            if model_uid in {"generate", "chat"}:
                response.status_code = 200
                response._content = b"""{
    "model_type": "LLM",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "chatglm3-6b",
    "model_lang": [
        "en"
    ],
    "model_ability": [
        "generate",
        "chat"
    ],
    "model_description": "latest chatglm3",
    "model_format": "pytorch",
    "model_size_in_billions": 7,
    "quantization": "none",
    "model_hub": "huggingface",
    "revision": null,
    "context_length": 2048,
    "replica": 1
}"""
                return response
            elif model_uid == "embedding":
                response.status_code = 200
                response._content = b"""{
    "model_type": "embedding",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "bge",
    "model_lang": [
        "en"
    ],
    "revision": null,
    "max_tokens": 512
}"""
                return response
        elif "v1/cluster/auth" in url:
            response.status_code = 200
            response._content = b"""{
    "auth": true
}"""
            return response

    def _check_cluster_authenticated(self):
        self._cluster_authed = True
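
    # Replacement for RESTfulRerankModelHandle.rerank: echoes the first top_n documents back
    # with a fixed relevance score instead of calling a real rerank model.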
    def rerank(
        self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int, return_documents: bool
    ) -> dict:
        # check if self._model_uid is a valid uuid
        if (
            not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid)
            and self._model_uid != "rerank"
        ):
            raise RuntimeError("404 Not Found")

        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._base_url):
            raise RuntimeError("404 Not Found")

        if top_n is None:
            top_n = 1

        return {
            "results": [
                {"index": i, "document": doc, "relevance_score": 0.9} for i, doc in enumerate(documents[:top_n])
            ]
        }
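
    # Replacement for RESTfulEmbeddingModelHandle.create_embedding: returns one fixed
    # 768-dimensional vector per input string, so callers get a deterministic shape.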
    def create_embedding(self: RESTfulGenerateModelHandle, input: Union[str, list[str]], **kwargs) -> dict:
        # check if self._model_uid is a valid uuid
        if (
            not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid)
            and self._model_uid != "embedding"
        ):
            raise RuntimeError("404 Not Found")

        if isinstance(input, str):
            input = [input]
        ipt_len = len(input)

        embedding = Embedding(
            object="list",
            model=self._model_uid,
            data=[
                EmbeddingData(index=i, object="embedding", embedding=[1919.810 for _ in range(768)])
                for i in range(ipt_len)
            ],
            usage=EmbeddingUsage(prompt_tokens=ipt_len, total_tokens=ipt_len),
        )
        return embedding
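
# The mocks are only applied when the MOCK_SWITCH environment variable is set to "true";
# otherwise the fixture below is a no-op and the real client is used.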
MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"

@pytest.fixture
def setup_xinference_mock(request, monkeypatch: MonkeyPatch):
    if MOCK:
        monkeypatch.setattr(Client, "get_model", MockXinferenceClass.get_chat_model)
        monkeypatch.setattr(Client, "_check_cluster_authenticated", MockXinferenceClass._check_cluster_authenticated)
        monkeypatch.setattr(Session, "get", MockXinferenceClass.get)
        monkeypatch.setattr(RESTfulEmbeddingModelHandle, "create_embedding", MockXinferenceClass.create_embedding)
        monkeypatch.setattr(RESTfulRerankModelHandle, "rerank", MockXinferenceClass.rerank)

    yield

    if MOCK:
        monkeypatch.undo()
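
# Minimal usage sketch (illustrative only; the test name and assertion below are assumptions,
# not part of this module). With MOCK_SWITCH=true, a test that requests the fixture can talk
# to the patched client as if a real server were listening:
#
#   def test_embedding_shape(setup_xinference_mock):
#       client = Client("http://127.0.0.1:9997")
#       handle = client.get_model("embedding")
#       result = handle.create_embedding("hello")
#       assert len(result["data"][0]["embedding"]) == 768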