xinference.py

import os
import re
from typing import Union

import pytest
from _pytest.monkeypatch import MonkeyPatch
from requests import Response
from requests.sessions import Session
from xinference_client.client.restful.restful_client import (
    Client,
    RESTfulChatModelHandle,
    RESTfulEmbeddingModelHandle,
    RESTfulGenerateModelHandle,
    RESTfulRerankModelHandle,
)
from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage


class MockXinferenceClass:
    """Mock implementations of the xinference RESTful client, applied when MOCK_SWITCH is true."""

    def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
        if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url):
            raise RuntimeError("404 Not Found")

        if model_uid == "generate":
            return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if model_uid == "chat":
            return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if model_uid == "embedding":
            return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        if model_uid == "rerank":
            return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={})
        raise RuntimeError("404 Not Found")

    def get(self: Session, url: str, **kwargs):
        response = Response()
        if "v1/models/" in url:
            # get the model uid from the request path
            model_uid = url.split("/")[-1] or ""
            if not re.match(
                r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", model_uid
            ) and model_uid not in {"generate", "chat", "embedding", "rerank"}:
                response.status_code = 404
                response._content = b"{}"
                return response

            # check if the url is valid
            if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", url):
                response.status_code = 404
                response._content = b"{}"
                return response

            if model_uid in {"generate", "chat"}:
                response.status_code = 200
                response._content = b"""{
    "model_type": "LLM",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "chatglm3-6b",
    "model_lang": [
        "en"
    ],
    "model_ability": [
        "generate",
        "chat"
    ],
    "model_description": "latest chatglm3",
    "model_format": "pytorch",
    "model_size_in_billions": 7,
    "quantization": "none",
    "model_hub": "huggingface",
    "revision": null,
    "context_length": 2048,
    "replica": 1
}"""
                return response
            elif model_uid == "embedding":
                response.status_code = 200
                response._content = b"""{
    "model_type": "embedding",
    "address": "127.0.0.1:43877",
    "accelerators": [
        "0",
        "1"
    ],
    "model_name": "bge",
    "model_lang": [
        "en"
    ],
    "revision": null,
    "max_tokens": 512
}"""
                return response
        elif "v1/cluster/auth" in url:
            response.status_code = 200
            response._content = b"""{
    "auth": true
}"""
            return response

    def _check_cluster_authenticated(self):
        self._cluster_authed = True

    def rerank(
        self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int, return_documents: bool
    ) -> dict:
        # check if self._model_uid is a valid uuid
        if (
            not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid)
            and self._model_uid != "rerank"
        ):
            raise RuntimeError("404 Not Found")

        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._base_url):
            raise RuntimeError("404 Not Found")

        if top_n is None:
            top_n = 1

        return {
            "results": [
                {"index": i, "document": doc, "relevance_score": 0.9} for i, doc in enumerate(documents[:top_n])
            ]
        }

    def create_embedding(self: RESTfulEmbeddingModelHandle, input: Union[str, list[str]], **kwargs) -> dict:
        # check if self._model_uid is a valid uuid
        if (
            not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid)
            and self._model_uid != "embedding"
        ):
            raise RuntimeError("404 Not Found")

        if isinstance(input, str):
            input = [input]
        ipt_len = len(input)

        embedding = Embedding(
            object="list",
            model=self._model_uid,
            data=[
                EmbeddingData(index=i, object="embedding", embedding=[1919.810 for _ in range(768)])
                for i in range(ipt_len)
            ],
            usage=EmbeddingUsage(prompt_tokens=ipt_len, total_tokens=ipt_len),
        )

        return embedding


MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"


@pytest.fixture
def setup_xinference_mock(request, monkeypatch: MonkeyPatch):
    if MOCK:
        monkeypatch.setattr(Client, "get_model", MockXinferenceClass.get_chat_model)
        monkeypatch.setattr(Client, "_check_cluster_authenticated", MockXinferenceClass._check_cluster_authenticated)
        monkeypatch.setattr(Session, "get", MockXinferenceClass.get)
        monkeypatch.setattr(RESTfulEmbeddingModelHandle, "create_embedding", MockXinferenceClass.create_embedding)
        monkeypatch.setattr(RESTfulRerankModelHandle, "rerank", MockXinferenceClass.rerank)

    yield

    if MOCK:
        monkeypatch.undo()
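
# Example usage (illustrative sketch; the server URL and model uid are placeholders):
# with MOCK_SWITCH=true, a test that requests the fixture exercises the patched client
# without a running xinference server.
#
#     def test_mocked_embedding(setup_xinference_mock):
#         client = Client("http://127.0.0.1:9997")
#         handle = client.get_model("embedding")
#         resp = handle.create_embedding("hello")
#         assert len(resp["data"]) == 1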