base.py 1.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, List

from langchain.schema import BaseRetriever, Document

from models.dataset import Dataset
  6. class BaseIndex(ABC):
  7. def __init__(self, dataset: Dataset):
  8. self.dataset = dataset
  9. @abstractmethod
  10. def create(self, texts: list[Document], **kwargs) -> BaseIndex:
  11. raise NotImplementedError
  12. @abstractmethod
  13. def add_texts(self, texts: list[Document], **kwargs):
  14. raise NotImplementedError
  15. @abstractmethod
  16. def text_exists(self, id: str) -> bool:
  17. raise NotImplementedError
  18. @abstractmethod
  19. def delete_by_ids(self, ids: list[str]) -> None:
  20. raise NotImplementedError
  21. @abstractmethod
  22. def delete_by_document_id(self, document_id: str):
  23. raise NotImplementedError
  24. @abstractmethod
  25. def get_retriever(self, **kwargs: Any) -> BaseRetriever:
  26. raise NotImplementedError
  27. @abstractmethod
  28. def search(
  29. self, query: str,
  30. **kwargs: Any
  31. ) -> List[Document]:
  32. raise NotImplementedError
  33. def delete(self) -> None:
  34. raise NotImplementedError
  35. def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
  36. for text in texts:
  37. doc_id = text.metadata['doc_id']
  38. exists_duplicate_node = self.text_exists(doc_id)
  39. if exists_duplicate_node:
  40. texts.remove(text)
  41. return texts
  42. def _get_uuids(self, texts: list[Document]) -> list[str]:
  43. return [text.metadata['doc_id'] for text in texts]