dataset_retriever_tool.py

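"""Dataset retriever tool.

Exposes a Dataset as a LangChain tool. Datasets indexed with the
"economy" technique are searched via the keyword-table index; all other
datasets are searched by embedding the query (text-embedding-ada-002)
and running a similarity search against the vector index. Matching,
still-enabled document segments are returned as plain text, in the order
the index ranked them.
"""
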
import re
from typing import Type

from flask import current_app
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import BaseTool
from pydantic import BaseModel, Field

from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.index.vector_index.vector_index import VectorIndex
from core.llm.llm_builder import LLMBuilder
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment


class DatasetRetrieverToolInput(BaseModel):
    dataset_id: str = Field(..., description="ID of the dataset to be queried. MUST be in UUID format.")
    query: str = Field(..., description="Query used to retrieve relevant content from the dataset.")


class DatasetRetrieverTool(BaseTool):
    """Tool for querying a Dataset."""

    name: str = "dataset"
    args_schema: Type[BaseModel] = DatasetRetrieverToolInput
    description: str = "use this to retrieve a dataset. "
    tenant_id: str
    dataset_id: str
    k: int = 3

    @classmethod
    def from_dataset(cls, dataset: Dataset, **kwargs):
        # Build the tool description from the dataset's own description,
        # falling back to a generic one, and pin the expected dataset ID.
        description = dataset.description
        if not description:
            description = 'useful for when you want to answer queries about the ' + dataset.name

        description = description.replace('\n', '').replace('\r', '')
        description += '\nID of dataset MUST be ' + dataset.id

        return cls(
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,
            description=description,
            **kwargs
        )

    def _run(self, dataset_id: str, query: str) -> str:
        # The agent may wrap the dataset ID in extra text, so extract the
        # first UUID-shaped substring if one is present.
        pattern = r'\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b'
        match = re.search(pattern, dataset_id, re.IGNORECASE)
        if match:
            dataset_id = match.group()

        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == self.tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            return f'[{self.name} failed to find dataset with id {dataset_id}.]'

        if dataset.indexing_technique == "economy":
            # Economy datasets have no embeddings; use the keyword table index.
            kw_table_index = KeywordTableIndex(
                dataset=dataset,
                config=KeywordTableConfig(
                    max_keywords_per_chunk=5
                )
            )

            documents = kw_table_index.search(query, search_kwargs={'k': self.k})
            return "\n".join([document.page_content for document in documents])
        else:
            model_credentials = LLMBuilder.get_model_credentials(
                tenant_id=dataset.tenant_id,
                model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
                model_name='text-embedding-ada-002'
            )

            embeddings = CacheEmbedding(OpenAIEmbeddings(
                **model_credentials
            ))

            vector_index = VectorIndex(
                dataset=dataset,
                config=current_app.config,
                embeddings=embeddings
            )

            if self.k > 0:
                documents = vector_index.search(
                    query,
                    search_type='similarity',
                    search_kwargs={
                        'k': self.k
                    }
                )
            else:
                documents = []

            hit_callback = DatasetIndexToolCallbackHandler(dataset.id)
            hit_callback.on_tool_end(documents)

            document_context_list = []
            index_node_ids = [document.metadata['doc_id'] for document in documents]
            segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.status == 'completed',
                DocumentSegment.enabled == True,
                DocumentSegment.index_node_id.in_(index_node_ids)
            ).all()

            if segments:
                # Preserve the relevance order returned by the vector search
                # when assembling the context from the matching segments.
                index_node_id_to_position = {node_id: position for position, node_id in enumerate(index_node_ids)}
                sorted_segments = sorted(segments,
                                         key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
                                                                                           float('inf')))

                for segment in sorted_segments:
                    if segment.answer:
                        document_context_list.append(f'question:{segment.content} \nanswer:{segment.answer}')
                    else:
                        document_context_list.append(segment.content)

            return "\n".join(document_context_list)

    async def _arun(self, tool_input: str) -> str:
        raise NotImplementedError()
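
# Usage sketch (illustrative only): a minimal example of wiring this tool up,
# assuming a Dataset row has already been loaded and a Flask application
# context is active. The dataset ID and query below are hypothetical
# placeholders, not part of this module.
#
#     dataset = db.session.query(Dataset).filter(
#         Dataset.id == '00000000-0000-0000-0000-000000000000'  # hypothetical ID
#     ).first()
#     tool = DatasetRetrieverTool.from_dataset(dataset, k=3)
#     context = tool.run({'dataset_id': dataset.id, 'query': 'refund policy'})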