dataset_retriever_tool.py

import threading
from typing import Type, Optional, List

from flask import current_app
from langchain.tools import BaseTool
from pydantic import Field, BaseModel

from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rerank.rerank import RerankRunner
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, Document
from services.retrieval_service import RetrievalService

# default retrieval settings used when a dataset has no retrieval_model configured
default_retrieval_model = {
    'search_method': 'semantic_search',
    'reranking_enable': False,
    'reranking_model': {
        'reranking_provider_name': '',
        'reranking_model_name': ''
    },
    'top_k': 2,
    'score_threshold_enabled': False
}


class DatasetRetrieverToolInput(BaseModel):
    query: str = Field(..., description="Query to use for retrieving relevant content from the dataset.")


class DatasetRetrieverTool(BaseTool):
    """Tool for querying a Dataset."""
    name: str = "dataset"
    args_schema: Type[BaseModel] = DatasetRetrieverToolInput
    description: str = "use this to retrieve a dataset. "

    tenant_id: str
    dataset_id: str
    top_k: int = 2
    score_threshold: Optional[float] = None
    hit_callbacks: List[DatasetIndexToolCallbackHandler] = []
    return_resource: bool
    retriever_from: str

    @classmethod
    def from_dataset(cls, dataset: Dataset, **kwargs):
        """Build a tool instance bound to the given dataset, deriving its name and description."""
        description = dataset.description
        if not description:
            description = 'useful for when you want to answer queries about the ' + dataset.name

        description = description.replace('\n', '').replace('\r', '')
        return cls(
            name=f'dataset-{dataset.id}',
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,
            description=description,
            **kwargs
        )

    def _run(self, query: str) -> str:
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == self.tenant_id,
            Dataset.id == self.dataset_id
        ).first()

        if not dataset:
            return ''

        for hit_callback in self.hit_callbacks:
            hit_callback.on_query(query, dataset.id)

        # get the dataset's retrieval model; fall back to the default if none is configured
        retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model

        if dataset.indexing_technique == "economy":
            # use keyword table query
            kw_table_index = KeywordTableIndex(
                dataset=dataset,
                config=KeywordTableConfig(
                    max_keywords_per_chunk=5
                )
            )

            documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
            return str("\n".join([document.page_content for document in documents]))
        else:
            # get embedding model instance
            try:
                model_manager = ModelManager()
                embedding_model = model_manager.get_model_instance(
                    tenant_id=dataset.tenant_id,
                    provider=dataset.embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=dataset.embedding_model
                )
            except InvokeAuthorizationError:
                return ''

            embeddings = CacheEmbedding(embedding_model)

            documents = []
            threads = []
            if self.top_k > 0:
                # semantic (vector) retrieval, run in a background thread
                if retrieval_model['search_method'] == 'semantic_search' or retrieval_model['search_method'] == 'hybrid_search':
                    embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                        'flask_app': current_app._get_current_object(),
                        'dataset_id': str(dataset.id),
                        'query': query,
                        'top_k': self.top_k,
                        'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
                            'score_threshold_enabled'] else None,
                        'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
                            'reranking_enable'] else None,
                        'all_documents': documents,
                        'search_method': retrieval_model['search_method'],
                        'embeddings': embeddings
                    })
                    threads.append(embedding_thread)
                    embedding_thread.start()

                # full-text retrieval, run in a background thread
                if retrieval_model['search_method'] == 'full_text_search' or retrieval_model['search_method'] == 'hybrid_search':
                    full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
                        'flask_app': current_app._get_current_object(),
                        'dataset_id': str(dataset.id),
                        'query': query,
                        'search_method': retrieval_model['search_method'],
                        'embeddings': embeddings,
                        'score_threshold': retrieval_model['score_threshold'] if retrieval_model[
                            'score_threshold_enabled'] else None,
                        'top_k': self.top_k,
                        'reranking_model': retrieval_model['reranking_model'] if retrieval_model[
                            'reranking_enable'] else None,
                        'all_documents': documents
                    })
                    threads.append(full_text_index_thread)
                    full_text_index_thread.start()

                for thread in threads:
                    thread.join()

                # hybrid search: rerank after all documents have been searched
                if retrieval_model['search_method'] == 'hybrid_search':
                    # get rerank model instance
                    try:
                        model_manager = ModelManager()
                        rerank_model_instance = model_manager.get_model_instance(
                            tenant_id=dataset.tenant_id,
                            provider=retrieval_model['reranking_model']['reranking_provider_name'],
                            model_type=ModelType.RERANK,
                            model=retrieval_model['reranking_model']['reranking_model_name']
                        )
                    except InvokeAuthorizationError:
                        return ''

                    rerank_runner = RerankRunner(rerank_model_instance)
                    documents = rerank_runner.run(
                        query=query,
                        documents=documents,
                        score_threshold=retrieval_model['score_threshold'] if retrieval_model[
                            'score_threshold_enabled'] else None,
                        top_n=self.top_k
                    )
            else:
                documents = []

            for hit_callback in self.hit_callbacks:
                hit_callback.on_tool_end(documents)

            # collect per-document relevance scores (non-economy indexes only)
            document_score_list = {}
            if dataset.indexing_technique != "economy":
                for item in documents:
                    if 'score' in item.metadata and item.metadata['score']:
                        document_score_list[item.metadata['doc_id']] = item.metadata['score']

            # map retrieved index nodes back to their document segments
            document_context_list = []
            index_node_ids = [document.metadata['doc_id'] for document in documents]
            segments = DocumentSegment.query.filter(DocumentSegment.dataset_id == self.dataset_id,
                                                    DocumentSegment.completed_at.isnot(None),
                                                    DocumentSegment.status == 'completed',
                                                    DocumentSegment.enabled == True,
                                                    DocumentSegment.index_node_id.in_(index_node_ids)
                                                    ).all()

            if segments:
                index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
                sorted_segments = sorted(segments,
                                         key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
                                                                                           float('inf')))
                for segment in sorted_segments:
                    if segment.answer:
                        document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
                    else:
                        document_context_list.append(segment.content)

                if self.return_resource:
                    # build citation metadata for each returned segment
                    context_list = []
                    resource_number = 1
                    for segment in sorted_segments:
                        document = Document.query.filter(Document.id == segment.document_id,
                                                         Document.enabled == True,
                                                         Document.archived == False,
                                                         ).first()
                        if dataset and document:
                            source = {
                                'position': resource_number,
                                'dataset_id': dataset.id,
                                'dataset_name': dataset.name,
                                'document_id': document.id,
                                'document_name': document.name,
                                'data_source_type': document.data_source_type,
                                'segment_id': segment.id,
                                'retriever_from': self.retriever_from,
                                'score': document_score_list.get(segment.index_node_id, None)
                            }
                            if self.retriever_from == 'dev':
                                source['hit_count'] = segment.hit_count
                                source['word_count'] = segment.word_count
                                source['segment_position'] = segment.position
                                source['index_node_hash'] = segment.index_node_hash
                            if segment.answer:
                                source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
                            else:
                                source['content'] = segment.content
                            context_list.append(source)
                        resource_number += 1

                    for hit_callback in self.hit_callbacks:
                        hit_callback.return_retriever_resource_info(context_list)

            return str("\n".join(document_context_list))

    async def _arun(self, tool_input: str) -> str:
        raise NotImplementedError()
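

# Illustrative usage sketch (not part of this module): inside a Flask app/request
# context, a tool could be built from an existing Dataset row and invoked with a
# plain-text query via LangChain's BaseTool.run. The `dataset_id` variable below is
# a placeholder assumption; `return_resource` and `retriever_from` have no defaults
# and must be supplied by the caller.
#
#     dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
#     tool = DatasetRetrieverTool.from_dataset(
#         dataset,
#         top_k=3,
#         return_resource=True,
#         retriever_from='dev'
#     )
#     context_text = tool.run("How do I reset my password?")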