dataset_multi_retriever_tool.py

import json
import threading
from typing import Type, Optional, List

from flask import current_app, Flask
from langchain.tools import BaseTool
from pydantic import Field, BaseModel

from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.conversation_message_task import ConversationMessageTask
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.model_providers.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_providers.model_factory import ModelFactory
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, Document
from services.retrieval_service import RetrievalService

default_retrieval_model = {
    'search_method': 'semantic_search',
    'reranking_enable': False,
    'reranking_model': {
        'reranking_provider_name': '',
        'reranking_model_name': ''
    },
    'top_k': 2,
    'score_threshold_enable': False
}


class DatasetMultiRetrieverToolInput(BaseModel):
    query: str = Field(..., description="dataset multi retriever and rerank")


class DatasetMultiRetrieverTool(BaseTool):
    """Tool for querying multiple datasets."""
    name: str = "dataset-"
    args_schema: Type[BaseModel] = DatasetMultiRetrieverToolInput
    description: str = "dataset multi retriever and rerank. "
    tenant_id: str
    dataset_ids: List[str]
    top_k: int = 2
    score_threshold: Optional[float] = None
    reranking_provider_name: str
    reranking_model_name: str
    conversation_message_task: ConversationMessageTask
    return_resource: bool
    retriever_from: str

    @classmethod
    def from_dataset(cls, dataset_ids: List[str], tenant_id: str, **kwargs):
        return cls(
            name=f'dataset-{tenant_id}',
            tenant_id=tenant_id,
            dataset_ids=dataset_ids,
            **kwargs
        )

    def _run(self, query: str) -> str:
        # query each dataset in its own thread so retrieval runs in parallel
        threads = []
        all_documents = []
        for dataset_id in self.dataset_ids:
            retrieval_thread = threading.Thread(target=self._retriever, kwargs={
                'flask_app': current_app._get_current_object(),
                'dataset_id': dataset_id,
                'query': query,
                'all_documents': all_documents
            })
            threads.append(retrieval_thread)
            retrieval_thread.start()
        for thread in threads:
            thread.join()

        # do rerank for searched documents
        rerank = ModelFactory.get_reranking_model(
            tenant_id=self.tenant_id,
            model_provider_name=self.reranking_provider_name,
            model_name=self.reranking_model_name
        )
        all_documents = rerank.rerank(query, all_documents, self.score_threshold, self.top_k)

        hit_callback = DatasetIndexToolCallbackHandler(self.conversation_message_task)
        hit_callback.on_tool_end(all_documents)

        document_context_list = []
        index_node_ids = [document.metadata['doc_id'] for document in all_documents]
        segments = DocumentSegment.query.filter(
            DocumentSegment.completed_at.isnot(None),
            DocumentSegment.status == 'completed',
            DocumentSegment.enabled == True,
            DocumentSegment.index_node_id.in_(index_node_ids)
        ).all()

        if segments:
            index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
            sorted_segments = sorted(segments,
                                     key=lambda segment: index_node_id_to_position.get(segment.index_node_id,
                                                                                       float('inf')))
            for segment in sorted_segments:
                if segment.answer:
                    document_context_list.append(f'question:{segment.content} answer:{segment.answer}')
                else:
                    document_context_list.append(segment.content)

            if self.return_resource:
                context_list = []
                resource_number = 1
                for segment in sorted_segments:
                    dataset = Dataset.query.filter_by(
                        id=segment.dataset_id
                    ).first()
                    document = Document.query.filter(Document.id == segment.document_id,
                                                     Document.enabled == True,
                                                     Document.archived == False,
                                                     ).first()
                    if dataset and document:
                        source = {
                            'position': resource_number,
                            'dataset_id': dataset.id,
                            'dataset_name': dataset.name,
                            'document_id': document.id,
                            'document_name': document.name,
                            'data_source_type': document.data_source_type,
                            'segment_id': segment.id,
                            'retriever_from': self.retriever_from
                        }
                        if self.retriever_from == 'dev':
                            source['hit_count'] = segment.hit_count
                            source['word_count'] = segment.word_count
                            source['segment_position'] = segment.position
                            source['index_node_hash'] = segment.index_node_hash
                        if segment.answer:
                            source['content'] = f'question:{segment.content} \nanswer:{segment.answer}'
                        else:
                            source['content'] = segment.content
                        context_list.append(source)
                        resource_number += 1
                hit_callback.return_retriever_resource_info(context_list)

        return str("\n".join(document_context_list))

    async def _arun(self, tool_input: str) -> str:
        raise NotImplementedError()

    def _retriever(self, flask_app: Flask, dataset_id: str, query: str, all_documents: List):
        with flask_app.app_context():
            dataset = db.session.query(Dataset).filter(
                Dataset.tenant_id == self.tenant_id,
                Dataset.id == dataset_id
            ).first()

            if not dataset:
                return []

            # get the dataset's retrieval model; fall back to the default if it is not set
            retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model

            if dataset.indexing_technique == "economy":
                # use keyword table query
                kw_table_index = KeywordTableIndex(
                    dataset=dataset,
                    config=KeywordTableConfig(
                        max_keywords_per_chunk=5
                    )
                )
                documents = kw_table_index.search(query, search_kwargs={'k': self.top_k})
                if documents:
                    all_documents.extend(documents)
            else:
                try:
                    embedding_model = ModelFactory.get_embedding_model(
                        tenant_id=dataset.tenant_id,
                        model_provider_name=dataset.embedding_model_provider,
                        model_name=dataset.embedding_model
                    )
                except LLMBadRequestError:
                    return []
                except ProviderTokenNotInitError:
                    return []

                embeddings = CacheEmbedding(embedding_model)

                documents = []
                threads = []
                if self.top_k > 0:
                    # retrieval_model source with semantic
                    if retrieval_model['search_method'] == 'semantic_search' \
                            or retrieval_model['search_method'] == 'hybrid_search':
                        embedding_thread = threading.Thread(target=RetrievalService.embedding_search, kwargs={
                            'flask_app': current_app._get_current_object(),
                            'dataset': dataset,
                            'query': query,
                            'top_k': self.top_k,
                            'score_threshold': self.score_threshold,
                            'reranking_model': None,
                            'all_documents': documents,
                            'search_method': 'hybrid_search',
                            'embeddings': embeddings
                        })
                        threads.append(embedding_thread)
                        embedding_thread.start()

                    # retrieval_model source with full text
                    if retrieval_model['search_method'] == 'full_text_search' \
                            or retrieval_model['search_method'] == 'hybrid_search':
                        full_text_index_thread = threading.Thread(target=RetrievalService.full_text_index_search, kwargs={
                            'flask_app': current_app._get_current_object(),
                            'dataset': dataset,
                            'query': query,
                            'search_method': 'hybrid_search',
                            'embeddings': embeddings,
                            'score_threshold': retrieval_model['score_threshold']
                            if retrieval_model['score_threshold_enable'] else None,
                            'top_k': self.top_k,
                            'reranking_model': retrieval_model['reranking_model']
                            if retrieval_model['reranking_enable'] else None,
                            'all_documents': documents
                        })
                        threads.append(full_text_index_thread)
                        full_text_index_thread.start()

                    for thread in threads:
                        thread.join()

                all_documents.extend(documents)
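

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of how this tool might be wired up, assuming the caller
# already holds a ConversationMessageTask, dataset ids that belong to the
# tenant, and a configured reranking provider. All identifiers below are
# placeholders, not real values.
#
# tool = DatasetMultiRetrieverTool.from_dataset(
#     dataset_ids=['<dataset-id-1>', '<dataset-id-2>'],
#     tenant_id='<tenant-id>',
#     top_k=4,
#     reranking_provider_name='<reranking-provider>',
#     reranking_model_name='<reranking-model>',
#     conversation_message_task=conversation_message_task,
#     return_resource=True,
#     retriever_from='dev'
# )
# context = tool.run('<user question>')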