retrieval_service.py

import concurrent.futures
import json
from typing import Optional

from flask import Flask, current_app
from sqlalchemy.orm import load_only

from configs import dify_config
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import ChildChunk, Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.external_knowledge_service import ExternalDatasetService

default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}


class RetrievalService:
    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
    ):
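        """Retrieve relevant documents from a dataset using the given retrieval method.

        Keyword, semantic, and full-text searches run concurrently in a thread
        pool; hybrid search additionally reranks the merged results.
        """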
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []
        # Run each selected retrieval method concurrently in a shared thread pool
        with concurrent.futures.ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_WORKER) as executor:  # type: ignore
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                    )
                )
            concurrent.futures.wait(futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)
        if exceptions:
            raise ValueError(";\n".join(exceptions))
        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            data_post_processor = DataPostProcessor(
                str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )
        return all_documents

    @classmethod
    def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None):
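        """Fetch documents from the external knowledge base bound to this dataset."""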
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id, dataset_id, query, external_retrieval_model or {}
        )
        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls, flask_app: Flask, dataset_id: str, query: str, top_k: int, all_documents: list, exceptions: list
    ):
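        """Thread worker for keyword search; results go to all_documents, errors to exceptions."""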
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                keyword = Keyword(dataset=dataset)
                documents = keyword.search(cls.escape_query_for_search(query), top_k=top_k)
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
    ):
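        """Thread worker for vector similarity search, with optional model reranking."""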
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector = Vector(dataset=dataset)
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset.id]},
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
    ):
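        """Thread worker for full-text search, with optional model reranking."""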
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")
                vector_processor = Vector(dataset=dataset)
                documents = vector_processor.search_by_full_text(cls.escape_query_for_search(query), top_k=top_k)
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        return json.dumps(query).strip('"')
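        # For example (hypothetical input), escape_query_for_search('a "b" c')
        # returns 'a \\"b\\" c': json.dumps escapes the embedded quotes and
        # strip('"') removes the surrounding double quotes added by json.dumps.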

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Format retrieval documents with optimized batch processing."""
        if not documents:
            return []
        try:
            # Collect document IDs
            document_ids = {doc.metadata.get("document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            include_segment_ids = set()
            segment_child_map = {}

            # Process documents
            for document in documents:
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue
                dataset_document = dataset_documents[document_id]
                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents
                    child_index_node_id = document.metadata.get("doc_id")
                    child_chunk = (
                        db.session.query(ChildChunk).filter(ChildChunk.index_node_id == child_index_node_id).first()
                    )
                    if not child_chunk:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    if segment.id not in include_segment_ids:
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        map_detail = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        segment_child_map[segment.id] = map_detail
                        record = {
                            "segment": segment,
                        }
                        records.append(record)
                    else:
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
                        )
                else:
                    # Handle normal documents
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue
                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.index_node_id == index_node_id,
                        )
                        .first()
                    )
                    if not segment:
                        continue
                    include_segment_ids.add(segment.id)
                    record = {
                        "segment": segment,
                        "score": document.metadata.get("score"),  # type: ignore
                    }
                    records.append(record)

            # Add child chunks information to records
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]

            return [RetrievalSegments(**record) for record in records]
        except Exception as e:
            db.session.rollback()
            raise e
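

# A minimal usage sketch (hypothetical values; assumes an active Flask app
# context and an existing dataset):
#
#     documents = RetrievalService.retrieve(
#         retrieval_method=RetrievalMethod.SEMANTIC_SEARCH.value,
#         dataset_id="<dataset-uuid>",
#         query="how do refunds work?",
#         top_k=4,
#         score_threshold=0.5,
#     )
#     segments = RetrievalService.format_retrieval_documents(documents)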