"""Wrapper around Qdrant vector database."""
from __future__ import annotations

import asyncio
import functools
import uuid
import warnings
from itertools import islice
from operator import itemgetter
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Generator,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
)

import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
from qdrant_client.http.models import PayloadSchemaType

if TYPE_CHECKING:
    from qdrant_client import grpc  # noqa
    from qdrant_client.conversions import common_types
    from qdrant_client.http import models as rest

    DictFilter = Dict[str, Union[str, int, bool, dict, list]]
    MetadataFilter = Union[DictFilter, common_types.Filter]


class QdrantException(Exception):
    """Base class for all the Qdrant related exceptions"""


def sync_call_fallback(method: Callable) -> Callable:
    """
    Decorator to call the synchronous method of the class if the async method is not
    implemented. This decorator may only be used for methods that are defined
    as async in the class.
    """

    @functools.wraps(method)
    async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
        try:
            return await method(self, *args, **kwargs)
        except NotImplementedError:
            # If the async method is not implemented, call the synchronous method
            # by removing the first letter from the method name. For example,
            # if the async method is called ``aadd_texts``, the synchronous method
            # will be called ``add_texts``.
            sync_method = functools.partial(
                getattr(self, method.__name__[1:]), *args, **kwargs
            )
            return await asyncio.get_event_loop().run_in_executor(None, sync_method)

    return wrapper

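# Note: a rough illustration of the fallback behaviour, not executed anywhere in
# this module. Awaiting a decorated ``a``-prefixed coroutine that raises
# ``NotImplementedError`` transparently runs its synchronous counterpart (same
# name without the leading "a") in the event loop's default executor, e.g.:
#
#     docs = await qdrant.asome_method(...)  # falls back to qdrant.some_method(...)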


class Qdrant(VectorStore):
    """Wrapper around Qdrant vector database.

    To use you should have the ``qdrant-client`` package installed.

    Example:
        .. code-block:: python

            from qdrant_client import QdrantClient
            from langchain import Qdrant

            client = QdrantClient()
            collection_name = "MyCollection"
            qdrant = Qdrant(client, collection_name, embeddings)
    """

    CONTENT_KEY = "page_content"
    METADATA_KEY = "metadata"
    GROUP_KEY = "group_id"
    VECTOR_NAME = None

    def __init__(
        self,
        client: Any,
        collection_name: str,
        embeddings: Optional[Embeddings] = None,
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        group_payload_key: str = GROUP_KEY,
        group_id: Optional[str] = None,
        distance_strategy: str = "COSINE",
        vector_name: Optional[str] = VECTOR_NAME,
        embedding_function: Optional[Callable] = None,  # deprecated
        is_new_collection: bool = False,
    ):
        """Initialize with necessary components."""
        try:
            import qdrant_client
        except ImportError:
            raise ValueError(
                "Could not import qdrant-client python package. "
                "Please install it with `pip install qdrant-client`."
            )
        if not isinstance(client, qdrant_client.QdrantClient):
            raise ValueError(
                f"client should be an instance of qdrant_client.QdrantClient, "
                f"got {type(client)}"
            )
        if embeddings is None and embedding_function is None:
            raise ValueError(
                "`embeddings` value can't be None. Pass `Embeddings` instance."
            )
        if embeddings is not None and embedding_function is not None:
            raise ValueError(
                "Both `embeddings` and `embedding_function` are passed. "
                "Use `embeddings` only."
            )
        self._embeddings = embeddings
        self._embeddings_function = embedding_function
        self.client: qdrant_client.QdrantClient = client
        self.collection_name = collection_name
        self.content_payload_key = content_payload_key or self.CONTENT_KEY
        self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
        self.group_payload_key = group_payload_key or self.GROUP_KEY
        self.vector_name = vector_name or self.VECTOR_NAME
        self.group_id = group_id
        self.is_new_collection = is_new_collection

        if embedding_function is not None:
            warnings.warn(
                "Using `embedding_function` is deprecated. "
                "Pass `Embeddings` instance to `embeddings` instead."
            )
        if not isinstance(embeddings, Embeddings):
            warnings.warn(
                "`embeddings` should be an instance of `Embeddings`. "
                "Using `embeddings` as `embedding_function` which is deprecated."
            )
            self._embeddings_function = embeddings
            self._embeddings = None
        self.distance_strategy = distance_strategy.upper()

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return self._embeddings

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids:
                Optional list of ids to associate with the texts. Ids have to be
                uuid-like strings.
            batch_size:
                How many vectors upload per-request.
                Default: 64
            group_id:
                Id of the group the added texts belong to, stored in the payload
                under ``group_payload_key``.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        added_ids = []
        for batch_ids, points in self._generate_rest_batches(
            texts, metadatas, ids, batch_size
        ):
            self.client.upsert(
                collection_name=self.collection_name, points=points, **kwargs
            )
            added_ids.extend(batch_ids)

        # If this is a new collection, create a payload index on group_id.
        if self.is_new_collection:
            self.client.create_payload_index(
                self.collection_name,
                self.group_payload_key,
                field_schema=PayloadSchemaType.KEYWORD,
                field_type=PayloadSchemaType.KEYWORD,
            )
        return added_ids

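    # A minimal usage sketch (illustrative only; the texts and metadatas are made up):
    #
    #     ids = qdrant.add_texts(
    #         texts=["foo", "bar"],
    #         metadatas=[{"source": "a"}, {"source": "b"}],
    #     )
    #
    # Each batch is embedded via ``_generate_rest_batches`` and upserted; the
    # returned list contains the ids of the points that were written.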

    @sync_call_fallback
    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids:
                Optional list of ids to associate with the texts. Ids have to be
                uuid-like strings.
            batch_size:
                How many vectors upload per-request.
                Default: 64

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        from qdrant_client import grpc  # noqa
        from qdrant_client.conversions.conversion import RestToGrpc

        added_ids = []
        for batch_ids, points in self._generate_rest_batches(
            texts, metadatas, ids, batch_size
        ):
            await self.client.async_grpc_points.Upsert(
                grpc.UpsertPoints(
                    collection_name=self.collection_name,
                    points=[RestToGrpc.convert_point_struct(point) for point in points],
                )
            )
            added_ids.extend(batch_ids)

        return added_ids

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of Documents most similar to the query.
        """
        results = self.similarity_search_with_score(
            query,
            k,
            filter=filter,
            search_params=search_params,
            offset=offset,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )
        return list(map(itemgetter(0), results))

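    # An illustrative metadata filter (field names here are assumptions): the
    # deprecated dict form and the native qdrant-client form below should select
    # the same points when the stored metadata contains ``{"source": "a"}``.
    #
    #     from qdrant_client.http import models as rest
    #
    #     docs = qdrant.similarity_search("query", k=4, filter={"source": "a"})
    #     docs = qdrant.similarity_search(
    #         "query",
    #         k=4,
    #         filter=rest.Filter(
    #             must=[
    #                 rest.FieldCondition(
    #                     key="metadata.source",
    #                     match=rest.MatchValue(value="a"),
    #                 )
    #             ]
    #         ),
    #     )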

    @sync_call_fallback
    async def asimilarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query.
        """
        results = await self.asimilarity_search_with_score(query, k, filter, **kwargs)
        return list(map(itemgetter(0), results))

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of documents most similar to the query text and distance for each.
        """
        return self.similarity_search_with_score_by_vector(
            self._embed_query(query),
            k,
            filter=filter,
            search_params=search_params,
            offset=offset,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )

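    # Illustrative: each result is a ``(Document, score)`` pair; with the default
    # cosine distance a higher score means a closer match, so ``score_threshold``
    # can be used to prune weak hits, e.g.:
    #
    #     pairs = qdrant.similarity_search_with_score("query", k=4, score_threshold=0.8)
    #     for doc, score in pairs:
    #         print(score, doc.page_content)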

    @sync_call_fallback
    async def asimilarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of documents most similar to the query text and distance for each.
        """
        return await self.asimilarity_search_with_score_by_vector(
            self._embed_query(query),
            k,
            filter=filter,
            search_params=search_params,
            offset=offset,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of Documents most similar to the query.
        """
        results = self.similarity_search_with_score_by_vector(
            embedding,
            k,
            filter=filter,
            search_params=search_params,
            offset=offset,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )
        return list(map(itemgetter(0), results))

    @sync_call_fallback
    async def asimilarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of Documents most similar to the query.
        """
        results = await self.asimilarity_search_with_score_by_vector(
            embedding,
            k,
            filter=filter,
            search_params=search_params,
            offset=offset,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )
        return list(map(itemgetter(0), results))

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of documents most similar to the query text and distance for each.
        """
        if filter is not None and isinstance(filter, dict):
            warnings.warn(
                "Using dict as a `filter` is deprecated. Please use qdrant-client "
                "filters directly: "
                "https://qdrant.tech/documentation/concepts/filtering/",
                DeprecationWarning,
            )
            qdrant_filter = self._qdrant_filter_from_dict(filter)
        else:
            qdrant_filter = filter

        query_vector = embedding
        if self.vector_name is not None:
            query_vector = (self.vector_name, embedding)  # type: ignore[assignment]

        results = self.client.search(
            collection_name=self.collection_name,
            query_vector=query_vector,
            query_filter=qdrant_filter,
            search_params=search_params,
            limit=k,
            offset=offset,
            with_payload=True,
            with_vectors=True,
            score_threshold=score_threshold,
            consistency=consistency,
            **kwargs,
        )
        return [
            (
                self._document_from_scored_point(
                    result, self.content_payload_key, self.metadata_payload_key
                ),
                result.score,
            )
            for result in results
        ]

    @sync_call_fallback
    async def asimilarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[MetadataFilter] = None,
        search_params: Optional[common_types.SearchParams] = None,
        offset: int = 0,
        score_threshold: Optional[float] = None,
        consistency: Optional[common_types.ReadConsistency] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.
            search_params: Additional search params
            offset:
                Offset of the first result to return.
                May be used to paginate results.
                Note: large offset values may cause performance issues.
            score_threshold:
                Define a minimal score threshold for the result.
                If defined, less similar results will not be returned.
                Score of the returned result might be higher or smaller than the
                threshold depending on the Distance function used.
                E.g. for cosine similarity only higher scores will be returned.
            consistency:
                Read consistency of the search. Defines how many replicas should be
                queried before returning the result.
                Values:
                - int - number of replicas to query, values should present in all
                  queried replicas
                - 'majority' - query all replicas, but return values present in the
                  majority of replicas
                - 'quorum' - query the majority of replicas, return values present in
                  all of them
                - 'all' - query all replicas, and return values present in all replicas

        Returns:
            List of documents most similar to the query text and distance for each.
        """
        from qdrant_client import grpc  # noqa
        from qdrant_client.conversions.conversion import RestToGrpc
        from qdrant_client.http import models as rest

        if filter is not None and isinstance(filter, dict):
            warnings.warn(
                "Using dict as a `filter` is deprecated. Please use qdrant-client "
                "filters directly: "
                "https://qdrant.tech/documentation/concepts/filtering/",
                DeprecationWarning,
            )
            qdrant_filter = self._qdrant_filter_from_dict(filter)
        else:
            qdrant_filter = filter

        if qdrant_filter is not None and isinstance(qdrant_filter, rest.Filter):
            qdrant_filter = RestToGrpc.convert_filter(qdrant_filter)

        response = await self.client.async_grpc_points.Search(
            grpc.SearchPoints(
                collection_name=self.collection_name,
                vector_name=self.vector_name,
                vector=embedding,
                filter=qdrant_filter,
                params=search_params,
                limit=k,
                offset=offset,
                with_payload=grpc.WithPayloadSelector(enable=True),
                with_vectors=grpc.WithVectorsSelector(enable=False),
                score_threshold=score_threshold,
                read_consistency=consistency,
                **kwargs,
            )
        )
        return [
            (
                self._document_from_scored_point_grpc(
                    result, self.content_payload_key, self.metadata_payload_key
                ),
                result.score,
            )
            for result in response.result
        ]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        query_embedding = self._embed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            query_embedding, k, fetch_k, lambda_mult, **kwargs
        )

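    # Illustrative call (values shown are the defaults): fetch 20 candidates by
    # similarity, then re-rank them down to 4 results that trade off relevance
    # against diversity via ``lambda_mult``.
    #
    #     docs = qdrant.max_marginal_relevance_search(
    #         "query", k=4, fetch_k=20, lambda_mult=0.5
    #     )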

    @sync_call_fallback
    async def amax_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        query_embedding = self._embed_query(query)
        return await self.amax_marginal_relevance_search_by_vector(
            query_embedding, k, fetch_k, lambda_mult, **kwargs
        )

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        results = self.max_marginal_relevance_search_with_score_by_vector(
            embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
        )
        return list(map(itemgetter(0), results))

    @sync_call_fallback
    async def amax_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        results = await self.amax_marginal_relevance_search_with_score_by_vector(
            embedding, k, fetch_k, lambda_mult, **kwargs
        )
        return list(map(itemgetter(0), results))

    def max_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance and distance for
            each.
        """
        query_vector = embedding
        if self.vector_name is not None:
            query_vector = (self.vector_name, query_vector)  # type: ignore[assignment]

        results = self.client.search(
            collection_name=self.collection_name,
            query_vector=query_vector,
            with_payload=True,
            with_vectors=True,
            limit=fetch_k,
        )
        embeddings = [
            result.vector.get(self.vector_name)  # type: ignore[index, union-attr]
            if self.vector_name is not None
            else result.vector
            for result in results
        ]
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
        )
        return [
            (
                self._document_from_scored_point(
                    results[i], self.content_payload_key, self.metadata_payload_key
                ),
                results[i].score,
            )
            for i in mmr_selected
        ]

    @sync_call_fallback
    async def amax_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance and distance for
            each.
        """
        from qdrant_client import grpc  # noqa
        from qdrant_client.conversions.conversion import GrpcToRest

        response = await self.client.async_grpc_points.Search(
            grpc.SearchPoints(
                collection_name=self.collection_name,
                vector_name=self.vector_name,
                vector=embedding,
                with_payload=grpc.WithPayloadSelector(enable=True),
                with_vectors=grpc.WithVectorsSelector(enable=True),
                limit=fetch_k,
            )
        )
        results = [
            GrpcToRest.convert_vectors(result.vectors) for result in response.result
        ]
        embeddings: List[List[float]] = [
            result.get(self.vector_name)  # type: ignore
            if isinstance(result, dict)
            else result
            for result in results
        ]
        mmr_selected: List[int] = maximal_marginal_relevance(
            np.array(embedding),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        return [
            (
                self._document_from_scored_point_grpc(
                    response.result[i],
                    self.content_payload_key,
                    self.metadata_payload_key,
                ),
                response.result[i].score,
            )
            for i in mmr_selected
        ]

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete by vector ID or other criteria.

        Args:
            ids: List of ids to delete.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        from qdrant_client.http import models as rest

        result = self.client.delete(
            collection_name=self.collection_name,
            points_selector=ids,
        )
        return result.status == rest.UpdateStatus.COMPLETED

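    # Illustrative deletion by point id (the ids are whatever ``add_texts``
    # returned for the documents to remove):
    #
    #     ids = qdrant.add_texts(["temporary text"])
    #     deleted = qdrant.delete(ids)  # True once the update status is COMPLETED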
    @classmethod
    def from_texts(
        cls: Type[Qdrant],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        location: Optional[str] = None,
        url: Optional[str] = None,
        port: Optional[int] = 6333,
        grpc_port: int = 6334,
        prefer_grpc: bool = False,
        https: Optional[bool] = None,
        api_key: Optional[str] = None,
        prefix: Optional[str] = None,
        timeout: Optional[float] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
        collection_name: Optional[str] = None,
        distance_func: str = "Cosine",
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        group_payload_key: str = GROUP_KEY,
        group_id: Optional[str] = None,
        vector_name: Optional[str] = VECTOR_NAME,
        batch_size: int = 64,
        shard_number: Optional[int] = None,
        replication_factor: Optional[int] = None,
        write_consistency_factor: Optional[int] = None,
        on_disk_payload: Optional[bool] = None,
        hnsw_config: Optional[common_types.HnswConfigDiff] = None,
        optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
        wal_config: Optional[common_types.WalConfigDiff] = None,
        quantization_config: Optional[common_types.QuantizationConfig] = None,
        init_from: Optional[common_types.InitFrom] = None,
        force_recreate: bool = False,
        **kwargs: Any,
    ) -> Qdrant:
        """Construct Qdrant wrapper from a list of texts.

        Args:
            texts: A list of texts to be indexed in Qdrant.
            embedding: A subclass of `Embeddings`, responsible for text vectorization.
            metadatas:
                An optional list of metadata. If provided it has to be of the same
                length as a list of texts.
            ids:
                Optional list of ids to associate with the texts. Ids have to be
                uuid-like strings.
            location:
                If `:memory:` - use in-memory Qdrant instance.
                If `str` - use it as a `url` parameter.
                If `None` - fallback to relying on `host` and `port` parameters.
            url: either host or str of "Optional[scheme], host, Optional[port],
                Optional[prefix]". Default: `None`
            port: Port of the REST API interface. Default: 6333
            grpc_port: Port of the gRPC interface. Default: 6334
            prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
                Default: False
            https: If true - use HTTPS(SSL) protocol. Default: None
            api_key: API key for authentication in Qdrant Cloud. Default: None
            prefix:
                If not None - add prefix to the REST URL path.
                Example: service/v1 will result in
                http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
                Default: None
            timeout:
                Timeout for REST and gRPC API requests.
                Default: 5.0 seconds for REST and unlimited for gRPC
            host:
                Host name of Qdrant service. If url and host are None, set to
                'localhost'. Default: None
            path:
                Path in which the vectors will be stored while using local mode.
                Default: None
            collection_name:
                Name of the Qdrant collection to be used. If not provided,
                it will be created randomly. Default: None
            distance_func:
                Distance function. One of: "Cosine" / "Euclid" / "Dot".
                Default: "Cosine"
            content_payload_key:
                A payload key used to store the content of the document.
                Default: "page_content"
            metadata_payload_key:
                A payload key used to store the metadata of the document.
                Default: "metadata"
            group_payload_key:
                A payload key used to store the group id of the document.
                Default: "group_id"
            group_id:
                Id of the group the indexed documents belong to. Default: None
            vector_name:
                Name of the vector to be used internally in Qdrant.
                Default: None
            batch_size:
                How many vectors upload per-request.
                Default: 64
            shard_number: Number of shards in collection. Default is 1, minimum is 1.
            replication_factor:
                Replication factor for collection. Default is 1, minimum is 1.
                Defines how many copies of each shard will be created.
                Have effect only in distributed mode.
            write_consistency_factor:
                Write consistency factor for collection. Default is 1, minimum is 1.
                Defines how many replicas should apply the operation for us to consider
                it successful. Increasing this number will make the collection more
                resilient to inconsistencies, but will also make it fail if not enough
                replicas are available.
                Does not have any performance impact.
                Have effect only in distributed mode.
            on_disk_payload:
                If true - point's payload will not be stored in memory.
                It will be read from the disk every time it is requested.
                This setting saves RAM by (slightly) increasing the response time.
                Note: those payload values that are involved in filtering and are
                indexed - remain in RAM.
            hnsw_config: Params for HNSW index
            optimizers_config: Params for optimizer
            wal_config: Params for Write-Ahead-Log
            quantization_config:
                Params for quantization, if None - quantization will be disabled
            init_from:
                Use data stored in another collection to initialize this collection
            force_recreate:
                Force recreating the collection
            **kwargs:
                Additional arguments passed directly into REST client initialization

        This is a user-friendly interface that:
        1. Creates embeddings, one for each text
        2. Initializes the Qdrant database as an in-memory docstore by default
           (and overridable to a remote docstore)
        3. Adds the text embeddings to the Qdrant database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Qdrant
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                qdrant = Qdrant.from_texts(texts, embeddings, host="localhost")
        """
        qdrant = cls._construct_instance(
            texts,
            embedding,
            metadatas,
            ids,
            location,
            url,
            port,
            grpc_port,
            prefer_grpc,
            https,
            api_key,
            prefix,
            timeout,
            host,
            path,
            collection_name,
            distance_func,
            content_payload_key,
            metadata_payload_key,
            group_payload_key,
            group_id,
            vector_name,
            shard_number,
            replication_factor,
            write_consistency_factor,
            on_disk_payload,
            hnsw_config,
            optimizers_config,
            wal_config,
            quantization_config,
            init_from,
            force_recreate,
            **kwargs,
        )
        qdrant.add_texts(texts, metadatas, ids, batch_size)
        return qdrant

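    # Illustrative remote setup (the URL and key below are placeholders, not real
    # values):
    #
    #     qdrant = Qdrant.from_texts(
    #         texts,
    #         embeddings,
    #         url="https://YOUR-CLUSTER-URL:6333",
    #         api_key="YOUR_API_KEY",
    #         collection_name="MyCollection",
    #     )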

    @classmethod
    @sync_call_fallback
    async def afrom_texts(
        cls: Type[Qdrant],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        location: Optional[str] = None,
        url: Optional[str] = None,
        port: Optional[int] = 6333,
        grpc_port: int = 6334,
        prefer_grpc: bool = False,
        https: Optional[bool] = None,
        api_key: Optional[str] = None,
        prefix: Optional[str] = None,
        timeout: Optional[float] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
        collection_name: Optional[str] = None,
        distance_func: str = "Cosine",
        content_payload_key: str = CONTENT_KEY,
        metadata_payload_key: str = METADATA_KEY,
        vector_name: Optional[str] = VECTOR_NAME,
        batch_size: int = 64,
        shard_number: Optional[int] = None,
        replication_factor: Optional[int] = None,
        write_consistency_factor: Optional[int] = None,
        on_disk_payload: Optional[bool] = None,
        hnsw_config: Optional[common_types.HnswConfigDiff] = None,
        optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
        wal_config: Optional[common_types.WalConfigDiff] = None,
        quantization_config: Optional[common_types.QuantizationConfig] = None,
        init_from: Optional[common_types.InitFrom] = None,
        force_recreate: bool = False,
        **kwargs: Any,
    ) -> Qdrant:
        """Construct Qdrant wrapper from a list of texts.

        Args:
            texts: A list of texts to be indexed in Qdrant.
            embedding: A subclass of `Embeddings`, responsible for text vectorization.
            metadatas:
                An optional list of metadata. If provided it has to be of the same
                length as a list of texts.
            ids:
                Optional list of ids to associate with the texts. Ids have to be
                uuid-like strings.
            location:
                If `:memory:` - use in-memory Qdrant instance.
                If `str` - use it as a `url` parameter.
                If `None` - fallback to relying on `host` and `port` parameters.
            url: either host or str of "Optional[scheme], host, Optional[port],
                Optional[prefix]". Default: `None`
            port: Port of the REST API interface. Default: 6333
            grpc_port: Port of the gRPC interface. Default: 6334
            prefer_grpc:
                If true - use gRPC interface whenever possible in custom methods.
                Default: False
            https: If true - use HTTPS(SSL) protocol. Default: None
            api_key: API key for authentication in Qdrant Cloud. Default: None
            prefix:
                If not None - add prefix to the REST URL path.
                Example: service/v1 will result in
                http://localhost:6333/service/v1/{qdrant-endpoint} for REST API.
                Default: None
            timeout:
                Timeout for REST and gRPC API requests.
                Default: 5.0 seconds for REST and unlimited for gRPC
            host:
                Host name of Qdrant service. If url and host are None, set to
                'localhost'. Default: None
            path:
                Path in which the vectors will be stored while using local mode.
                Default: None
            collection_name:
                Name of the Qdrant collection to be used. If not provided,
                it will be created randomly. Default: None
            distance_func:
                Distance function. One of: "Cosine" / "Euclid" / "Dot".
                Default: "Cosine"
            content_payload_key:
                A payload key used to store the content of the document.
                Default: "page_content"
            metadata_payload_key:
                A payload key used to store the metadata of the document.
                Default: "metadata"
            vector_name:
                Name of the vector to be used internally in Qdrant.
                Default: None
            batch_size:
                How many vectors upload per-request.
                Default: 64
            shard_number: Number of shards in collection. Default is 1, minimum is 1.
            replication_factor:
                Replication factor for collection. Default is 1, minimum is 1.
                Defines how many copies of each shard will be created.
                Have effect only in distributed mode.
            write_consistency_factor:
                Write consistency factor for collection. Default is 1, minimum is 1.
                Defines how many replicas should apply the operation for us to consider
                it successful. Increasing this number will make the collection more
                resilient to inconsistencies, but will also make it fail if not enough
                replicas are available.
                Does not have any performance impact.
                Have effect only in distributed mode.
            on_disk_payload:
                If true - point's payload will not be stored in memory.
                It will be read from the disk every time it is requested.
                This setting saves RAM by (slightly) increasing the response time.
                Note: those payload values that are involved in filtering and are
                indexed - remain in RAM.
            hnsw_config: Params for HNSW index
            optimizers_config: Params for optimizer
            wal_config: Params for Write-Ahead-Log
            quantization_config:
                Params for quantization, if None - quantization will be disabled
            init_from:
                Use data stored in another collection to initialize this collection
            force_recreate:
                Force recreating the collection
            **kwargs:
                Additional arguments passed directly into REST client initialization

        This is a user-friendly interface that:
        1. Creates embeddings, one for each text
        2. Initializes the Qdrant database as an in-memory docstore by default
           (and overridable to a remote docstore)
        3. Adds the text embeddings to the Qdrant database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Qdrant
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                qdrant = await Qdrant.afrom_texts(texts, embeddings, host="localhost")
        """
        qdrant = cls._construct_instance(
            texts,
            embedding,
            metadatas,
            ids,
            location,
            url,
            port,
            grpc_port,
            prefer_grpc,
            https,
            api_key,
            prefix,
            timeout,
            host,
            path,
            collection_name,
            distance_func,
            content_payload_key,
            metadata_payload_key,
            # Pass the remaining options by keyword: `_construct_instance` also
            # accepts `group_payload_key` and `group_id`, which this method does
            # not expose, so positional arguments would be misaligned from here on.
            vector_name=vector_name,
            shard_number=shard_number,
            replication_factor=replication_factor,
            write_consistency_factor=write_consistency_factor,
            on_disk_payload=on_disk_payload,
            hnsw_config=hnsw_config,
            optimizers_config=optimizers_config,
            wal_config=wal_config,
            quantization_config=quantization_config,
            init_from=init_from,
            force_recreate=force_recreate,
            **kwargs,
        )
        await qdrant.aadd_texts(texts, metadatas, ids, batch_size)
        return qdrant
  1231. @classmethod
  1232. def _construct_instance(
  1233. cls: Type[Qdrant],
  1234. texts: List[str],
  1235. embedding: Embeddings,
  1236. metadatas: Optional[List[dict]] = None,
  1237. ids: Optional[Sequence[str]] = None,
  1238. location: Optional[str] = None,
  1239. url: Optional[str] = None,
  1240. port: Optional[int] = 6333,
  1241. grpc_port: int = 6334,
  1242. prefer_grpc: bool = False,
  1243. https: Optional[bool] = None,
  1244. api_key: Optional[str] = None,
  1245. prefix: Optional[str] = None,
  1246. timeout: Optional[float] = None,
  1247. host: Optional[str] = None,
  1248. path: Optional[str] = None,
  1249. collection_name: Optional[str] = None,
  1250. distance_func: str = "Cosine",
  1251. content_payload_key: str = CONTENT_KEY,
  1252. metadata_payload_key: str = METADATA_KEY,
  1253. group_payload_key: str = GROUP_KEY,
  1254. group_id: str = None,
  1255. vector_name: Optional[str] = VECTOR_NAME,
  1256. shard_number: Optional[int] = None,
  1257. replication_factor: Optional[int] = None,
  1258. write_consistency_factor: Optional[int] = None,
  1259. on_disk_payload: Optional[bool] = None,
  1260. hnsw_config: Optional[common_types.HnswConfigDiff] = None,
  1261. optimizers_config: Optional[common_types.OptimizersConfigDiff] = None,
  1262. wal_config: Optional[common_types.WalConfigDiff] = None,
  1263. quantization_config: Optional[common_types.QuantizationConfig] = None,
  1264. init_from: Optional[common_types.InitFrom] = None,
  1265. force_recreate: bool = False,
  1266. **kwargs: Any,
  1267. ) -> Qdrant:
        try:
            import qdrant_client
        except ImportError:
            raise ValueError(
                "Could not import qdrant-client python package. "
                "Please install it with `pip install qdrant-client`."
            )
        from qdrant_client.http import models as rest

        # Just do a single quick embedding to get the vector size
        partial_embeddings = embedding.embed_documents(texts[:1])
        vector_size = len(partial_embeddings[0])
        collection_name = collection_name or uuid.uuid4().hex
        distance_func = distance_func.upper()
        is_new_collection = False
        client = qdrant_client.QdrantClient(
            location=location,
            url=url,
            port=port,
            grpc_port=grpc_port,
            prefer_grpc=prefer_grpc,
            https=https,
            api_key=api_key,
            prefix=prefix,
            timeout=timeout,
            host=host,
            path=path,
            **kwargs,
        )
        all_collection_names = [
            collection.name for collection in client.get_collections().collections
        ]
        # (Re)create the collection if it does not exist yet or if a recreate was
        # explicitly requested; otherwise validate that the existing configuration
        # matches the requested one.
        if collection_name not in all_collection_names or force_recreate:
            vectors_config = rest.VectorParams(
                size=vector_size,
                distance=rest.Distance[distance_func],
            )
            # If vector name was provided, we're going to use the named vectors feature
            # with just a single vector.
            if vector_name is not None:
                vectors_config = {  # type: ignore[assignment]
                    vector_name: vectors_config,
                }
            client.recreate_collection(
                collection_name=collection_name,
                vectors_config=vectors_config,
                shard_number=shard_number,
                replication_factor=replication_factor,
                write_consistency_factor=write_consistency_factor,
                on_disk_payload=on_disk_payload,
                hnsw_config=hnsw_config,
                optimizers_config=optimizers_config,
                wal_config=wal_config,
                quantization_config=quantization_config,
                init_from=init_from,
                timeout=timeout,  # type: ignore[arg-type]
            )
            is_new_collection = True
        else:
            # Get the vector configuration of the existing collection and vector, if it
            # was specified. If the old configuration does not match the current one,
            # an exception is raised.
            collection_info = client.get_collection(collection_name=collection_name)
            current_vector_config = collection_info.config.params.vectors
            if isinstance(current_vector_config, dict) and vector_name is not None:
                if vector_name not in current_vector_config:
                    raise QdrantException(
                        f"Existing Qdrant collection {collection_name} does not "
                        f"contain vector named {vector_name}. Did you mean one of the "
                        f"existing vectors: {', '.join(current_vector_config.keys())}? "
                        f"If you want to recreate the collection, set `force_recreate` "
                        f"parameter to `True`."
                    )
                current_vector_config = current_vector_config.get(
                    vector_name
                )  # type: ignore[assignment]
            elif isinstance(current_vector_config, dict) and vector_name is None:
                raise QdrantException(
                    f"Existing Qdrant collection {collection_name} uses named vectors. "
                    f"If you want to reuse it, please set `vector_name` to any of the "
                    f"existing named vectors: "
                    f"{', '.join(current_vector_config.keys())}. "  # noqa
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
            elif (
                not isinstance(current_vector_config, dict) and vector_name is not None
            ):
                raise QdrantException(
                    f"Existing Qdrant collection {collection_name} doesn't use named "
                    f"vectors. If you want to reuse it, please set `vector_name` to "
                    f"`None`. If you want to recreate the collection, set "
                    f"`force_recreate` parameter to `True`."
                )
            # Check if the vector configuration has the same dimensionality.
            if current_vector_config.size != vector_size:  # type: ignore[union-attr]
                raise QdrantException(
                    f"Existing Qdrant collection is configured for vectors with "
                    f"{current_vector_config.size} "  # type: ignore[union-attr]
                    f"dimensions. Selected embeddings are {vector_size}-dimensional. "
                    f"If you want to recreate the collection, set `force_recreate` "
                    f"parameter to `True`."
                )
            current_distance_func = (
                current_vector_config.distance.name.upper()  # type: ignore[union-attr]
            )
            if current_distance_func != distance_func:
                raise QdrantException(
                    f"Existing Qdrant collection is configured for "
                    f"{current_vector_config.distance} "  # type: ignore[union-attr]
                    f"similarity. Please set `distance_func` parameter to "
                    f"`{distance_func}` if you want to reuse it. If you want to "
                    f"recreate the collection, set `force_recreate` parameter to "
                    f"`True`."
                )
        qdrant = cls(
            client=client,
            collection_name=collection_name,
            embeddings=embedding,
            content_payload_key=content_payload_key,
            metadata_payload_key=metadata_payload_key,
            distance_strategy=distance_func,
            vector_name=vector_name,
            group_id=group_id,
            group_payload_key=group_payload_key,
            is_new_collection=is_new_collection,
        )
        return qdrant
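
    # Illustrative failure mode (a sketch, not an exhaustive contract): if the
    # collection "demo" already exists with 768-dimensional vectors while the
    # selected embeddings are 1536-dimensional, the validation branch above raises
    # ``QdrantException`` unless ``force_recreate=True`` is passed, in which case
    # the collection is dropped and created again via ``recreate_collection``.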

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function may differ depending on a few things,
        including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.
        """
        if self.distance_strategy == "COSINE":
            return self._cosine_relevance_score_fn
        elif self.distance_strategy == "DOT":
            return self._max_inner_product_relevance_score_fn
        elif self.distance_strategy == "EUCLID":
            return self._euclidean_relevance_score_fn
        else:
            raise ValueError(
                "Unknown distance strategy, must be COSINE, DOT, or EUCLID."
            )
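
    # Illustrative dispatch (``store`` is a hypothetical, already-constructed
    # Qdrant instance; this assumes ``distance_strategy`` is stored upper-cased,
    # matching what ``_construct_instance`` passes in above):
    #     store.distance_strategy = "EUCLID"
    #     store._select_relevance_score_fn()  # -> store._euclidean_relevance_score_fn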

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 and 1 to
                    filter the resulting set of retrieved docs.

        Returns:
            List of Tuples of (doc, similarity_score).
        """
        return self.similarity_search_with_score(query, k, **kwargs)
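
    # Hedged usage sketch (``store`` is a placeholder instance; per the docstring
    # above, ``score_threshold`` may be supplied through ``**kwargs``):
    #     docs_and_scores = store._similarity_search_with_relevance_scores(
    #         "what is a vector database?", k=4
    #     )
    #     # -> e.g. [(Document(...), 0.91), (Document(...), 0.87), ...]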

    @classmethod
    def _build_payloads(
        cls,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        content_payload_key: str,
        metadata_payload_key: str,
        group_id: Optional[str],
        group_payload_key: str,
    ) -> List[dict]:
        payloads = []
        for i, text in enumerate(texts):
            if text is None:
                raise ValueError(
                    "At least one of the texts is None. Please remove it before "
                    "calling .from_texts or .add_texts on Qdrant instance."
                )
            metadata = metadatas[i] if metadatas is not None else None
            payloads.append(
                {
                    content_payload_key: text,
                    metadata_payload_key: metadata,
                    group_payload_key: group_id,
                }
            )
        return payloads
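
    # Shape sketch of a single payload produced above, assuming the defaults
    # CONTENT_KEY="page_content", METADATA_KEY="metadata" and a GROUP_KEY of
    # "group_id" (the GROUP_KEY value is an assumption):
    #     {
    #         "page_content": "some text",
    #         "metadata": {"source": "a.txt"},
    #         "group_id": "tenant-42",
    #     }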

    @classmethod
    def _document_from_scored_point(
        cls,
        scored_point: Any,
        content_payload_key: str,
        metadata_payload_key: str,
    ) -> Document:
        return Document(
            page_content=scored_point.payload.get(content_payload_key),
            metadata=scored_point.payload.get(metadata_payload_key) or {},
        )

    @classmethod
    def _document_from_scored_point_grpc(
        cls,
        scored_point: Any,
        content_payload_key: str,
        metadata_payload_key: str,
    ) -> Document:
        from qdrant_client.conversions.conversion import grpc_to_payload

        payload = grpc_to_payload(scored_point.payload)
        return Document(
            page_content=payload[content_payload_key],
            metadata=payload.get(metadata_payload_key) or {},
        )

    def _build_condition(self, key: str, value: Any) -> List[rest.FieldCondition]:
        from qdrant_client.http import models as rest

        out = []
        if isinstance(value, dict):
            for _key, value in value.items():
                out.extend(self._build_condition(f"{key}.{_key}", value))
        elif isinstance(value, list):
            for _value in value:
                if isinstance(_value, dict):
                    out.extend(self._build_condition(f"{key}[]", _value))
                else:
                    out.extend(self._build_condition(f"{key}", _value))
        else:
            out.append(
                rest.FieldCondition(
                    key=key,
                    match=rest.MatchValue(value=value),
                )
            )
        return out

    def _qdrant_filter_from_dict(
        self, filter: Optional[DictFilter]
    ) -> Optional[rest.Filter]:
        from qdrant_client.http import models as rest

        if not filter:
            return None
        return rest.Filter(
            must=[
                condition
                for key, value in filter.items()
                for condition in self._build_condition(key, value)
            ]
        )
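
    # Illustrative conversion sketch: a nested filter such as
    #     {"genre": "drama", "meta": {"year": 1999}}
    # is flattened by ``_build_condition`` into two ``FieldCondition`` objects with
    # keys "genre" and "meta.year", which are then wrapped here in a ``rest.Filter``
    # whose ``must`` clause requires both matches.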

    def _embed_query(self, query: str) -> List[float]:
        """Embed query text.

        Used to provide backward compatibility with `embedding_function` argument.

        Args:
            query: Query text.

        Returns:
            List of floats representing the query embedding.
        """
        if self.embeddings is not None:
            embedding = self.embeddings.embed_query(query)
        else:
            if self._embeddings_function is not None:
                embedding = self._embeddings_function(query)
            else:
                raise ValueError("Neither embeddings nor embedding_function is set")
        return embedding.tolist() if hasattr(embedding, "tolist") else embedding
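
    # Behaviour sketch: with an ``Embeddings`` object attached the query vector
    # comes from ``embed_query``; with only the legacy ``embedding_function`` the
    # callable itself is used. Either way, a numpy array is converted to a plain
    # list via ``tolist`` before being returned (hypothetical call):
    #     vec = store._embed_query("hello qdrant")  # -> List[float]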

    def _embed_texts(self, texts: Iterable[str]) -> List[List[float]]:
        """Embed search texts.

        Used to provide backward compatibility with `embedding_function` argument.

        Args:
            texts: Iterable of texts to embed.

        Returns:
            List of floats representing the texts embedding.
        """
        if self.embeddings is not None:
            embeddings = self.embeddings.embed_documents(list(texts))
            if hasattr(embeddings, "tolist"):
                embeddings = embeddings.tolist()
        elif self._embeddings_function is not None:
            embeddings = []
            for text in texts:
                embedding = self._embeddings_function(text)
                # Check the individual embedding, not the accumulator list.
                if hasattr(embedding, "tolist"):
                    embedding = embedding.tolist()
                embeddings.append(embedding)
        else:
            raise ValueError("Neither embeddings nor embedding_function is set")
        return embeddings
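
    # Note (sketch): the ``Embeddings`` interface is typed to return
    # ``List[List[float]]``; the ``tolist`` checks above are only a guard for
    # embedding implementations that hand back numpy arrays instead of plain lists.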

    def _generate_rest_batches(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
        group_id: Optional[str] = None,
    ) -> Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
        from qdrant_client.http import models as rest

        texts_iterator = iter(texts)
        metadatas_iterator = iter(metadatas or [])
        ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
        while batch_texts := list(islice(texts_iterator, batch_size)):
            # Take the corresponding metadata and id for each text in a batch
            batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
            batch_ids = list(islice(ids_iterator, batch_size))

            # Generate the embeddings for all the texts in a batch
            batch_embeddings = self._embed_texts(batch_texts)

            points = [
                rest.PointStruct(
                    id=point_id,
                    vector=vector
                    if self.vector_name is None
                    else {self.vector_name: vector},
                    payload=payload,
                )
                for point_id, vector, payload in zip(
                    batch_ids,
                    batch_embeddings,
                    self._build_payloads(
                        batch_texts,
                        batch_metadatas,
                        self.content_payload_key,
                        self.metadata_payload_key,
                        self.group_id,
                        self.group_payload_key,
                    ),
                )
            ]

            yield batch_ids, points
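
    # Batching sketch: with the default ``batch_size`` of 64, 150 input texts
    # would be yielded as three batches of 64, 64 and 22 points, each point
    # carrying its id, its (optionally named) vector, and its payload.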