datasets.py

import flask_restful
from flask import request
from flask_login import current_user
from flask_restful import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description


class DatasetListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
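        """List datasets visible to the current tenant.

        Supports pagination (``page``/``limit``), keyword search, tag filters,
        and an explicit ``ids`` filter; each result is annotated with whether
        its embedding model is still available and with its partial-member list.
        """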
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            if item["indexing_technique"] == "high_quality":
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                item["embedding_available"] = item_model in model_names
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
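        """Create an empty dataset owned by the current tenant.

        Requires a dataset-editor role; the new dataset defaults to the
        ONLY_ME permission and may reference an external knowledge API.
        """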
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201


class DatasetApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
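        """Return the detail of a single dataset.

        Checks read permission first, then annotates the payload with
        embedding-model availability and, for partial_members permission,
        the list of members with access.
        """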
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        data = marshal(dataset, dataset_detail_fields)

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            data["embedding_available"] = item_model in model_names
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        return data, 200

    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id):
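        """Update a dataset's settings.

        Validates the embedding-model setting for high-quality indexing,
        enforces permission rules, and keeps the partial-member list in
        sync with the requested permission value.
        """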
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if data.get("indexing_technique") == "high_quality":
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif data.get("permission") in (DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id):
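        """Delete a dataset and clear its partial-member list.

        Only admins, owners, or editors may delete; raises DatasetInUseError
        when the dataset is still referenced by an app.
        """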
        dataset_id_str = str(dataset_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()


class DatasetUseCheckApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
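        """Return whether the dataset is currently in use by any app."""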
        dataset_id_str = str(dataset_id)
        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200


class DatasetQueryApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
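        """List the query history recorded against a dataset, paginated."""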
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200


class DatasetIndexingEstimateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
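        """Estimate indexing cost for a batch of sources before ingestion.

        Builds ExtractSetting objects for the upload_file, notion_import, or
        website_crawl data source described in ``info_list`` and delegates to
        IndexingRunner.indexing_estimate.
        """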
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)

        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )
            if not file_details:
                raise NotFound("File not found.")
            for file_detail in file_details:
                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                )
                extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type is not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response, 200


class DatasetRelatedAppListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
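        """List the apps whose configurations reference this dataset."""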
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200


class DatasetIndexingStatusApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
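        """Report per-document indexing progress (completed vs. total segments)."""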
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != "re_segment",
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            documents_status.append(marshal(document, document_status_fields))
        data = {"data": documents_status}
        return data


class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
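        """Create a dataset API key (admin/owner only, capped at max_keys)."""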
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200


class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
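        """Revoke a dataset API key belonging to the current tenant (admin/owner only)."""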
        api_key_id = str(api_key_id)

        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .filter(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204


class DatasetApiBaseUrlApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}


class DatasetRetrievalSettingApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
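        """Return the retrieval methods supported by the configured vector store.

        Some stores support semantic search only; others additionally support
        full-text and hybrid search.
        """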
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.PGVECTOR
                | VectorType.TIDB_ON_QDRANT
                | VectorType.COUCHBASE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetRetrievalSettingMockApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
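        """Same as DatasetRetrievalSettingApi, but for an explicit vector type
        passed in the URL rather than the configured VECTOR_STORE."""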
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
                | VectorType.OCEANBASE
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


class DatasetErrorDocs(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200


class DatasetPermissionUserListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200


api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
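
# A minimal client-side sketch of the list endpoint, kept as a comment so it
# never runs at import time. Assumptions, not defined in this module: the
# console API is mounted under "/console/api", the server listens on
# localhost:5001, and the caller holds a valid console session cookie.
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:5001/console/api/datasets",
#       params={"page": 1, "limit": 20, "keyword": "faq"},
#       cookies={"session": "<console-session-cookie>"},  # hypothetical auth value
#   )
#   resp.raise_for_status()
#   body = resp.json()  # {"data": [...], "has_more": ..., "limit": 20, "total": ..., "page": 1}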