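"""Console API resources for dataset (knowledge base) management.

Covers dataset CRUD, indexing estimates and status, dataset-scoped API keys,
retrieval settings, and tenant-level dataset statistics. Routes are registered
at the bottom of this module via api.add_resource.
"""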
import flask_restful  # type: ignore
from flask import request
from flask_login import current_user  # type: ignore
from flask_restful import Resource, marshal, marshal_with, reqparse  # type: ignore
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService
from services.dept_service import DeptService

def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
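

# Dataset collection endpoint: GET lists datasets with keyword/tag/category/
# creator filters and per-item embedding availability; POST creates an empty
# dataset (requires a dataset-editor role).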
class DatasetListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        auth_type = request.args.get("authType", default=None, type=int)
        creator_dept = request.args.get("creatorDept")
        creator = request.args.get("creator", default=None, type=str)
        category_ids = request.args.getlist("category_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets2(
                page,
                limit,
                current_user.current_tenant_id,
                current_user,
                search,
                tag_ids,
                category_ids,
                auth_type,
                creator_dept,
                creator,
                include_all,
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)
        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)
        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            # attach whether the current user may edit this dataset
            item["has_edit_permission"] = DatasetService.has_edit_permission(current_user.id, item["id"])
            # convert embedding_model_provider to plugin standard format
            if item["indexing_technique"] == "high_quality" and item["embedding_model_provider"]:
                item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                if item_model in model_names:
                    item["embedding_available"] = True
                else:
                    item["embedding_available"] = False
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201
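

# Single-dataset endpoint: GET returns detail plus embedding availability and
# partial-member info, PATCH updates settings and permissions, DELETE removes
# the dataset unless it is still in use.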
class DatasetApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        data = marshal(dataset, dataset_detail_fields)
        if dataset.indexing_technique == "high_quality":
            if dataset.embedding_model_provider:
                provider_id = ModelProviderID(dataset.embedding_model_provider)
                data["embedding_model_provider"] = str(provider_id)

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)
        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)
        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            if item_model in model_names:
                data["embedding_available"] = True
            else:
                data["embedding_available"] = False
        else:
            data["embedding_available"] = True

        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        data["has_edit_permission"] = DatasetService.has_edit_permission(current_user.id, dataset_id_str)
        return data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if (
            data.get("indexing_technique") == "high_quality"
            and data.get("embedding_model_provider") is not None
            and data.get("embedding_model") is not None
        ):
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)
        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()
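

# Reports whether a dataset is currently referenced by any app.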
class DatasetUseCheckApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200
class DatasetQueryApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)
        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200
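

# Estimates indexing cost before documents are actually processed: builds an
# ExtractSetting per source (uploaded file, Notion page, or crawled URL) and
# hands the batch to IndexingRunner.indexing_estimate.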
class DatasetIndexingEstimateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )
            # .all() returns a list, never None, so check for an empty result
            if not file_details:
                raise NotFound("File not found.")
            for file_detail in file_details:
                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                )
                extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200
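

# Lists the apps that reference a dataset via app-dataset joins.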
class DatasetRelatedAppListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)
        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)
        return {"data": related_apps, "total": len(related_apps)}, 200
class DatasetIndexingStatusApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != "re_segment",
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            documents_status.append(marshal(document, document_status_fields))
        data = {"data": documents_status}
        return data
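

# Dataset-scoped API keys: GET lists a tenant's keys, POST creates one
# (admin/owner only, capped at max_keys per tenant).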
class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )
        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200
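

# Deletes a dataset API key by id (admin/owner only).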
class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .filter(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )
        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()
        db.session.commit()
        return {"result": "success"}, 204
class DatasetApiBaseUrlApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
class DatasetRetrievalSettingApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
                | VectorType.MILVUS
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
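

# Same mapping as DatasetRetrievalSettingApi, but for a vector store type
# passed in the URL instead of the configured one.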
class DatasetRetrievalSettingMockApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.LINDORM
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
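

# Lists documents in a dataset whose indexing ended in an error state.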
class DatasetErrorDocs(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)
        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200
class DatasetPermissionUserListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        return {
            "data": partial_members_list,
        }, 200
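

# Returns the auto-disable log entries for a dataset.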
class DatasetAutoDisableLogApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200
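

# Tenant-level counts of datasets, tags, and departments.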
class DatasetCountApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        # tenant_id = current_user.current_tenant_id
        tenant_id = request.args.get("tenant_id", default=None, type=str)
        datasets_count = DatasetService.get_datasets_count(tenant_id, current_user)
        tags_count = DatasetService.get_tags_count(tenant_id)
        depts_count = DeptService.get_depts_count()
        response = {"datasets_count": datasets_count, "tags_count": tags_count, "depts_count": depts_count}
        return {"data": response}, 200
class DatasetUpdateStatsApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        """Get dataset update statistics."""
        tenant_id = request.args.get("tenant_id", default=None, type=str)
        stats = DatasetService.get_dataset_update_stats(tenant_id)
        # Convert to the shape the frontend expects. The period labels appear
        # to be rendered verbatim by the frontend, so they are kept in Chinese
        # (English equivalents in the trailing comments).
        response = {
            "data": [
                {"period": "半年以上", "count": stats["over_180_days"]},  # over six months
                {"period": "半年以内", "count": stats["within_180_days"]},  # within six months
                {"period": "90天内", "count": stats["within_90_days"]},  # within 90 days
                {"period": "30天内", "count": stats["within_30_days"]},  # within 30 days
                {"period": "7天内", "count": stats["within_7_days"]},  # within 7 days
                {"period": "3天内", "count": stats["within_3_days"]},  # within 3 days
            ]
        }
        return response, 200
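

# Dataset counts grouped by type for the current tenant.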
class DatasetTypeStatsApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        tenant_id = current_user.current_tenant_id
        response = DatasetService.get_dataset_type_stats(tenant_id)
        return {"data": response}, 200
api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")
api.add_resource(DatasetCountApi, "/datasets/count")
api.add_resource(DatasetUpdateStatsApi, "/datasets/update-stats")
api.add_resource(DatasetTypeStatsApi, "/datasets/type-stats")