import logging
from argparse import ArgumentTypeError
from datetime import datetime, timezone

from flask import request
from flask_login import current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import asc, desc
from transformers.hf_argparser import string_to_bool
from werkzeug.exceptions import Forbidden, NotFound

import services
from controllers.console import api
from controllers.console.app.error import (
    ProviderModelCurrentlyNotSupportError,
    ProviderNotInitializeError,
    ProviderQuotaExceededError,
)
from controllers.console.datasets.error import (
    ArchivedDocumentImmutableError,
    DocumentAlreadyFinishedError,
    DocumentIndexingError,
    IndexingEstimateError,
    InvalidActionError,
    InvalidMetadataError,
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.errors.error import (
    LLMBadRequestError,
    ModelCurrentlyNotSupportError,
    ProviderTokenNotInitError,
    QuotaExceededError,
)
from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.document_fields import (
    dataset_and_document_fields,
    document_fields,
    document_status_fields,
    document_with_segments_fields,
)
from libs.login import login_required
from models.dataset import Dataset, DatasetProcessRule, Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DatasetService, DocumentService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
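
# Console API resources for dataset documents: listing and creation, indexing
# estimates and status, detail and metadata access, enable/disable/archive
# state changes, pause/resume/retry of indexing, rename, and website re-sync.
# Routes are registered with the console `api` object at the bottom of this file.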


class DocumentResource(Resource):
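    """Base class for document endpoints: permission-checked lookup of a
    single document or of all documents in an upload batch."""
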
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)
        if not document:
            raise NotFound('Document not found.')

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> list[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound('Documents not found.')

        return documents


class GetProcessRuleApi(Resource):
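    """Return the processing mode and rules for a document: the dataset's most
    recent process rule when a document_id is given, the service defaults
    otherwise."""
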
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get('document_id')

        # get default rules
        mode = DocumentService.DEFAULT_RULES['mode']
        rules = DocumentService.DEFAULT_RULES['rules']
        if document_id:
            document = Document.query.get_or_404(document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)
            if not dataset:
                raise NotFound('Dataset not found.')

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule
            dataset_process_rule = db.session.query(DatasetProcessRule). \
                filter(DatasetProcessRule.dataset_id == document.dataset_id). \
                order_by(DatasetProcessRule.created_at.desc()). \
                limit(1). \
                one_or_none()
            if dataset_process_rule:
                mode = dataset_process_rule.mode
                rules = dataset_process_rule.rules_dict

        return {
            'mode': mode,
            'rules': rules
        }


class DatasetDocumentListApi(Resource):
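    """List the documents of a dataset with pagination, keyword search, and
    sorting; fetch=true additionally reports per-document segment counts."""
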
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        sort = request.args.get('sort', default='-created_at', type=str)
        # "yes", "true", "t", "y" and "1" are converted to True; anything else,
        # including a failed parse, falls back to False.
        try:
            fetch = string_to_bool(request.args.get('fetch', default='false'))
        except (ArgumentTypeError, ValueError):
            fetch = False
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        if sort.startswith('-'):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == 'hit_count':
            sub_query = db.select(DocumentSegment.document_id,
                                  db.func.sum(DocumentSegment.hit_count).label("total_hit_count")) \
                .group_by(DocumentSegment.document_id) \
                .subquery()

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
                .order_by(
                    sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)),
                    sort_logic(Document.position),
                )
        elif sort == 'created_at':
            query = query.order_by(
                sort_logic(Document.created_at),
                sort_logic(Document.position),
            )
        else:
            query = query.order_by(
                desc(Document.created_at),
                desc(Document.position),
            )

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
        if fetch:
            for document in documents:
                completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                                  DocumentSegment.document_id == str(document.id),
                                                                  DocumentSegment.status != 're_segment').count()
                total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)
        response = {
            'data': data,
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }

        return response
    documents_and_batch_fields = {
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    @cloud_edition_billing_resource_check('vector_space')
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, default=True, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {
            'documents': documents,
            'batch': batch
        }


class DatasetInitApi(Resource):
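    """Create a dataset together with its first documents in one request;
    high_quality indexing requires a configured default embedding model."""
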
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    @cloud_edition_billing_resource_check('vector_space')
    def post(self):
        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, required=True,
                            nullable=False, location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        if args['indexing_technique'] == 'high_quality':
            try:
                model_manager = ModelManager()
                model_manager.get_default_model_instance(
                    tenant_id=current_user.current_tenant_id,
                    model_type=ModelType.TEXT_EMBEDDING
                )
            except InvokeAuthorizationError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id,
                document_data=args,
                account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
            'documents': documents,
            'batch': batch
        }

        return response


class DocumentIndexingEstimateApi(DocumentResource):
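    """Estimate tokens, price, and segment count for an unfinished document;
    only upload_file sources are estimated, other types return the zeroed
    default response."""
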
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in ['completed', 'error']:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }

        if document.data_source_type == 'upload_file':
            data_source_info = document.data_source_info_dict
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']

                file = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == document.tenant_id,
                    UploadFile.id == file_id
                ).first()

                # raise error if file not found
                if not file:
                    raise NotFound('File not found.')

                extract_setting = ExtractSetting(
                    datasource_type="upload_file",
                    upload_file=file,
                    document_model=document.doc_form
                )

                indexing_runner = IndexingRunner()

                try:
                    response = indexing_runner.indexing_estimate(current_user.current_tenant_id, [extract_setting],
                                                                 data_process_rule_dict, document.doc_form,
                                                                 'English', dataset_id)
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        "No Embedding Model available. Please configure a valid provider "
                        "in Settings -> Model Provider.")
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)
                except Exception as e:
                    raise IndexingEstimateError(str(e))

        return response


class DocumentBatchIndexingEstimateApi(DocumentResource):
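    """Estimate indexing cost for every document in an upload batch; supports
    upload_file, notion_import, and website_crawl data sources."""
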
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }
        if not documents:
            return response
        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()
        info_list = []
        extract_settings = []
        for document in documents:
            if document.indexing_status in ['completed', 'error']:
                raise DocumentAlreadyFinishedError()
            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']
                info_list.append(file_id)
            # format document notion info
            elif (data_source_info and 'notion_workspace_id' in data_source_info
                    and 'notion_page_id' in data_source_info):
                pages = []
                page = {
                    'page_id': data_source_info['notion_page_id'],
                    'type': data_source_info['type']
                }
                pages.append(page)
                notion_info = {
                    'workspace_id': data_source_info['notion_workspace_id'],
                    'pages': pages
                }
                info_list.append(notion_info)

            if document.data_source_type == 'upload_file':
                file_id = data_source_info['upload_file_id']
                file_detail = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == current_user.current_tenant_id,
                    UploadFile.id == file_id
                ).first()

                if file_detail is None:
                    raise NotFound("File not found.")

                extract_setting = ExtractSetting(
                    datasource_type="upload_file",
                    upload_file=file_detail,
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == 'notion_import':
                extract_setting = ExtractSetting(
                    datasource_type="notion_import",
                    notion_info={
                        "notion_workspace_id": data_source_info['notion_workspace_id'],
                        "notion_obj_id": data_source_info['notion_page_id'],
                        "notion_page_type": data_source_info['type'],
                        "tenant_id": current_user.current_tenant_id
                    },
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == 'website_crawl':
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": data_source_info['provider'],
                        "job_id": data_source_info['job_id'],
                        "url": data_source_info['url'],
                        "tenant_id": current_user.current_tenant_id,
                        "mode": data_source_info['mode'],
                        "only_main_content": data_source_info['only_main_content']
                    },
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            else:
                raise ValueError('Data source type not supported.')

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(current_user.current_tenant_id, extract_settings,
                                                         data_process_rule_dict, document.doc_form,
                                                         'English', dataset_id)
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in Settings -> Model Provider.")
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))
        return response


class DocumentBatchIndexingStatusApi(DocumentResource):
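    """Report per-document indexing progress (completed vs. total segments)
    for an upload batch."""
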
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, document_status_fields))
        data = {
            'data': documents_status
        }
        return data


class DocumentIndexingStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments
        document.total_segments = total_segments
        if document.is_paused:
            document.indexing_status = 'paused'
        return marshal(document, document_status_fields)


class DocumentDetailApi(DocumentResource):
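    """Fetch a document's details; the metadata query parameter chooses the
    full record ('all'), metadata only ('only'), or everything except
    metadata ('without')."""
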
    METADATA_CHOICES = {'all', 'only', 'without'}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get('metadata', 'all')
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f'Invalid metadata value: {metadata}')

        if metadata == 'only':
            response = {
                'id': document.id,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata
            }
        elif metadata == 'without':
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }
        else:
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }

        return response, 200


class DocumentProcessingApi(DocumentResource):
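    """Pause or resume a document's indexing, depending on the action URL
    segment."""
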
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError('Document not in indexing state.')

            document.paused_by = current_user.id
            document.paused_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.is_paused = True
            db.session.commit()
        elif action == "resume":
            if document.indexing_status not in ["paused", "error"]:
                raise InvalidActionError('Document not in paused or error state.')

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {'result': 'success'}, 200


class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 204


class DocumentMetadataApi(DocumentResource):
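    """Replace a document's doc_type and doc_metadata; except for 'others',
    only metadata keys whose values match the registered schema types are
    kept."""
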
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get('doc_type')
        doc_metadata = req_data.get('doc_metadata')

        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError('Both doc_type and doc_metadata must be provided.')

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError('Invalid doc_type.')

        if not isinstance(doc_metadata, dict):
            raise ValueError('doc_metadata must be a dictionary.')

        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]

        document.doc_metadata = {}
        if doc_type == 'others':
            document.doc_metadata = doc_metadata
        else:
            for key, value_type in metadata_schema.items():
                value = doc_metadata.get(key)
                if value is not None and isinstance(value, value_type):
                    document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
        db.session.commit()

        return {'result': 'success', 'message': 'Document metadata updated.'}, 200


class DocumentStatusApi(DocumentResource):
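    """Enable, disable, archive, or un-archive a document. A 10-minute Redis
    key marks the document as being (re-)indexed so the same document is not
    queued twice."""
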
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_resource_check('vector_space')
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        # The current user's role in the tenant must be admin, owner, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        # check user's permission
        DatasetService.check_dataset_permission(dataset, current_user)

        document = self.get_document(dataset_id, document_id)

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise InvalidActionError("Document is being indexed, please try again later")

        if action == "enable":
            if document.enabled:
                raise InvalidActionError('Document already enabled.')

            document.enabled = True
            document.disabled_at = None
            document.disabled_by = None
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "disable":
            if not document.completed_at or document.indexing_status != 'completed':
                raise InvalidActionError('Document is not completed.')
            if not document.enabled:
                raise InvalidActionError('Document already disabled.')

            document.enabled = False
            document.disabled_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.disabled_by = current_user.id
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "archive":
            if document.archived:
                raise InvalidActionError('Document already archived.')

            document.archived = True
            document.archived_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.archived_by = current_user.id
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            if document.enabled:
                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "un_archive":
            if not document.archived:
                raise InvalidActionError('Document is not archived.')

            document.archived = False
            document.archived_at = None
            document.archived_by = None
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        else:
            raise InvalidActionError()


class DocumentPauseApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """Pause document indexing."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot pause a completed document.')

        return {'result': 'success'}, 204


class DocumentRecoverApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """Resume (recover) a paused document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')
        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Document is not in paused status.')

        return {'result': 'success'}, 204


class DocumentRetryApi(DocumentResource):
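    """Re-queue indexing for the given document ids, skipping (and logging)
    documents that are missing, archived, or already completed."""
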
    @setup_required
    @login_required
    @account_initialization_required
    def post(self, dataset_id):
        """Retry failed documents."""
        parser = reqparse.RequestParser()
        parser.add_argument('document_ids', type=list, required=True, nullable=False,
                            location='json')
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        retry_documents = []
        if not dataset:
            raise NotFound('Dataset not found.')
        for document_id in args['document_ids']:
            try:
                document_id = str(document_id)

                document = DocumentService.get_document(dataset.id, document_id)

                # 404 if document not found
                if document is None:
                    raise NotFound("Document not found.")

                # 403 if document is archived
                if DocumentService.check_archived(document):
                    raise ArchivedDocumentImmutableError()

                # 400 if document is completed
                if document.indexing_status == 'completed':
                    raise DocumentAlreadyFinishedError()

                retry_documents.append(document)
            except Exception as e:
                logging.error("Document %s retry failed: %s", document_id, e)
                continue
        # retry documents
        DocumentService.retry_document(dataset_id, retry_documents)

        return {'result': 'success'}, 204


class DocumentRenameApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(document_fields)
    def post(self, dataset_id, document_id):
        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()
        dataset = DatasetService.get_dataset(dataset_id)
        DatasetService.check_dataset_operator_permission(current_user, dataset)
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
        args = parser.parse_args()

        try:
            document = DocumentService.rename_document(dataset_id, document_id, args['name'])
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot rename document during indexing.')

        return document


class WebsiteDocumentSyncApi(DocumentResource):
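    """Trigger a re-crawl and re-index of a website_crawl document."""
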
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        """Sync a website document."""
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound('Document not found.')
        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')
        if document.data_source_type != 'website_crawl':
            raise ValueError('Document is not a website document.')
        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()
        # sync document
        DocumentService.sync_website_document(dataset_id, document)

        return {'result': 'success'}, 200


api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
                 '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DatasetInitApi,
                 '/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentProcessingApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>')
api.add_resource(DocumentDeleteApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentMetadataApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata')
api.add_resource(DocumentStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
api.add_resource(DocumentRetryApi, '/datasets/<uuid:dataset_id>/retry')
api.add_resource(DocumentRenameApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/rename')
api.add_resource(WebsiteDocumentSyncApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/website-sync')
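
# Quick smoke test of the routes above, as a sketch only: the URL prefix
# depends on where the console API blueprint is mounted (assumed here to be
# /console/api), and authentication follows whatever session/token scheme the
# console uses, so treat these as illustrative request shapes:
#
#   GET   /console/api/datasets/<dataset_id>/documents?page=1&limit=20&fetch=true
#   PATCH /console/api/datasets/<dataset_id>/documents/<document_id>/status/disable
#   POST  /console/api/datasets/<dataset_id>/retry   {"document_ids": ["<document_id>"]}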