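"""Console API controllers for dataset document management.

Exposes REST resources for creating, listing, inspecting, pausing,
resuming, enabling/disabling, archiving, renaming, retrying, syncing,
and deleting dataset documents, plus indexing-status and
indexing-estimate endpoints.
"""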
import logging
from argparse import ArgumentTypeError
from datetime import datetime, timezone

from flask import request
from flask_login import current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import asc, desc
from transformers.hf_argparser import string_to_bool
from werkzeug.exceptions import Forbidden, NotFound

import services
from controllers.console import api
from controllers.console.app.error import (
    ProviderModelCurrentlyNotSupportError,
    ProviderNotInitializeError,
    ProviderQuotaExceededError,
)
from controllers.console.datasets.error import (
    ArchivedDocumentImmutableError,
    DocumentAlreadyFinishedError,
    DocumentIndexingError,
    IndexingEstimateError,
    InvalidActionError,
    InvalidMetadataError,
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.errors.error import (
    LLMBadRequestError,
    ModelCurrentlyNotSupportError,
    ProviderTokenNotInitError,
    QuotaExceededError,
)
from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.document_fields import (
    dataset_and_document_fields,
    document_fields,
    document_status_fields,
    document_with_segments_fields,
)
from libs.login import login_required
from models.dataset import Dataset, DatasetProcessRule, Document, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DatasetService, DocumentService
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
class DocumentResource(Resource):
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)
        if not document:
            raise NotFound('Document not found.')

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> list[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound('Documents not found.')

        return documents
class GetProcessRuleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get('document_id')

        # get default rules
        mode = DocumentService.DEFAULT_RULES['mode']
        rules = DocumentService.DEFAULT_RULES['rules']
        if document_id:
            document = Document.query.get_or_404(document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)

            if not dataset:
                raise NotFound('Dataset not found.')

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule
            dataset_process_rule = db.session.query(DatasetProcessRule). \
                filter(DatasetProcessRule.dataset_id == document.dataset_id). \
                order_by(DatasetProcessRule.created_at.desc()). \
                limit(1). \
                one_or_none()
            if dataset_process_rule:
                mode = dataset_process_rule.mode
                rules = dataset_process_rule.rules_dict

        return {
            'mode': mode,
            'rules': rules
        }
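# Illustrative response for GET /datasets/process-rule (key names match the
# return value above; the rule fields shown are assumptions about the shape
# of DocumentService.DEFAULT_RULES, not a contract):
# {
#     "mode": "custom",
#     "rules": {
#         "pre_processing_rules": [{"id": "remove_extra_spaces", "enabled": true}],
#         "segmentation": {"separator": "\n", "max_tokens": 500}
#     }
# }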
class DatasetDocumentListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        sort = request.args.get('sort', default='-created_at', type=str)
        # "yes", "true", "t", "y" and "1" convert to True; any other or
        # unparsable value falls back to False.
        try:
            fetch = string_to_bool(request.args.get('fetch', default='false'))
        except (ArgumentTypeError, ValueError):
            fetch = False
        dataset = DatasetService.get_dataset(dataset_id)

        if not dataset:
            raise NotFound('Dataset not found.')

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        if sort.startswith('-'):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == 'hit_count':
            sub_query = db.select(DocumentSegment.document_id,
                                  db.func.sum(DocumentSegment.hit_count).label("total_hit_count")) \
                .group_by(DocumentSegment.document_id) \
                .subquery()

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
                .order_by(sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)))
        elif sort == 'created_at':
            query = query.order_by(sort_logic(Document.created_at))
        else:
            query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
        if fetch:
            for document in documents:
                completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                                  DocumentSegment.document_id == str(document.id),
                                                                  DocumentSegment.status != 're_segment').count()
                total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)

        response = {
            'data': data,
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }

        return response
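    # Illustrative listing call (query parameter names follow the parsing in
    # get() above; the dataset id is a placeholder):
    #   GET /datasets/<dataset_id>/documents?page=1&limit=20&keyword=faq&sort=-hit_count&fetch=true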
    documents_and_batch_fields = {
        'documents': fields.List(fields.Nested(document_fields)),
        'batch': fields.String
    }
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    @cloud_edition_billing_resource_check('vector_space')
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)

        if not dataset:
            raise NotFound('Dataset not found.')

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, default=True, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {
            'documents': documents,
            'batch': batch
        }
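# Illustrative JSON body for POST /datasets/<dataset_id>/documents (top-level
# field names follow the parser above; the nested data_source and process_rule
# layouts are assumptions, not a contract):
# {
#     "indexing_technique": "high_quality",
#     "data_source": {"type": "upload_file", "info_list": {"...": "..."}},
#     "process_rule": {"mode": "automatic"},
#     "duplicate": true,
#     "doc_form": "text_model",
#     "doc_language": "English"
# }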
class DatasetInitApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    @cloud_edition_billing_resource_check('vector_space')
    def post(self):
        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, required=True,
                            nullable=False, location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        if args['indexing_technique'] == 'high_quality':
            try:
                model_manager = ModelManager()
                model_manager.get_default_model_instance(
                    tenant_id=current_user.current_tenant_id,
                    model_type=ModelType.TEXT_EMBEDDING
                )
            except InvokeAuthorizationError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in the Settings -> Model Provider.")
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id,
                document_data=args,
                account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
            'documents': documents,
            'batch': batch
        }

        return response
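# POST /datasets/init accepts the same document payload as
# DatasetDocumentListApi.post, but indexing_technique, data_source and
# process_rule are required here because the dataset does not exist yet.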
class DocumentIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in ['completed', 'error']:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }

        if document.data_source_type == 'upload_file':
            data_source_info = document.data_source_info_dict
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']

                file = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == document.tenant_id,
                    UploadFile.id == file_id
                ).first()

                # raise error if file not found
                if not file:
                    raise NotFound('File not found.')

                extract_setting = ExtractSetting(
                    datasource_type="upload_file",
                    upload_file=file,
                    document_model=document.doc_form
                )

                indexing_runner = IndexingRunner()

                try:
                    response = indexing_runner.indexing_estimate(current_user.current_tenant_id, [extract_setting],
                                                                 data_process_rule_dict, document.doc_form,
                                                                 'English', dataset_id)
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        "No Embedding Model available. Please configure a valid provider "
                        "in the Settings -> Model Provider.")
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)
                except Exception as e:
                    raise IndexingEstimateError(str(e))

        return response
class DocumentBatchIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        response = {
            "tokens": 0,
            "total_price": 0,
            "currency": "USD",
            "total_segments": 0,
            "preview": []
        }
        if not documents:
            return response

        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()
        info_list = []
        extract_settings = []
        for document in documents:
            if document.indexing_status in ['completed', 'error']:
                raise DocumentAlreadyFinishedError()
            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and 'upload_file_id' in data_source_info:
                file_id = data_source_info['upload_file_id']
                info_list.append(file_id)
            # format document notion info
            elif data_source_info and 'notion_workspace_id' in data_source_info \
                    and 'notion_page_id' in data_source_info:
                pages = []
                page = {
                    'page_id': data_source_info['notion_page_id'],
                    'type': data_source_info['type']
                }
                pages.append(page)
                notion_info = {
                    'workspace_id': data_source_info['notion_workspace_id'],
                    'pages': pages
                }
                info_list.append(notion_info)

            if document.data_source_type == 'upload_file':
                file_id = data_source_info['upload_file_id']
                file_detail = db.session.query(UploadFile).filter(
                    UploadFile.tenant_id == current_user.current_tenant_id,
                    UploadFile.id == file_id
                ).first()

                if file_detail is None:
                    raise NotFound("File not found.")

                extract_setting = ExtractSetting(
                    datasource_type="upload_file",
                    upload_file=file_detail,
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == 'notion_import':
                extract_setting = ExtractSetting(
                    datasource_type="notion_import",
                    notion_info={
                        "notion_workspace_id": data_source_info['notion_workspace_id'],
                        "notion_obj_id": data_source_info['notion_page_id'],
                        "notion_page_type": data_source_info['type'],
                        "tenant_id": current_user.current_tenant_id
                    },
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == 'website_crawl':
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": data_source_info['provider'],
                        "job_id": data_source_info['job_id'],
                        "url": data_source_info['url'],
                        "tenant_id": current_user.current_tenant_id,
                        "mode": data_source_info['mode'],
                        "only_main_content": data_source_info['only_main_content']
                    },
                    document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            else:
                raise ValueError('Data source type not supported.')
        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(current_user.current_tenant_id, extract_settings,
                                                         data_process_rule_dict, document.doc_form,
                                                         'English', dataset_id)
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider "
                "in the Settings -> Model Provider.")
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))
        return response
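# Illustrative data_source_info payloads handled by the branches above (key
# names mirror the code; the concrete values are assumptions):
#   upload_file:   {"upload_file_id": "<uuid>"}
#   notion_import: {"notion_workspace_id": "<id>", "notion_page_id": "<id>", "type": "page"}
#   website_crawl: {"provider": "<provider>", "job_id": "<id>", "url": "<url>",
#                   "mode": "<mode>", "only_main_content": true}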
class DocumentBatchIndexingStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, document_status_fields))
        data = {
            'data': documents_status
        }
        return data
class DocumentIndexingStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments
        document.total_segments = total_segments
        if document.is_paused:
            document.indexing_status = 'paused'
        return marshal(document, document_status_fields)
class DocumentDetailApi(DocumentResource):
    METADATA_CHOICES = {'all', 'only', 'without'}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get('metadata', 'all')
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f'Invalid metadata value: {metadata}')

        if metadata == 'only':
            response = {
                'id': document.id,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata
            }
        elif metadata == 'without':
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }
        else:
            process_rules = DatasetService.get_process_rules(dataset_id)
            data_source_info = document.data_source_detail_dict
            response = {
                'id': document.id,
                'position': document.position,
                'data_source_type': document.data_source_type,
                'data_source_info': data_source_info,
                'dataset_process_rule_id': document.dataset_process_rule_id,
                'dataset_process_rule': process_rules,
                'name': document.name,
                'created_from': document.created_from,
                'created_by': document.created_by,
                'created_at': document.created_at.timestamp(),
                'tokens': document.tokens,
                'indexing_status': document.indexing_status,
                'completed_at': int(document.completed_at.timestamp()) if document.completed_at else None,
                'updated_at': int(document.updated_at.timestamp()) if document.updated_at else None,
                'indexing_latency': document.indexing_latency,
                'error': document.error,
                'enabled': document.enabled,
                'disabled_at': int(document.disabled_at.timestamp()) if document.disabled_at else None,
                'disabled_by': document.disabled_by,
                'archived': document.archived,
                'doc_type': document.doc_type,
                'doc_metadata': document.doc_metadata,
                'segment_count': document.segment_count,
                'average_segment_length': document.average_segment_length,
                'hit_count': document.hit_count,
                'display_status': document.display_status,
                'doc_form': document.doc_form
            }

        return response, 200
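# The metadata query parameter selects the response shape (paths illustrative):
#   GET /datasets/<dataset_id>/documents/<document_id>?metadata=only     -> id, doc_type, doc_metadata
#   GET /datasets/<dataset_id>/documents/<document_id>?metadata=without  -> full detail without doc metadata
#   GET /datasets/<dataset_id>/documents/<document_id>?metadata=all      -> full detail plus doc metadata (default)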
class DocumentProcessingApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError('Document not in indexing state.')

            document.paused_by = current_user.id
            document.paused_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.is_paused = True
            db.session.commit()
        elif action == "resume":
            if document.indexing_status not in ["paused", "error"]:
                raise InvalidActionError('Document not in paused or error state.')

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {'result': 'success'}, 200
class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")
        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 204
class DocumentMetadataApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get('doc_type')
        doc_metadata = req_data.get('doc_metadata')

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError('Both doc_type and doc_metadata must be provided.')

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError('Invalid doc_type.')

        if not isinstance(doc_metadata, dict):
            raise ValueError('doc_metadata must be a dictionary.')

        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]

        document.doc_metadata = {}
        if doc_type == 'others':
            document.doc_metadata = doc_metadata
        else:
            for key, value_type in metadata_schema.items():
                value = doc_metadata.get(key)
                if value is not None and isinstance(value, value_type):
                    document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
        db.session.commit()

        return {'result': 'success', 'message': 'Document metadata updated.'}, 200
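# Illustrative PUT body for the metadata endpoint (doc_type must be a key of
# DocumentService.DOCUMENT_METADATA_SCHEMA; the doc_metadata field names here
# are assumptions):
# {"doc_type": "book", "doc_metadata": {"title": "...", "author": "..."}}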
class DocumentStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_resource_check('vector_space')
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")
        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor:
            raise Forbidden()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise InvalidActionError("Document is being indexed, please try again later")

        if action == "enable":
            if document.enabled:
                raise InvalidActionError('Document already enabled.')

            document.enabled = True
            document.disabled_at = None
            document.disabled_by = None
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "disable":
            if not document.completed_at or document.indexing_status != 'completed':
                raise InvalidActionError('Document is not completed.')
            if not document.enabled:
                raise InvalidActionError('Document already disabled.')

            document.enabled = False
            document.disabled_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.disabled_by = current_user.id
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "archive":
            if document.archived:
                raise InvalidActionError('Document already archived.')

            document.archived = True
            document.archived_at = datetime.now(timezone.utc).replace(tzinfo=None)
            document.archived_by = current_user.id
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            if document.enabled:
                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)

            return {'result': 'success'}, 200
        elif action == "un_archive":
            if not document.archived:
                raise InvalidActionError('Document is not archived.')

            document.archived = False
            document.archived_at = None
            document.archived_by = None
            document.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
            db.session.commit()

            # Set cache to prevent indexing the same document multiple times
            redis_client.setex(indexing_cache_key, 600, 1)

            add_document_to_index_task.delay(document_id)

            return {'result': 'success'}, 200
        else:
            raise InvalidActionError()
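# Each successful status change above sets document_<id>_indexing in Redis
# with a 600-second TTL; while that key exists, further status changes are
# rejected so the async index task is not queued twice for one document.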
class DocumentPauseApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """pause document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot pause completed document.')

        return {'result': 'success'}, 204
class DocumentRecoverApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def patch(self, dataset_id, document_id):
        """recover document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')
        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document not found.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover (resume) the paused document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Document is not in paused status.')

        return {'result': 'success'}, 204
class DocumentRetryApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self, dataset_id):
        """retry document."""
        parser = reqparse.RequestParser()
        parser.add_argument('document_ids', type=list, required=True, nullable=False,
                            location='json')
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        retry_documents = []
        if not dataset:
            raise NotFound('Dataset not found.')
        for document_id in args['document_ids']:
            try:
                document_id = str(document_id)

                document = DocumentService.get_document(dataset.id, document_id)

                # 404 if document not found
                if document is None:
                    raise NotFound("Document not found.")

                # 403 if document is archived
                if DocumentService.check_archived(document):
                    raise ArchivedDocumentImmutableError()

                # 400 if document is completed
                if document.indexing_status == 'completed':
                    raise DocumentAlreadyFinishedError()
                retry_documents.append(document)
            except Exception as e:
                logging.error(f"Document {document_id} retry failed: {str(e)}")
                continue
        # retry document
        DocumentService.retry_document(dataset_id, retry_documents)

        return {'result': 'success'}, 204
class DocumentRenameApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(document_fields)
    def post(self, dataset_id, document_id):
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
        args = parser.parse_args()

        try:
            document = DocumentService.rename_document(dataset_id, document_id, args['name'])
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot rename document during indexing.')

        return document
class WebsiteDocumentSyncApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        """sync website document."""
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound('Dataset not found.')
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound('Document not found.')
        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden('No permission.')
        if document.data_source_type != 'website_crawl':
            raise ValueError('Document is not a website document.')
        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()
        # sync document
        DocumentService.sync_website_document(dataset_id, document)

        return {'result': 'success'}, 200
api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
api.add_resource(DatasetDocumentListApi,
                 '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DatasetInitApi,
                 '/datasets/init')
api.add_resource(DocumentIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate')
api.add_resource(DocumentBatchIndexingEstimateApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate')
api.add_resource(DocumentBatchIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status')
api.add_resource(DocumentIndexingStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status')
api.add_resource(DocumentDetailApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentProcessingApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>')
api.add_resource(DocumentDeleteApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentMetadataApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata')
api.add_resource(DocumentStatusApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
api.add_resource(DocumentRetryApi, '/datasets/<uuid:dataset_id>/retry')
api.add_resource(DocumentRenameApi,
                 '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/rename')
api.add_resource(WebsiteDocumentSyncApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/website-sync')
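# Illustrative calls against the routes above (the /console/api prefix is an
# assumption about where this blueprint is mounted; host and ids are placeholders):
#   curl 'https://<host>/console/api/datasets/<dataset_id>/documents?page=1&limit=20'
#   curl -X PATCH 'https://<host>/console/api/datasets/<dataset_id>/documents/<document_id>/status/disable'
#   curl -X POST 'https://<host>/console/api/datasets/<dataset_id>/retry' \
#        -H 'Content-Type: application/json' -d '{"document_ids": ["<document_id>"]}'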