# document.py

import json

from flask import request
from flask_restful import marshal, reqparse
from sqlalchemy import desc
from werkzeug.exceptions import NotFound

import services.dataset_service
import services.errors.document  # made explicit: DocumentDeleteApi catches services.errors.document.DocumentIndexingError
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import (
    ArchivedDocumentImmutableError,
    DocumentIndexingError,
    NoFileUploadedError,
    TooManyFilesError,
)
from controllers.service_api.wraps import DatasetApiResource
from core.model_providers.error import ProviderTokenNotInitError
from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from libs.login import current_user  # single source for current_user (duplicate flask_login import dropped)
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.file_service import FileService


class DocumentAddByTextApi(DatasetApiResource):
    """Resource for creating documents from raw text."""

    def post(self, tenant_id, dataset_id):
        """Create a document from text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
        parser.add_argument('text', type=str, required=True, nullable=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        args = parser.parse_args()

        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        # persist the raw text as an upload file, then reference it as the data source
        upload_file = FileService.upload_text(args.get('text'), args.get('name'))
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'data_source_type': 'upload_file',
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
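
# Example request body for DocumentAddByTextApi (a sketch; the values are illustrative,
# and 'high_quality' is assumed to be a member of Dataset.INDEXING_TECHNIQUE_LIST):
#
#   POST /datasets/<dataset_id>/document/create_by_text
#   {
#       "name": "notes.txt",
#       "text": "Full document text ...",
#       "indexing_technique": "high_quality",
#       "process_rule": {"mode": "automatic"}
#   }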


class DocumentUpdateByTextApi(DatasetApiResource):
    """Resource for updating documents from raw text."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update a document from text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=False, nullable=True, location='json')
        parser.add_argument('text', type=str, required=False, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        # only replace the data source when new text was supplied
        if args['text']:
            upload_file = FileService.upload_text(args.get('text'), args.get('name'))
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'data_source_type': 'upload_file',
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source

        # validate args
        args['original_document_id'] = str(document_id)
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200


class DocumentAddByFileApi(DatasetApiResource):
    """Resource for creating documents from an uploaded file."""

    def post(self, tenant_id, dataset_id):
        """Create a document from an uploaded file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        # args is a plain dict here, so use .get() to avoid a KeyError
        if not dataset.indexing_technique and not args.get('indexing_technique'):
            raise ValueError('indexing_technique is required.')

        # check the upload before touching it, so a missing file raises a clear error
        if 'file' not in request.files:
            raise NoFileUploadedError()
        if len(request.files) > 1:
            raise TooManyFilesError()

        # save file info
        file = request.files['file']
        upload_file = FileService.upload_file(file, current_user)
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
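
# Example multipart/form-data request for DocumentAddByFileApi (a sketch; the 'data'
# part carries the JSON settings parsed above, the 'file' part carries the upload):
#
#   POST /datasets/<dataset_id>/document/create_by_file
#   Content-Type: multipart/form-data
#     data: '{"indexing_technique": "high_quality", "process_rule": {"mode": "automatic"}}'
#     file: <binary file content>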


class DocumentUpdateByFileApi(DatasetApiResource):
    """Resource for updating documents from an uploaded file."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update a document from an uploaded file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        # the file is optional on update; only replace the data source when one is sent
        if 'file' in request.files:
            if len(request.files) > 1:
                raise TooManyFilesError()

            # save file info
            file = request.files['file']
            upload_file = FileService.upload_file(file, current_user)
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source

        # validate args
        args['original_document_id'] = str(document_id)
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200


class DocumentDeleteApi(DatasetApiResource):
    def delete(self, tenant_id, dataset_id, document_id):
        """Delete a document."""
        document_id = str(document_id)
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

        # get dataset info
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound('Document does not exist.')

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # delete document
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 200


class DocumentListApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id):
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)

        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()
        if not dataset:
            raise NotFound('Dataset not found.')

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items

        response = {
            'data': marshal(documents, document_fields),
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }
        return response
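
# Example listing call (a sketch; the query parameters map to the request.args reads above):
#
#   GET /datasets/<dataset_id>/documents?page=1&limit=20&keyword=report
#
# Results come back newest-first; note that 'has_more' is inferred from a full page
# (len(documents) == limit) rather than from 'total'.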


class DocumentIndexingStatusApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        tenant_id = str(tenant_id)

        # get dataset
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()
        if not dataset:
            raise NotFound('Dataset not found.')

        # get documents
        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound('Documents not found.')

        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, document_status_fields))
        data = {
            'data': documents_status
        }
        return data
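
# Each status record is marshalled with document_status_fields (defined elsewhere) and
# additionally carries the completed_segments / total_segments counts computed above;
# a paused document reports indexing_status == 'paused'.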


# Route registration. tenant_id does not appear in any URL; the DatasetApiResource
# wrapper is expected to resolve it from the caller's dataset API token and pass it
# to each handler.
api.add_resource(DocumentAddByTextApi, '/datasets/<uuid:dataset_id>/document/create_by_text')
api.add_resource(DocumentAddByFileApi, '/datasets/<uuid:dataset_id>/document/create_by_file')
api.add_resource(DocumentUpdateByTextApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text')
api.add_resource(DocumentUpdateByFileApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file')
api.add_resource(DocumentDeleteApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentListApi, '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DocumentIndexingStatusApi, '/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status')
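
# Example invocation (a sketch: the '/v1' mount prefix and Bearer-token auth are
# assumptions about how the service API is deployed, not shown in this file):
#
#   curl -X POST 'https://api.example.com/v1/datasets/<dataset_id>/document/create_by_text' \
#        -H 'Authorization: Bearer <dataset_api_key>' \
#        -H 'Content-Type: application/json' \
#        -d '{"name": "notes.txt", "text": "Full document text ...", "indexing_technique": "high_quality"}'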