document.py

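"""Service API controllers for dataset documents: create/update by text or
file, delete, list, and batch indexing-status endpoints."""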
import json

from flask import request
from flask_restful import marshal, reqparse
from sqlalchemy import desc
from werkzeug.exceptions import NotFound

import services.errors.document
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import ArchivedDocumentImmutableError, DocumentIndexingError, \
    NoFileUploadedError, TooManyFilesError
from controllers.service_api.wraps import DatasetApiResource
from core.login.login import current_user
from core.model_providers.error import ProviderTokenNotInitError
from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.file_service import FileService
class DocumentAddByTextApi(DatasetApiResource):
    """Resource for documents."""

    def post(self, tenant_id, dataset_id):
        """Create a document from raw text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
        parser.add_argument('text', type=str, required=True, nullable=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        args = parser.parse_args()

        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')
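        # Persist the raw text as an upload file so text and file ingestion
        # share the same file-based indexing pipeline.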
        upload_file = FileService.upload_text(args.get('text'), args.get('name'))
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'data_source_type': 'upload_file',
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
class DocumentUpdateByTextApi(DatasetApiResource):
    """Resource for updating documents."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update a document from raw text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=False, nullable=True, location='json')
        parser.add_argument('text', type=str, required=False, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        args = parser.parse_args()

        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')
        if args['text']:
            upload_file = FileService.upload_text(args.get('text'), args.get('name'))
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'data_source_type': 'upload_file',
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source
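        # An update is modeled as creating a new document version that points
        # back to the existing one via original_document_id.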
        args['original_document_id'] = str(document_id)

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
class DocumentAddByFileApi(DatasetApiResource):
    """Resource for documents."""

    def post(self, tenant_id, dataset_id):
        """Create a document from an uploaded file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if not dataset.indexing_technique and not args.get('indexing_technique'):
            raise ValueError('indexing_technique is required.')

        # check file
        if 'file' not in request.files:
            raise NoFileUploadedError()
        if len(request.files) > 1:
            raise TooManyFilesError()

        # save file info
        file = request.files['file']
        upload_file = FileService.upload_file(file)
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
class DocumentUpdateByFileApi(DatasetApiResource):
    """Resource for updating documents."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update a document from an uploaded file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')
        if 'file' in request.files:
            if len(request.files) > 1:
                raise TooManyFilesError()

            # save file info
            file = request.files['file']
            upload_file = FileService.upload_file(file)
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source

        args['original_document_id'] = str(document_id)

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)

        document = documents[0]
        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
class DocumentDeleteApi(DatasetApiResource):
    def delete(self, tenant_id, dataset_id, document_id):
        """Delete document."""
        document_id = str(document_id)
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

        # get dataset info
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound('Document does not exist.')

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # delete document
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 200
class DocumentListApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id):
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)

        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise NotFound('Dataset not found.')

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
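        # has_more is a page-fill heuristic: a full page suggests more results.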
        response = {
            'data': marshal(documents, document_fields),
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }
        return response
class DocumentIndexingStatusApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        tenant_id = str(tenant_id)

        # get dataset
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise NotFound('Dataset not found.')

        # get documents
        documents = DocumentService.get_batch_documents(dataset_id, batch)

        if not documents:
            raise NotFound('Documents not found.')

        documents_status = []
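        # Report per-document progress as completed vs. total segments,
        # excluding segments queued for re-segmentation.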
        for document in documents:
            completed_segments = DocumentSegment.query.filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != 're_segment'
            ).count()
            total_segments = DocumentSegment.query.filter(
                DocumentSegment.document_id == str(document.id),
                DocumentSegment.status != 're_segment'
            ).count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, document_status_fields))

        data = {
            'data': documents_status
        }
        return data
api.add_resource(DocumentAddByTextApi, '/datasets/<uuid:dataset_id>/document/create_by_text')
api.add_resource(DocumentAddByFileApi, '/datasets/<uuid:dataset_id>/document/create_by_file')
api.add_resource(DocumentUpdateByTextApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text')
api.add_resource(DocumentUpdateByFileApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file')
api.add_resource(DocumentDeleteApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentListApi, '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DocumentIndexingStatusApi, '/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status')
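
# --- Usage sketch (not part of the controller) ------------------------------
# A minimal client example, assuming the service API is mounted under a /v1
# prefix and authenticated with a Bearer dataset API key; the base URL, key,
# IDs, and file names below are placeholders, not values from this module:
#
#   import json
#   import requests
#
#   BASE = 'https://api.example.com/v1'
#   HEADERS = {'Authorization': 'Bearer <dataset_api_key>'}
#
#   # create a document from text (JSON body, see DocumentAddByTextApi)
#   r = requests.post(
#       f'{BASE}/datasets/<dataset_id>/document/create_by_text',
#       headers=HEADERS,
#       json={'name': 'notes.txt', 'text': 'hello', 'indexing_technique': 'high_quality'},
#   )
#
#   # create a document from a file: per DocumentAddByFileApi, args travel as
#   # a JSON string in the multipart form field 'data', the binary in 'file'
#   r = requests.post(
#       f'{BASE}/datasets/<dataset_id>/document/create_by_file',
#       headers=HEADERS,
#       data={'data': json.dumps({'indexing_technique': 'high_quality'})},
#       files={'file': open('notes.pdf', 'rb')},
#   )
#
#   # poll the batch indexing status returned by either create call
#   batch = r.json()['batch']
#   requests.get(f'{BASE}/datasets/<dataset_id>/documents/{batch}/indexing-status',
#                headers=HEADERS)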