# document.py

import json

from flask import request
from flask_restful import marshal, reqparse
from sqlalchemy import desc
from werkzeug.exceptions import NotFound

import services.dataset_service
from controllers.service_api import api
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import ArchivedDocumentImmutableError, DocumentIndexingError, \
    NoFileUploadedError, TooManyFilesError
from controllers.service_api.wraps import DatasetApiResource
from core.model_providers.error import ProviderTokenNotInitError
from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from libs.login import current_user
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DocumentService
from services.file_service import FileService

class DocumentAddByTextApi(DatasetApiResource):
    """Resource for documents."""

    def post(self, tenant_id, dataset_id):
        """Create document by text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, nullable=False, location='json')
        parser.add_argument('text', type=str, required=True, nullable=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if not dataset.indexing_technique and not args['indexing_technique']:
            raise ValueError('indexing_technique is required.')

        upload_file = FileService.upload_text(args.get('text'), args.get('name'))
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'data_source_type': 'upload_file',
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]

        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
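
# Illustrative client call for the create_by_text endpoint above (not part of this
# module). It assumes the service API is mounted under a /v1 prefix and
# authenticated with a dataset API token via a Bearer header, as DatasetApiResource
# typically enforces; the base URL and token are placeholders, and 'high_quality'
# stands in for any value from Dataset.INDEXING_TECHNIQUE_LIST.
#
#     import requests
#
#     resp = requests.post(
#         'https://api.example.com/v1/datasets/<dataset_id>/document/create_by_text',
#         headers={'Authorization': 'Bearer <dataset-api-key>'},
#         json={'name': 'notes.txt', 'text': 'Some content...',
#               'indexing_technique': 'high_quality'},
#     )
#     # the handler returns {'document': {...}, 'batch': '<batch-id>'}
#     document, batch = resp.json()['document'], resp.json()['batch']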

class DocumentUpdateByTextApi(DatasetApiResource):
    """Resource for updating documents."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update document by text."""
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=False, nullable=True, location='json')
        parser.add_argument('text', type=str, required=False, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=False, nullable=True, location='json')
        parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False,
                            location='json')
        parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False,
                            location='json')
        parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
                            location='json')
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if args['text']:
            upload_file = FileService.upload_text(args.get('text'), args.get('name'))
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'data_source_type': 'upload_file',
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source

        # validate args
        args['original_document_id'] = str(document_id)
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]

        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200

class DocumentAddByFileApi(DatasetApiResource):
    """Resource for documents."""

    def post(self, tenant_id, dataset_id):
        """Create document by upload file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if not dataset.indexing_technique and not args.get('indexing_technique'):
            raise ValueError('indexing_technique is required.')

        # check file before accessing it, so a missing upload raises
        # NoFileUploadedError instead of a bare KeyError
        if 'file' not in request.files:
            raise NoFileUploadedError()

        if len(request.files) > 1:
            raise TooManyFilesError()

        # save file info
        file = request.files['file']
        upload_file = FileService.upload_file(file, current_user)
        data_source = {
            'type': 'upload_file',
            'info_list': {
                'file_info_list': {
                    'file_ids': [upload_file.id]
                }
            }
        }
        args['data_source'] = data_source

        # validate args
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]

        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200
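
# Illustrative client call for the create_by_file endpoint above (not part of this
# module). The handler reads its JSON settings from the 'data' form field and the
# upload from the 'file' part, so the multipart request must carry both; the URL,
# token, filename, and 'high_quality' value are placeholders.
#
#     import json
#     import requests
#
#     with open('manual.pdf', 'rb') as f:
#         resp = requests.post(
#             'https://api.example.com/v1/datasets/<dataset_id>/document/create_by_file',
#             headers={'Authorization': 'Bearer <dataset-api-key>'},
#             data={'data': json.dumps({'indexing_technique': 'high_quality'})},
#             files={'file': f},
#         )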

class DocumentUpdateByFileApi(DatasetApiResource):
    """Resource for updating documents."""

    def post(self, tenant_id, dataset_id, document_id):
        """Update document by upload file."""
        args = {}
        if 'data' in request.form:
            args = json.loads(request.form['data'])
        if 'doc_form' not in args:
            args['doc_form'] = 'text_model'
        if 'doc_language' not in args:
            args['doc_language'] = 'English'

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        if 'file' in request.files:
            if len(request.files) > 1:
                raise TooManyFilesError()

            # save file info
            file = request.files['file']
            upload_file = FileService.upload_file(file, current_user)
            data_source = {
                'type': 'upload_file',
                'info_list': {
                    'file_info_list': {
                        'file_ids': [upload_file.id]
                    }
                }
            }
            args['data_source'] = data_source

        # validate args
        args['original_document_id'] = str(document_id)
        DocumentService.document_create_args_validate(args)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                document_data=args,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if 'process_rule' not in args else None,
                created_from='api'
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]

        documents_and_batch_fields = {
            'document': marshal(document, document_fields),
            'batch': batch
        }
        return documents_and_batch_fields, 200

class DocumentDeleteApi(DatasetApiResource):
    def delete(self, tenant_id, dataset_id, document_id):
        """Delete document."""
        document_id = str(document_id)
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

        # get dataset info
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()

        if not dataset:
            raise ValueError('Dataset does not exist.')

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound('Document does not exist.')

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # delete document
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError('Cannot delete document during indexing.')

        return {'result': 'success'}, 200

class DocumentListApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id):
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        page = request.args.get('page', default=1, type=int)
        limit = request.args.get('limit', default=20, type=int)
        search = request.args.get('keyword', default=None, type=str)
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()
        if not dataset:
            raise NotFound('Dataset not found.')

        query = Document.query.filter_by(
            dataset_id=str(dataset_id), tenant_id=tenant_id)

        if search:
            search = f'%{search}%'
            query = query.filter(Document.name.like(search))

        query = query.order_by(desc(Document.created_at))

        paginated_documents = query.paginate(
            page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items

        response = {
            'data': marshal(documents, document_fields),
            'has_more': len(documents) == limit,
            'limit': limit,
            'total': paginated_documents.total,
            'page': page
        }

        return response
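
# Illustrative query against the document list endpoint above (not part of this
# module). 'page', 'limit', and 'keyword' map directly to the request.args reads
# in the handler; the URL and token are placeholders.
#
#     import requests
#
#     resp = requests.get(
#         'https://api.example.com/v1/datasets/<dataset_id>/documents',
#         headers={'Authorization': 'Bearer <dataset-api-key>'},
#         params={'page': 1, 'limit': 20, 'keyword': 'invoice'},
#     )
#     documents = resp.json()['data']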

class DocumentIndexingStatusApi(DatasetApiResource):
    def get(self, tenant_id, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        tenant_id = str(tenant_id)

        # get dataset
        dataset = db.session.query(Dataset).filter(
            Dataset.tenant_id == tenant_id,
            Dataset.id == dataset_id
        ).first()
        if not dataset:
            raise NotFound('Dataset not found.')

        # get documents
        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound('Documents not found.')

        documents_status = []
        for document in documents:
            completed_segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                              DocumentSegment.document_id == str(document.id),
                                                              DocumentSegment.status != 're_segment').count()
            total_segments = DocumentSegment.query.filter(DocumentSegment.document_id == str(document.id),
                                                          DocumentSegment.status != 're_segment').count()
            document.completed_segments = completed_segments
            document.total_segments = total_segments
            if document.is_paused:
                document.indexing_status = 'paused'
            documents_status.append(marshal(document, document_status_fields))

        data = {
            'data': documents_status
        }
        return data

api.add_resource(DocumentAddByTextApi, '/datasets/<uuid:dataset_id>/document/create_by_text')
api.add_resource(DocumentAddByFileApi, '/datasets/<uuid:dataset_id>/document/create_by_file')
api.add_resource(DocumentUpdateByTextApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text')
api.add_resource(DocumentUpdateByFileApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file')
api.add_resource(DocumentDeleteApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>')
api.add_resource(DocumentListApi, '/datasets/<uuid:dataset_id>/documents')
api.add_resource(DocumentIndexingStatusApi, '/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status')
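
# Illustrative end-to-end flow (not part of this module): the 'batch' value
# returned by the create endpoints identifies an indexing run, which can be
# polled via the indexing-status route registered above. The URL and token are
# placeholders, and 'completed' is assumed here to be the terminal value of
# indexing_status as exposed by document_status_fields.
#
#     import time
#     import requests
#
#     batch = resp.json()['batch']  # from a create_by_text / create_by_file response
#     while True:
#         status = requests.get(
#             f'https://api.example.com/v1/datasets/<dataset_id>/documents/{batch}/indexing-status',
#             headers={'Authorization': 'Bearer <dataset-api-key>'},
#         ).json()['data']
#         if all(d['indexing_status'] == 'completed' for d in status):
#             break
#         time.sleep(2)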