| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728 | import flask_restfulfrom flask import requestfrom flask_login import current_userfrom flask_restful import Resource, marshal, marshal_with, reqparsefrom werkzeug.exceptions import Forbidden, NotFoundimport servicesfrom configs import dify_configfrom controllers.console import apifrom controllers.console.apikey import api_key_fields, api_key_listfrom controllers.console.app.error import ProviderNotInitializeErrorfrom controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateErrorfrom controllers.console.setup import setup_requiredfrom controllers.console.wraps import account_initialization_requiredfrom core.errors.error import LLMBadRequestError, ProviderTokenNotInitErrorfrom core.indexing_runner import IndexingRunnerfrom core.model_runtime.entities.model_entities import ModelTypefrom core.provider_manager import ProviderManagerfrom core.rag.datasource.vdb.vector_type import VectorTypefrom core.rag.extractor.entity.extract_setting import ExtractSettingfrom core.rag.retrieval.retrieval_methods import RetrievalMethodfrom extensions.ext_database import dbfrom fields.app_fields import related_app_listfrom fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fieldsfrom fields.document_fields import document_status_fieldsfrom libs.login import login_requiredfrom models.dataset import Dataset, DatasetPermissionEnum, Document, DocumentSegmentfrom models.model import ApiToken, UploadFilefrom services.dataset_service import DatasetPermissionService, DatasetService, DocumentServicedef _validate_name(name):    if not name or len(name) < 1 or len(name) > 40:        raise ValueError("Name must be between 1 to 40 characters.")    return namedef _validate_description_length(description):    if len(description) > 400:        raise ValueError("Description cannot exceed 400 characters.")    return descriptionclass DatasetListApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self):        page = request.args.get("page", default=1, type=int)        limit = request.args.get("limit", default=20, type=int)        ids = request.args.getlist("ids")        # provider = request.args.get("provider", default="vendor")        search = request.args.get("keyword", default=None, type=str)        tag_ids = request.args.getlist("tag_ids")        if ids:            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)        else:            datasets, total = DatasetService.get_datasets(                page, limit, current_user.current_tenant_id, current_user, search, tag_ids            )        # check embedding setting        provider_manager = ProviderManager()        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)        model_names = []        for embedding_model in embedding_models:            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")        data = marshal(datasets, dataset_detail_fields)        for item in data:            if item["indexing_technique"] == "high_quality":                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"                if item_model in model_names:                    item["embedding_available"] = True                else:                    item["embedding_available"] = False            else:                item["embedding_available"] = True            if item.get("permission") == "partial_members":                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])                item.update({"partial_member_list": part_users_list})            else:                item.update({"partial_member_list": []})        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}        return response, 200    @setup_required    @login_required    @account_initialization_required    def post(self):        parser = reqparse.RequestParser()        parser.add_argument(            "name",            nullable=False,            required=True,            help="type is required. Name must be between 1 to 40 characters.",            type=_validate_name,        )        parser.add_argument(            "indexing_technique",            type=str,            location="json",            choices=Dataset.INDEXING_TECHNIQUE_LIST,            nullable=True,            help="Invalid indexing technique.",        )        parser.add_argument(            "external_knowledge_api_id",            type=str,            nullable=True,            required=False,        )        parser.add_argument(            "provider",            type=str,            nullable=True,            choices=Dataset.PROVIDER_LIST,            required=False,            default="vendor",        )        parser.add_argument(            "external_knowledge_id",            type=str,            nullable=True,            required=False,        )        args = parser.parse_args()        # The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator        if not current_user.is_dataset_editor:            raise Forbidden()        try:            dataset = DatasetService.create_empty_dataset(                tenant_id=current_user.current_tenant_id,                name=args["name"],                indexing_technique=args["indexing_technique"],                account=current_user,                permission=DatasetPermissionEnum.ONLY_ME,                provider=args["provider"],                external_knowledge_api_id=args["external_knowledge_api_id"],                external_knowledge_id=args["external_knowledge_id"],            )        except services.errors.dataset.DatasetNameDuplicateError:            raise DatasetNameDuplicateError()        return marshal(dataset, dataset_detail_fields), 201class DatasetApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        try:            DatasetService.check_dataset_permission(dataset, current_user)        except services.errors.account.NoPermissionError as e:            raise Forbidden(str(e))        data = marshal(dataset, dataset_detail_fields)        if data.get("permission") == "partial_members":            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)            data.update({"partial_member_list": part_users_list})        # check embedding setting        provider_manager = ProviderManager()        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)        model_names = []        for embedding_model in embedding_models:            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")        if data["indexing_technique"] == "high_quality":            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"            if item_model in model_names:                data["embedding_available"] = True            else:                data["embedding_available"] = False        else:            data["embedding_available"] = True        if data.get("permission") == "partial_members":            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)            data.update({"partial_member_list": part_users_list})        return data, 200    @setup_required    @login_required    @account_initialization_required    def patch(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        parser = reqparse.RequestParser()        parser.add_argument(            "name",            nullable=False,            help="type is required. Name must be between 1 to 40 characters.",            type=_validate_name,        )        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)        parser.add_argument(            "indexing_technique",            type=str,            location="json",            choices=Dataset.INDEXING_TECHNIQUE_LIST,            nullable=True,            help="Invalid indexing technique.",        )        parser.add_argument(            "permission",            type=str,            location="json",            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),            help="Invalid permission.",        )        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")        parser.add_argument(            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."        )        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid parent user list.")        parser.add_argument(            "external_retrieval_model",            type=dict,            required=False,            nullable=True,            location="json",            help="Invalid external retrieval model.",        )        parser.add_argument(            "external_knowledge_id",            type=str,            required=False,            nullable=True,            location="json",            help="Invalid external knowledge id.",        )        parser.add_argument(            "external_knowledge_api_id",            type=str,            required=False,            nullable=True,            location="json",            help="Invalid external knowledge api id.",        )        args = parser.parse_args()        data = request.get_json()        # check embedding model setting        if data.get("indexing_technique") == "high_quality":            DatasetService.check_embedding_model_setting(                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")            )        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator        DatasetPermissionService.check_permission(            current_user, dataset, data.get("permission"), data.get("partial_member_list")        )        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)        if dataset is None:            raise NotFound("Dataset not found.")        result_data = marshal(dataset, dataset_detail_fields)        tenant_id = current_user.current_tenant_id        if data.get("partial_member_list") and data.get("permission") == "partial_members":            DatasetPermissionService.update_partial_member_list(                tenant_id, dataset_id_str, data.get("partial_member_list")            )        # clear partial member list when permission is only_me or all_team_members        elif (            data.get("permission") == DatasetPermissionEnum.ONLY_ME            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM        ):            DatasetPermissionService.clear_partial_member_list(dataset_id_str)        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)        result_data.update({"partial_member_list": partial_member_list})        return result_data, 200    @setup_required    @login_required    @account_initialization_required    def delete(self, dataset_id):        dataset_id_str = str(dataset_id)        # The role of the current user in the ta table must be admin, owner, or editor        if not current_user.is_editor or current_user.is_dataset_operator:            raise Forbidden()        try:            if DatasetService.delete_dataset(dataset_id_str, current_user):                DatasetPermissionService.clear_partial_member_list(dataset_id_str)                return {"result": "success"}, 204            else:                raise NotFound("Dataset not found.")        except services.errors.dataset.DatasetInUseError:            raise DatasetInUseError()class DatasetUseCheckApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)        return {"is_using": dataset_is_using}, 200class DatasetQueryApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        try:            DatasetService.check_dataset_permission(dataset, current_user)        except services.errors.account.NoPermissionError as e:            raise Forbidden(str(e))        page = request.args.get("page", default=1, type=int)        limit = request.args.get("limit", default=20, type=int)        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)        response = {            "data": marshal(dataset_queries, dataset_query_detail_fields),            "has_more": len(dataset_queries) == limit,            "limit": limit,            "total": total,            "page": page,        }        return response, 200class DatasetIndexingEstimateApi(Resource):    @setup_required    @login_required    @account_initialization_required    def post(self):        parser = reqparse.RequestParser()        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")        parser.add_argument(            "indexing_technique",            type=str,            required=True,            choices=Dataset.INDEXING_TECHNIQUE_LIST,            nullable=True,            location="json",        )        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")        parser.add_argument(            "doc_language", type=str, default="English", required=False, nullable=False, location="json"        )        args = parser.parse_args()        # validate args        DocumentService.estimate_args_validate(args)        extract_settings = []        if args["info_list"]["data_source_type"] == "upload_file":            file_ids = args["info_list"]["file_info_list"]["file_ids"]            file_details = (                db.session.query(UploadFile)                .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))                .all()            )            if file_details is None:                raise NotFound("File not found.")            if file_details:                for file_detail in file_details:                    extract_setting = ExtractSetting(                        datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]                    )                    extract_settings.append(extract_setting)        elif args["info_list"]["data_source_type"] == "notion_import":            notion_info_list = args["info_list"]["notion_info_list"]            for notion_info in notion_info_list:                workspace_id = notion_info["workspace_id"]                for page in notion_info["pages"]:                    extract_setting = ExtractSetting(                        datasource_type="notion_import",                        notion_info={                            "notion_workspace_id": workspace_id,                            "notion_obj_id": page["page_id"],                            "notion_page_type": page["type"],                            "tenant_id": current_user.current_tenant_id,                        },                        document_model=args["doc_form"],                    )                    extract_settings.append(extract_setting)        elif args["info_list"]["data_source_type"] == "website_crawl":            website_info_list = args["info_list"]["website_info_list"]            for url in website_info_list["urls"]:                extract_setting = ExtractSetting(                    datasource_type="website_crawl",                    website_info={                        "provider": website_info_list["provider"],                        "job_id": website_info_list["job_id"],                        "url": url,                        "tenant_id": current_user.current_tenant_id,                        "mode": "crawl",                        "only_main_content": website_info_list["only_main_content"],                    },                    document_model=args["doc_form"],                )                extract_settings.append(extract_setting)        else:            raise ValueError("Data source type not support")        indexing_runner = IndexingRunner()        try:            response = indexing_runner.indexing_estimate(                current_user.current_tenant_id,                extract_settings,                args["process_rule"],                args["doc_form"],                args["doc_language"],                args["dataset_id"],                args["indexing_technique"],            )        except LLMBadRequestError:            raise ProviderNotInitializeError(                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."            )        except ProviderTokenNotInitError as ex:            raise ProviderNotInitializeError(ex.description)        except Exception as e:            raise IndexingEstimateError(str(e))        return response, 200class DatasetRelatedAppListApi(Resource):    @setup_required    @login_required    @account_initialization_required    @marshal_with(related_app_list)    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        try:            DatasetService.check_dataset_permission(dataset, current_user)        except services.errors.account.NoPermissionError as e:            raise Forbidden(str(e))        app_dataset_joins = DatasetService.get_related_apps(dataset.id)        related_apps = []        for app_dataset_join in app_dataset_joins:            app_model = app_dataset_join.app            if app_model:                related_apps.append(app_model)        return {"data": related_apps, "total": len(related_apps)}, 200class DatasetIndexingStatusApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id = str(dataset_id)        documents = (            db.session.query(Document)            .filter(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)            .all()        )        documents_status = []        for document in documents:            completed_segments = DocumentSegment.query.filter(                DocumentSegment.completed_at.isnot(None),                DocumentSegment.document_id == str(document.id),                DocumentSegment.status != "re_segment",            ).count()            total_segments = DocumentSegment.query.filter(                DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment"            ).count()            document.completed_segments = completed_segments            document.total_segments = total_segments            documents_status.append(marshal(document, document_status_fields))        data = {"data": documents_status}        return dataclass DatasetApiKeyApi(Resource):    max_keys = 10    token_prefix = "dataset-"    resource_type = "dataset"    @setup_required    @login_required    @account_initialization_required    @marshal_with(api_key_list)    def get(self):        keys = (            db.session.query(ApiToken)            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)            .all()        )        return {"items": keys}    @setup_required    @login_required    @account_initialization_required    @marshal_with(api_key_fields)    def post(self):        # The role of the current user in the ta table must be admin or owner        if not current_user.is_admin_or_owner:            raise Forbidden()        current_key_count = (            db.session.query(ApiToken)            .filter(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)            .count()        )        if current_key_count >= self.max_keys:            flask_restful.abort(                400,                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",                code="max_keys_exceeded",            )        key = ApiToken.generate_api_key(self.token_prefix, 24)        api_token = ApiToken()        api_token.tenant_id = current_user.current_tenant_id        api_token.token = key        api_token.type = self.resource_type        db.session.add(api_token)        db.session.commit()        return api_token, 200class DatasetApiDeleteApi(Resource):    resource_type = "dataset"    @setup_required    @login_required    @account_initialization_required    def delete(self, api_key_id):        api_key_id = str(api_key_id)        # The role of the current user in the ta table must be admin or owner        if not current_user.is_admin_or_owner:            raise Forbidden()        key = (            db.session.query(ApiToken)            .filter(                ApiToken.tenant_id == current_user.current_tenant_id,                ApiToken.type == self.resource_type,                ApiToken.id == api_key_id,            )            .first()        )        if key is None:            flask_restful.abort(404, message="API key not found")        db.session.query(ApiToken).filter(ApiToken.id == api_key_id).delete()        db.session.commit()        return {"result": "success"}, 204class DatasetApiBaseUrlApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self):        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}class DatasetRetrievalSettingApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self):        vector_type = dify_config.VECTOR_STORE        match vector_type:            case (                VectorType.MILVUS                | VectorType.RELYT                | VectorType.TIDB_VECTOR                | VectorType.CHROMA                | VectorType.TENCENT                | VectorType.PGVECTO_RS            ):                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}            case (                VectorType.QDRANT                | VectorType.WEAVIATE                | VectorType.OPENSEARCH                | VectorType.ANALYTICDB                | VectorType.MYSCALE                | VectorType.ORACLE                | VectorType.ELASTICSEARCH                | VectorType.PGVECTOR            ):                return {                    "retrieval_method": [                        RetrievalMethod.SEMANTIC_SEARCH.value,                        RetrievalMethod.FULL_TEXT_SEARCH.value,                        RetrievalMethod.HYBRID_SEARCH.value,                    ]                }            case _:                raise ValueError(f"Unsupported vector db type {vector_type}.")class DatasetRetrievalSettingMockApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, vector_type):        match vector_type:            case (                VectorType.MILVUS                | VectorType.RELYT                | VectorType.TIDB_VECTOR                | VectorType.CHROMA                | VectorType.TENCENT                | VectorType.PGVECTO_RS            ):                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}            case (                VectorType.QDRANT                | VectorType.WEAVIATE                | VectorType.OPENSEARCH                | VectorType.ANALYTICDB                | VectorType.MYSCALE                | VectorType.ORACLE                | VectorType.ELASTICSEARCH                | VectorType.PGVECTOR            ):                return {                    "retrieval_method": [                        RetrievalMethod.SEMANTIC_SEARCH.value,                        RetrievalMethod.FULL_TEXT_SEARCH.value,                        RetrievalMethod.HYBRID_SEARCH.value,                    ]                }            case _:                raise ValueError(f"Unsupported vector db type {vector_type}.")class DatasetErrorDocs(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200class DatasetPermissionUserListApi(Resource):    @setup_required    @login_required    @account_initialization_required    def get(self, dataset_id):        dataset_id_str = str(dataset_id)        dataset = DatasetService.get_dataset(dataset_id_str)        if dataset is None:            raise NotFound("Dataset not found.")        try:            DatasetService.check_dataset_permission(dataset, current_user)        except services.errors.account.NoPermissionError as e:            raise Forbidden(str(e))        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)        return {            "data": partial_members_list,        }, 200api.add_resource(DatasetListApi, "/datasets")api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
 |