| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181 | 
							- # -*- coding:utf-8 -*-
 
- import json
 
- import logging
 
- from typing import Generator, Union
 
- from flask import Response, stream_with_context
 
- from flask_login import current_user
 
- from flask_restful import reqparse
 
- from werkzeug.exceptions import InternalServerError, NotFound
 
- import services
 
- from controllers.console import api
 
- from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
 
-     ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
 
- from controllers.console.explore.error import NotCompletionAppError, NotChatAppError
 
- from controllers.console.explore.wraps import InstalledAppResource
 
- from core.conversation_message_task import PubHandler
 
- from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
 
-     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 
- from libs.helper import uuid_value
 
- from services.completion_service import CompletionService
 
# Completion and chat message endpoints (create + stop) for apps installed
# via the console "Explore" section.
 
class CompletionApi(InstalledAppResource):
    """Completion-message API for an installed (Explore) app.

    POST runs a completion against a 'completion'-mode app, in either
    blocking or streaming response mode.
    """

    def post(self, installed_app):
        """Create a completion for the installed app.

        JSON body:
            inputs (dict, required): prompt template variable inputs.
            query (str, optional): user query text.
            response_mode (str, optional): 'blocking' or 'streaming'.

        Returns:
            flask.Response: JSON body (blocking) or SSE stream (streaming),
            built by ``compact_response``.

        Raises:
            NotCompletionAppError: the app is not in 'completion' mode.
            NotFound, ConversationCompletedError, AppUnavailableError,
            ProviderNotInitializeError, ProviderQuotaExceededError,
            ProviderModelCurrentlyNotSupportError, CompletionRequestError,
            InternalServerError: mapped from service/LLM-layer errors.
        """
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError:
            # ValueError is propagated as-is (not masked as a 500);
            # a bare raise preserves the original traceback.
            raise
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()
 
class CompletionStopApi(InstalledAppResource):
    """Stop endpoint for an in-flight completion task of an installed app."""

    def post(self, installed_app, task_id):
        """Signal the running completion task *task_id* to stop.

        Raises:
            NotCompletionAppError: the app is not in 'completion' mode.

        Returns:
            tuple: ``{'result': 'success'}`` with HTTP status 200.
        """
        if installed_app.app.mode != 'completion':
            raise NotCompletionAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200
 
class ChatApi(InstalledAppResource):
    """Chat-message API for an installed (Explore) app.

    POST sends a chat message to a 'chat'-mode app, optionally continuing
    an existing conversation, in blocking or streaming response mode.
    """

    def post(self, installed_app):
        """Create a chat completion for the installed app.

        JSON body:
            inputs (dict, required): prompt template variable inputs.
            query (str, required): user message text.
            response_mode (str, optional): 'blocking' or 'streaming'.
            conversation_id (uuid, optional): conversation to continue.

        Returns:
            flask.Response: JSON body (blocking) or SSE stream (streaming),
            built by ``compact_response``.

        Raises:
            NotChatAppError: the app is not in 'chat' mode.
            NotFound, ConversationCompletedError, AppUnavailableError,
            ProviderNotInitializeError, ProviderQuotaExceededError,
            ProviderModelCurrentlyNotSupportError, CompletionRequestError,
            InternalServerError: mapped from service/LLM-layer errors.
        """
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, required=True, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        parser.add_argument('conversation_id', type=uuid_value, location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError:
            # ValueError is propagated as-is (not masked as a 500);
            # a bare raise preserves the original traceback.
            raise
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()
 
class ChatStopApi(InstalledAppResource):
    """Stop endpoint for an in-flight chat task of an installed app."""

    def post(self, installed_app, task_id):
        """Signal the running chat task *task_id* to stop.

        Raises:
            NotChatAppError: the app is not in 'chat' mode.

        Returns:
            tuple: ``{'result': 'success'}`` with HTTP status 200.
        """
        if installed_app.app.mode != 'chat':
            raise NotChatAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200
 
def compact_response(response: Union[dict, Generator]) -> Response:
    """Wrap a completion-service result as an HTTP response.

    Args:
        response: A dict (blocking mode), serialized as a single JSON
            response; or a generator (streaming mode), wrapped as a
            Server-Sent Events stream.

    Returns:
        flask.Response: mimetype 'application/json' for dict input,
        'text/event-stream' for generator input.
    """
    # NOTE: fixed annotation — `Union[dict | Generator]` is invalid Union
    # syntax (members must be comma-separated) and fails on Python < 3.10.
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            # Forward chunks as-is; translate service/LLM errors raised
            # mid-stream into SSE "data:" error events so the client gets a
            # structured error instead of an abruptly dropped connection.
            try:
                for chunk in response:
                    yield chunk
            except services.errors.conversation.ConversationNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
            except services.errors.conversation.ConversationCompletedError:
                yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
            except services.errors.app_model_config.AppModelConfigBrokenError:
                logging.exception("App model config broken.")
                yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')
 
- api.add_resource(CompletionApi, '/installed-apps/<uuid:installed_app_id>/completion-messages', endpoint='installed_app_completion')
 
- api.add_resource(CompletionStopApi, '/installed-apps/<uuid:installed_app_id>/completion-messages/<string:task_id>/stop', endpoint='installed_app_stop_completion')
 
- api.add_resource(ChatApi, '/installed-apps/<uuid:installed_app_id>/chat-messages', endpoint='installed_app_chat_completion')
 
- api.add_resource(ChatStopApi, '/installed-apps/<uuid:installed_app_id>/chat-messages/<string:task_id>/stop', endpoint='installed_app_stop_chat_completion')
 
 
  |