import json
import logging
from typing import Generator, Union

from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound

import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, \
    ProviderNotInitializeError, ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, \
    CompletionRequestError
from controllers.console.universal_chat.wraps import UniversalChatResource
from core.constant import llm_constant
from core.conversation_message_task import PubHandler
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
    LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError
from libs.helper import uuid_value
from services.completion_service import CompletionService


class UniversalChatApi(UniversalChatResource):
    def post(self, universal_app):
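        """
        Handle a universal chat message from the console.

        The JSON body carries the user query, an optional conversation_id,
        and a model/tools selection that overrides the app's stored model
        config for this request. Responses are always streamed.
        """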
        app_model = universal_app

        parser = reqparse.RequestParser()
        parser.add_argument('query', type=str, required=True, location='json')
        parser.add_argument('conversation_id', type=uuid_value, location='json')
        parser.add_argument('model', type=str, required=True, location='json')
        parser.add_argument('tools', type=list, required=True, location='json')
        args = parser.parse_args()
        app_model_config = app_model.app_model_config

        # override the stored app model config with the requested model
        args['model_config'] = app_model_config.to_dict()
        args['model_config']['model']['name'] = args['model']

        if args['model'] not in llm_constant.models:
            raise ValueError("Model does not exist.")

        args['model_config']['model']['provider'] = llm_constant.models[args['model']]
        args['model_config']['agent_mode']['tools'] = args['tools']
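        # the built-in current_datetime tool is always enabled, whether or
        # not the client selected any tools of its own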
        if not args['model_config']['agent_mode']['tools']:
            args['model_config']['agent_mode']['tools'] = [
                {
                    "current_datetime": {
                        "enabled": True
                    }
                }
            ]
        else:
            args['model_config']['agent_mode']['tools'].append({
                "current_datetime": {
                    "enabled": True
                }
            })

        args['inputs'] = {}

        del args['model']
        del args['tools']
        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=True,
                is_model_config_override=True,
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation does not exist.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            # validation errors (e.g. an unknown model) are surfaced as-is
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


class UniversalChatStopApi(UniversalChatResource):
    def post(self, universal_app, task_id):
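        """Stop the in-flight generation task identified by task_id."""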
        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200


def compact_response(response: Union[dict, Generator]) -> Response:
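    """
    Wrap a completion result in a Flask Response.

    A dict becomes a plain JSON response; a generator becomes a
    server-sent-events stream, with any error raised mid-stream
    serialized into the stream as a `data:` event.
    """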
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except services.errors.conversation.ConversationNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Conversation does not exist.")).get_json()) + "\n\n"
            except services.errors.conversation.ConversationCompletedError:
                yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
            except services.errors.app_model_config.AppModelConfigBrokenError:
                logging.exception("App model config broken.")
                yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


api.add_resource(UniversalChatApi, '/universal-chat/messages')
api.add_resource(UniversalChatStopApi, '/universal-chat/messages/<string:task_id>/stop')
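
# Example request body for POST /universal-chat/messages (illustrative sketch;
# the model name must be a key in llm_constant.models, conversation_id is
# optional, and the full URL depends on where the console blueprint is mounted):
#
#   {
#       "query": "What time is it?",
#       "model": "gpt-3.5-turbo",
#       "tools": []
#   }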