# completion.py
  1. # -*- coding:utf-8 -*-
  2. import json
  3. import logging
  4. from typing import Generator, Union
  5. from flask import Response, stream_with_context
  6. from flask_restful import reqparse
  7. from werkzeug.exceptions import InternalServerError, NotFound
  8. import services
  9. from controllers.web import api
  10. from controllers.web.error import AppUnavailableError, ConversationCompletedError, \
  11. ProviderNotInitializeError, NotChatAppError, NotCompletionAppError, CompletionRequestError, \
  12. ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError
  13. from controllers.web.wraps import WebApiResource
  14. from core.conversation_message_task import PubHandler
  15. from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
  16. LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
  17. from libs.helper import uuid_value
  18. from services.completion_service import CompletionService
  19. # define completion api for user
  20. class CompletionApi(WebApiResource):
  21. def post(self, app_model, end_user):
  22. if app_model.mode != 'completion':
  23. raise NotCompletionAppError()
  24. parser = reqparse.RequestParser()
  25. parser.add_argument('inputs', type=dict, required=True, location='json')
  26. parser.add_argument('query', type=str, location='json', default='')
  27. parser.add_argument('files', type=list, required=False, location='json')
  28. parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
  29. parser.add_argument('retriever_from', type=str, required=False, default='web_app', location='json')
  30. args = parser.parse_args()
  31. streaming = args['response_mode'] == 'streaming'
  32. args['auto_generate_name'] = False
  33. try:
  34. response = CompletionService.completion(
  35. app_model=app_model,
  36. user=end_user,
  37. args=args,
  38. from_source='api',
  39. streaming=streaming
  40. )
  41. return compact_response(response)
  42. except services.errors.conversation.ConversationNotExistsError:
  43. raise NotFound("Conversation Not Exists.")
  44. except services.errors.conversation.ConversationCompletedError:
  45. raise ConversationCompletedError()
  46. except services.errors.app_model_config.AppModelConfigBrokenError:
  47. logging.exception("App model config broken.")
  48. raise AppUnavailableError()
  49. except ProviderTokenNotInitError as ex:
  50. raise ProviderNotInitializeError(ex.description)
  51. except QuotaExceededError:
  52. raise ProviderQuotaExceededError()
  53. except ModelCurrentlyNotSupportError:
  54. raise ProviderModelCurrentlyNotSupportError()
  55. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  56. LLMRateLimitError, LLMAuthorizationError) as e:
  57. raise CompletionRequestError(str(e))
  58. except ValueError as e:
  59. raise e
  60. except Exception as e:
  61. logging.exception("internal server error.")
  62. raise InternalServerError()
  63. class CompletionStopApi(WebApiResource):
  64. def post(self, app_model, end_user, task_id):
  65. if app_model.mode != 'completion':
  66. raise NotCompletionAppError()
  67. PubHandler.stop(end_user, task_id)
  68. return {'result': 'success'}, 200
  69. class ChatApi(WebApiResource):
  70. def post(self, app_model, end_user):
  71. if app_model.mode != 'chat':
  72. raise NotChatAppError()
  73. parser = reqparse.RequestParser()
  74. parser.add_argument('inputs', type=dict, required=True, location='json')
  75. parser.add_argument('query', type=str, required=True, location='json')
  76. parser.add_argument('files', type=list, required=False, location='json')
  77. parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
  78. parser.add_argument('conversation_id', type=uuid_value, location='json')
  79. parser.add_argument('retriever_from', type=str, required=False, default='web_app', location='json')
  80. args = parser.parse_args()
  81. streaming = args['response_mode'] == 'streaming'
  82. args['auto_generate_name'] = False
  83. try:
  84. response = CompletionService.completion(
  85. app_model=app_model,
  86. user=end_user,
  87. args=args,
  88. from_source='api',
  89. streaming=streaming
  90. )
  91. return compact_response(response)
  92. except services.errors.conversation.ConversationNotExistsError:
  93. raise NotFound("Conversation Not Exists.")
  94. except services.errors.conversation.ConversationCompletedError:
  95. raise ConversationCompletedError()
  96. except services.errors.app_model_config.AppModelConfigBrokenError:
  97. logging.exception("App model config broken.")
  98. raise AppUnavailableError()
  99. except ProviderTokenNotInitError as ex:
  100. raise ProviderNotInitializeError(ex.description)
  101. except QuotaExceededError:
  102. raise ProviderQuotaExceededError()
  103. except ModelCurrentlyNotSupportError:
  104. raise ProviderModelCurrentlyNotSupportError()
  105. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  106. LLMRateLimitError, LLMAuthorizationError) as e:
  107. raise CompletionRequestError(str(e))
  108. except ValueError as e:
  109. raise e
  110. except Exception as e:
  111. logging.exception("internal server error.")
  112. raise InternalServerError()
  113. class ChatStopApi(WebApiResource):
  114. def post(self, app_model, end_user, task_id):
  115. if app_model.mode != 'chat':
  116. raise NotChatAppError()
  117. PubHandler.stop(end_user, task_id)
  118. return {'result': 'success'}, 200
  119. def compact_response(response: Union[dict, Generator]) -> Response:
  120. if isinstance(response, dict):
  121. return Response(response=json.dumps(response), status=200, mimetype='application/json')
  122. else:
  123. def generate() -> Generator:
  124. try:
  125. for chunk in response:
  126. yield chunk
  127. except services.errors.conversation.ConversationNotExistsError:
  128. yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
  129. except services.errors.conversation.ConversationCompletedError:
  130. yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
  131. except services.errors.app_model_config.AppModelConfigBrokenError:
  132. logging.exception("App model config broken.")
  133. yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
  134. except ProviderTokenNotInitError as ex:
  135. yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
  136. except QuotaExceededError:
  137. yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
  138. except ModelCurrentlyNotSupportError:
  139. yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
  140. except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
  141. LLMRateLimitError, LLMAuthorizationError) as e:
  142. yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
  143. except ValueError as e:
  144. yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
  145. except Exception:
  146. logging.exception("internal server error.")
  147. yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"
  148. return Response(stream_with_context(generate()), status=200,
  149. mimetype='text/event-stream')
# Route registration: completion and chat message endpoints, each with a
# companion stop endpoint for cancelling an in-flight streaming task.
api.add_resource(CompletionApi, '/completion-messages')
api.add_resource(CompletionStopApi, '/completion-messages/<string:task_id>/stop')
api.add_resource(ChatApi, '/chat-messages')
api.add_resource(ChatStopApi, '/chat-messages/<string:task_id>/stop')