- import json
- import logging
- from collections.abc import Generator
- import requests
- from flask import request
- from flask_restful import Resource, reqparse # type: ignore
- from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
- import services
- from controllers.service_api import api
- from controllers.service_api.app.error import (
- AppUnavailableError,
- CompletionRequestError,
- ConversationCompletedError,
- NotChatAppError,
- ProviderModelCurrentlyNotSupportError,
- ProviderNotInitializeError,
- ProviderQuotaExceededError,
- )
- from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
- from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
- from core.app.apps.base_app_queue_manager import AppQueueManager
- from core.app.entities.app_invoke_entities import InvokeFrom
- from core.errors.error import (
- ModelCurrentlyNotSupportError,
- ProviderTokenNotInitError,
- QuotaExceededError,
- )
- from core.model_runtime.errors.invoke import InvokeError
- from libs import helper
- from libs.helper import uuid_value
- from models.model import App, AppMode, EndUser
- from services.app_generate_service import AppGenerateService
- from services.errors.llm import InvokeRateLimitError
class CompletionApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser):
        """Create a completion message for a completion-mode app.

        Translates service-layer and provider errors into the service-API
        HTTP error types; unknown failures are logged and become 500s.
        """
        # Guard: this endpoint only serves apps in "completion" mode.
        if app_model.mode != "completion":
            raise AppUnavailableError()

        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, location="json", default="")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")
        args = parser.parse_args()

        # Completion apps never auto-name conversations.
        args["auto_generate_name"] = False
        is_streaming = args["response_mode"] == "streaming"

        try:
            result = AppGenerateService.generate(
                app_model=app_model,
                user=end_user,
                args=args,
                invoke_from=InvokeFrom.SERVICE_API,
                streaming=is_streaming,
            )
            return helper.compact_generate_response(result)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError:
            # Validation errors propagate unchanged (handled upstream as 400s).
            raise
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()
class CompletionStopApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser, task_id):
        """Signal a running completion task to stop for the calling end user."""
        # Only completion-mode apps may use this endpoint.
        if app_model.mode != "completion":
            raise AppUnavailableError()

        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

        return {"result": "success"}, 200
class ChatApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser):
        """Create a chat message for a chat-style app on behalf of an end user.

        Service-layer and provider failures are mapped onto the service-API
        HTTP error types; anything unexpected is logged and surfaced as a 500.
        """
        # Guard: only chat-family app modes are accepted here.
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
            raise NotChatAppError()

        args = self._parse_request_args()
        is_streaming = args["response_mode"] == "streaming"

        try:
            result = AppGenerateService.generate(
                app_model=app_model, user=end_user, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=is_streaming
            )
            return helper.compact_generate_response(result)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError:
            # Validation errors propagate unchanged (handled upstream as 400s).
            raise
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()

    @staticmethod
    def _parse_request_args():
        """Parse and validate the JSON body of a chat-message request."""
        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, required=True, location="json")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
        parser.add_argument("conversation_id", type=uuid_value, location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")
        parser.add_argument("auto_generate_name", type=bool, required=False, default=True, location="json")
        return parser.parse_args()
class ChatStopApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser, task_id):
        """Signal a running chat task to stop for the calling end user."""
        # Only chat-family app modes may use this endpoint.
        chat_modes = {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}
        if AppMode.value_of(app_model.mode) not in chat_modes:
            raise NotChatAppError()

        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

        return {"result": "success"}, 200
class ChatApiForRobot(Resource):
    """Proxy endpoint: accepts an OpenAI-style chat payload from a robot client,
    forwards it to this host's /v1/chat-messages API, and translates the reply
    (blocking JSON or SSE stream) back into an OpenAI-compatible shape.
    """

    # Fallback end-user id when the robot does not send a device_id.
    _DEFAULT_USER = "abc-123"

    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("id", type=str, required=True, location="json")
        parser.add_argument("enterprise_id", type=str, required=False, location="json")
        parser.add_argument("device_id", type=str, required=False, location="json")
        parser.add_argument("messages", type=list, required=True, location="json")
        parser.add_argument("max_tokens", type=int, required=True, location="json")
        parser.add_argument("stream", type=bool, required=True, location="json")
        args = parser.parse_args()

        messages = args["messages"]
        if not messages:
            raise BadRequest("messages is empty.")

        message_id = args["id"]  # renamed from `id` to avoid shadowing the builtin
        stream = bool(args["stream"])
        device_id = args["device_id"]

        data = {
            "inputs": {},
            # The last message in the OpenAI-style list is the user's query.
            "query": messages[-1]["content"],
            "response_mode": "streaming" if stream else "blocking",
            "conversation_id": "",
            "user": device_id if device_id else self._DEFAULT_USER,
            "files": [],
        }

        chat_message_url = request.host_url + "v1/chat-messages"
        logging.info("Sending request to %s", chat_message_url)

        # Forward only the auth header. Re-sending the inbound request's full
        # header set with a different body would carry a stale Content-Length
        # (and Host), corrupting the proxied request.
        headers = {"Content-Type": "application/json"}
        authorization = request.headers.get("Authorization")
        if authorization:
            headers["Authorization"] = authorization

        # stream=True is required so SSE lines are relayed as they arrive
        # instead of being buffered until the upstream response completes;
        # the timeout keeps a dead upstream from hanging this worker forever.
        response = requests.post(
            chat_message_url,
            json=data,
            headers=headers,
            stream=stream,
            timeout=(10, 600),  # (connect, read) seconds
        )

        if stream:
            return helper.compact_generate_response(self._to_openai_sse(response, message_id))

        content = json.loads(response.text)
        return {
            "id": message_id,
            "model": "advanced-chat",
            "created": content["created_at"],
            "answer": content["answer"],
        }

    @staticmethod
    def _to_openai_sse(response, message_id) -> Generator:
        """Translate the upstream Dify SSE stream into OpenAI-style chunks.

        Yields "id: N\\ndata: {...}\\n\\n" frames; only "message" and
        "message_end" upstream events are forwarded.
        """
        i = 0
        for line in response.iter_lines():
            line_str = line.decode("utf-8")
            if not line_str.startswith("data:"):
                continue
            content = json.loads(line_str[6:])  # strip the "data: " prefix
            event = content["event"]
            if event not in ("message", "message_end"):
                continue

            chunk = {
                "id": message_id,
                "model": "advanced-chat",
                "created": content["created_at"],
                "choices": [],
            }
            if i == 0:
                # First chunk carries the assistant role, per the OpenAI
                # streaming protocol, with an empty content delta.
                chunk["choices"].append(
                    {"index": 0, "delta": {"role": "assistant", "content": ""}, "finish_reason": None}
                )
                yield f"id: {i}\ndata: {json.dumps(chunk)}\n\n"
                chunk["choices"].pop()
                i += 1
            if event == "message":
                chunk["choices"].append(
                    {"index": 0, "delta": {"content": content["answer"]}, "finish_reason": None}
                )
            else:  # message_end
                chunk["choices"].append({"index": 0, "delta": {}, "finish_reason": "stop"})
            yield f"id: {i}\ndata: {json.dumps(chunk)}\n\n"
- api.add_resource(CompletionApi, "/completion-messages")
- api.add_resource(CompletionStopApi, "/completion-messages/<string:task_id>/stop")
- api.add_resource(ChatApi, "/chat-messages")
- api.add_resource(ChatStopApi, "/chat-messages/<string:task_id>/stop")
- api.add_resource(ChatApiForRobot, "/chat-messages-for-robot")
|