completion.py

from typing import Optional, List, Union

from langchain.callbacks import CallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage

from core.constant import llm_constant
from core.callback_handler.llm_callback_handler import LLMCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
    DifyStdOutCallbackHandler
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
from core.llm.error import LLMBadRequestError
from core.llm.llm_builder import LLMBuilder
from core.chain.main_chain_builder import MainChainBuilder
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from core.memory.read_only_conversation_token_db_buffer_shared_memory import \
    ReadOnlyConversationTokenDBBufferSharedMemory
from core.memory.read_only_conversation_token_db_string_buffer_shared_memory import \
    ReadOnlyConversationTokenDBStringBufferSharedMemory
from core.prompt.prompt_builder import PromptBuilder
from core.prompt.prompt_template import OutLinePromptTemplate
from core.prompt.prompts import MORE_LIKE_THIS_GENERATE_PROMPT
from models.model import App, AppModelConfig, Account, Conversation, Message


class Completion:
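    """
    Builds prompts and runs completions for an app: restores conversation
    memory, executes the agent/tool chain, then calls the final LLM with a
    prompt assembled from the pre-prompt, chain output and chat history.
    """
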
    @classmethod
    def generate(cls, task_id: str, app: App, app_model_config: AppModelConfig, query: str, inputs: dict,
                 user: Account, conversation: Optional[Conversation], streaming: bool, is_override: bool = False):
        """
        errors: ProviderTokenNotInitError
        """
        cls.validate_query_tokens(app.tenant_id, app_model_config, query)

        memory = None
        if conversation:
            # get read-only memory of the conversation
            memory = cls.get_memory_from_conversation(
                tenant_id=app.tenant_id,
                app_model_config=app_model_config,
                conversation=conversation,
                return_messages=False
            )

            inputs = conversation.inputs

        conversation_message_task = ConversationMessageTask(
            task_id=task_id,
            app=app,
            app_model_config=app_model_config,
            user=user,
            conversation=conversation,
            is_override=is_override,
            inputs=inputs,
            query=query,
            streaming=streaming
        )

        # build the main chain, including the agent
        main_chain = MainChainBuilder.to_langchain_components(
            tenant_id=app.tenant_id,
            agent_mode=app_model_config.agent_mode_dict,
            memory=ReadOnlyConversationTokenDBStringBufferSharedMemory(memory=memory) if memory else None,
            conversation_message_task=conversation_message_task
        )

        chain_output = ''
        if main_chain:
            chain_output = main_chain.run(query)

        # run the final llm
        try:
            cls.run_final_llm(
                tenant_id=app.tenant_id,
                mode=app.mode,
                app_model_config=app_model_config,
                query=query,
                inputs=inputs,
                chain_output=chain_output,
                conversation_message_task=conversation_message_task,
                memory=memory,
                streaming=streaming
            )
        except ConversationTaskStoppedException:
            return
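
    # The final LLM call: build the model, assemble the prompt, attach the
    # callback manager (conversation task handler plus a stdout handler),
    # shrink max_tokens to fit the context window, then generate.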
    @classmethod
    def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
                      chain_output: str,
                      conversation_message_task: ConversationMessageTask,
                      memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool):
        final_llm = LLMBuilder.to_llm_from_model(
            tenant_id=tenant_id,
            model=app_model_config.model_dict,
            streaming=streaming
        )

        # get the llm prompt
        prompt = cls.get_main_llm_prompt(
            mode=mode,
            llm=final_llm,
            pre_prompt=app_model_config.pre_prompt,
            query=query,
            inputs=inputs,
            chain_output=chain_output,
            memory=memory
        )

        final_llm.callback_manager = cls.get_llm_callback_manager(final_llm, streaming, conversation_message_task)

        cls.recale_llm_max_tokens(
            final_llm=final_llm,
            prompt=prompt,
            mode=mode
        )

        response = final_llm.generate([prompt])

        return response
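
    # Prompt assembly. In 'completion' mode the whole prompt is a single
    # string (wrapped in a HumanMessage for chat models); otherwise the chain
    # output, pre-prompt and token-budgeted chat history are folded into one
    # human message ending with "Human: {query}\nAI: ".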
    @classmethod
    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict,
                            chain_output: Optional[str],
                            memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
            Union[str, List[BaseMessage]]:
        # disable template strings in the query
        query_params = OutLinePromptTemplate.from_template(template=query).input_variables
        if query_params:
            for query_param in query_params:
                if query_param not in inputs:
                    inputs[query_param] = '{' + query_param + '}'

        pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt
        if mode == 'completion':
            prompt_template = OutLinePromptTemplate.from_template(
                template=("""Use the following CONTEXT as your learned knowledge:
[CONTEXT]
{context}
[END CONTEXT]

When answering the user:
- If you don't know, just say that you don't know.
- If you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
""" if chain_output else "")
                         + (pre_prompt + "\n" if pre_prompt else "")
                         + "{query}\n"
            )

            if chain_output:
                inputs['context'] = chain_output

            prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
            prompt_content = prompt_template.format(
                query=query,
                **prompt_inputs
            )

            if isinstance(llm, BaseChatModel):
                # use the chat llm as a completion model
                return [HumanMessage(content=prompt_content)]
            else:
                return prompt_content
        else:
            messages: List[BaseMessage] = []

            human_inputs = {
                "query": query
            }

            human_message_prompt = ""

            if pre_prompt:
                pre_prompt_inputs = {k: inputs[k] for k in
                                     OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
                                     if k in inputs}

                if pre_prompt_inputs:
                    human_inputs.update(pre_prompt_inputs)

            if chain_output:
                human_inputs['context'] = chain_output
                human_message_prompt += """Use the following CONTEXT as your learned knowledge.
[CONTEXT]
{context}
[END CONTEXT]

When answering the user:
- If you don't know, just say that you don't know.
- If you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
"""

            if pre_prompt:
                human_message_prompt += pre_prompt

            query_prompt = "\nHuman: {query}\nAI: "

            if memory:
                # append chat histories, capped to the tokens left in the context window
                tmp_human_message = PromptBuilder.to_human_message(
                    prompt_content=human_message_prompt + query_prompt,
                    inputs=human_inputs
                )

                curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message])
                rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \
                    - memory.llm.max_tokens - curr_message_tokens
                rest_tokens = max(rest_tokens, 0)
                history_messages = cls.get_history_messages_from_memory(memory, rest_tokens)

                human_message_prompt += "\n\n" + history_messages

            human_message_prompt += query_prompt

            # construct the main prompt
            human_message = PromptBuilder.to_human_message(
                prompt_content=human_message_prompt,
                inputs=human_inputs
            )

            messages.append(human_message)

            return messages
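
    # Callback wiring: the LLMCallbackHandler reports generation events to
    # the conversation task; the stdout handler is swapped for a streaming
    # variant when token streaming is enabled.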
    @classmethod
    def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
                                 streaming: bool,
                                 conversation_message_task: ConversationMessageTask) -> CallbackManager:
        llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
        if streaming:
            callback_handlers = [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
        else:
            callback_handlers = [llm_callback_handler, DifyStdOutCallbackHandler()]

        return CallbackManager(callback_handlers)
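
    # History is truncated to a token budget computed by the caller:
    # rest_tokens = max_context_token_length[model] - max_tokens - current prompt tokens.
    # For example (illustrative numbers only): a 4,096-token context with
    # max_tokens=512 and a 600-token prompt leaves 2,984 tokens of history.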
    @classmethod
    def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
                                         max_token_limit: int) -> str:
        """Get memory messages."""
        memory.max_token_limit = max_token_limit
        memory_key = memory.memory_variables[0]
        external_context = memory.load_memory_variables({})
        return external_context[memory_key]
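
    # The memory object reads prior messages from the database; the LLM built
    # here is only used to count tokens when the buffer is trimmed.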
    @classmethod
    def get_memory_from_conversation(cls, tenant_id: str, app_model_config: AppModelConfig,
                                     conversation: Conversation,
                                     **kwargs) -> ReadOnlyConversationTokenDBBufferSharedMemory:
        # used only to count tokens held in memory
        memory_llm = LLMBuilder.to_llm_from_model(
            tenant_id=tenant_id,
            model=app_model_config.model_dict
        )

        # use the llm config from the conversation
        memory = ReadOnlyConversationTokenDBBufferSharedMemory(
            conversation=conversation,
            llm=memory_llm,
            max_token_limit=kwargs.get("max_token_limit", 2048),
            memory_key=kwargs.get("memory_key", "chat_history"),
            return_messages=kwargs.get("return_messages", True),
            input_key=kwargs.get("input_key", "input"),
            output_key=kwargs.get("output_key", "output"),
            message_limit=kwargs.get("message_limit", 10),
        )

        return memory
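
    # Rejects queries whose tokens cannot fit in the context window after
    # reserving max_tokens for the answer. E.g. (illustrative numbers only):
    # a 4,096-token model with max_tokens=512 accepts queries up to 3,584 tokens.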
    @classmethod
    def validate_query_tokens(cls, tenant_id: str, app_model_config: AppModelConfig, query: str):
        llm = LLMBuilder.to_llm_from_model(
            tenant_id=tenant_id,
            model=app_model_config.model_dict
        )

        model_limited_tokens = llm_constant.max_context_token_length[llm.model_name]
        max_tokens = llm.max_tokens

        if model_limited_tokens - max_tokens - llm.get_num_tokens(query) < 0:
            raise LLMBadRequestError("Query is too long")
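
    # If prompt_tokens + max_tokens would overflow the context window, shrink
    # max_tokens to whatever is left (never below 16). E.g. (illustrative
    # numbers only): 4,096-token model, 3,900-token prompt -> max_tokens becomes 196.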
    @classmethod
    def recale_llm_max_tokens(cls, final_llm: Union[StreamableOpenAI, StreamableChatOpenAI],
                              prompt: Union[str, List[BaseMessage]], mode: str):
        # recalculate max_tokens if prompt_tokens + max_tokens exceeds the model token limit
        model_limited_tokens = llm_constant.max_context_token_length[final_llm.model_name]
        max_tokens = final_llm.max_tokens

        if mode == 'completion' and isinstance(final_llm, BaseLLM):
            prompt_tokens = final_llm.get_num_tokens(prompt)
        else:
            prompt_tokens = final_llm.get_messages_tokens(prompt)

        if prompt_tokens + max_tokens > model_limited_tokens:
            max_tokens = max(model_limited_tokens - prompt_tokens, 16)
            final_llm.max_tokens = max_tokens
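
    # "More like this": rebuilds the original prompt, then asks the model for
    # a fresh completion in the same style via MORE_LIKE_THIS_GENERATE_PROMPT.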
    @classmethod
    def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
                                app_model_config: AppModelConfig, user: Account, streaming: bool):
        llm: StreamableOpenAI = LLMBuilder.to_llm(
            tenant_id=app.tenant_id,
            model_name='gpt-3.5-turbo',
            streaming=streaming
        )

        # get the original llm prompt
        original_prompt = cls.get_main_llm_prompt(
            mode="completion",
            llm=llm,
            pre_prompt=pre_prompt,
            query=message.query,
            inputs=message.inputs,
            chain_output=None,
            memory=None
        )

        original_completion = message.answer.strip()

        prompt = MORE_LIKE_THIS_GENERATE_PROMPT
        prompt = prompt.format(prompt=original_prompt, original_completion=original_completion)

        if isinstance(llm, BaseChatModel):
            prompt = [HumanMessage(content=prompt)]

        conversation_message_task = ConversationMessageTask(
            task_id=task_id,
            app=app,
            app_model_config=app_model_config,
            user=user,
            inputs=message.inputs,
            query=message.query,
            is_override=bool(message.override_model_configs),
            streaming=streaming
        )

        llm.callback_manager = cls.get_llm_callback_manager(llm, streaming, conversation_message_task)

        cls.recale_llm_max_tokens(
            final_llm=llm,
            prompt=prompt,
            mode='completion'
        )

        llm.generate([prompt])
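

# A minimal usage sketch (hypothetical values; `app`, `app_model_config` and
# `user` would be loaded from the database inside an app context):
#
#     Completion.generate(
#         task_id=str(uuid.uuid4()),
#         app=app,
#         app_model_config=app_model_config,
#         query="Summarize this document.",
#         inputs={},
#         user=user,
#         conversation=None,
#         streaming=False
#     )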