# model_template.py
  1. import json
  2. from models.model import AppModelConfig, App
  3. model_templates = {
  4. # completion default mode
  5. 'completion_default': {
  6. 'app': {
  7. 'mode': 'completion',
  8. 'enable_site': True,
  9. 'enable_api': True,
  10. 'is_demo': False,
  11. 'api_rpm': 0,
  12. 'api_rph': 0,
  13. 'status': 'normal'
  14. },
  15. 'model_config': {
  16. 'provider': 'openai',
  17. 'model_id': 'gpt-3.5-turbo-instruct',
  18. 'configs': {
  19. 'prompt_template': '',
  20. 'prompt_variables': [],
  21. 'completion_params': {
  22. 'max_token': 512,
  23. 'temperature': 1,
  24. 'top_p': 1,
  25. 'presence_penalty': 0,
  26. 'frequency_penalty': 0,
  27. }
  28. },
  29. 'model': json.dumps({
  30. "provider": "openai",
  31. "name": "gpt-3.5-turbo-instruct",
  32. "mode": "completion",
  33. "completion_params": {
  34. "max_tokens": 512,
  35. "temperature": 1,
  36. "top_p": 1,
  37. "presence_penalty": 0,
  38. "frequency_penalty": 0
  39. }
  40. }),
  41. 'user_input_form': json.dumps([
  42. {
  43. "paragraph": {
  44. "label": "Query",
  45. "variable": "query",
  46. "required": True,
  47. "default": ""
  48. }
  49. }
  50. ]),
  51. 'pre_prompt': '{{query}}'
  52. }
  53. },
  54. # chat default mode
  55. 'chat_default': {
  56. 'app': {
  57. 'mode': 'chat',
  58. 'enable_site': True,
  59. 'enable_api': True,
  60. 'is_demo': False,
  61. 'api_rpm': 0,
  62. 'api_rph': 0,
  63. 'status': 'normal'
  64. },
  65. 'model_config': {
  66. 'provider': 'openai',
  67. 'model_id': 'gpt-3.5-turbo',
  68. 'configs': {
  69. 'prompt_template': '',
  70. 'prompt_variables': [],
  71. 'completion_params': {
  72. 'max_token': 512,
  73. 'temperature': 1,
  74. 'top_p': 1,
  75. 'presence_penalty': 0,
  76. 'frequency_penalty': 0,
  77. }
  78. },
  79. 'model': json.dumps({
  80. "provider": "openai",
  81. "name": "gpt-3.5-turbo",
  82. "mode": "chat",
  83. "completion_params": {
  84. "max_tokens": 512,
  85. "temperature": 1,
  86. "top_p": 1,
  87. "presence_penalty": 0,
  88. "frequency_penalty": 0
  89. }
  90. })
  91. }
  92. },
  93. }
  94. demo_model_templates = {
  95. 'en-US': [
  96. {
  97. 'name': 'Translation Assistant',
  98. 'icon': '',
  99. 'icon_background': '',
  100. 'description': 'A multilingual translator that provides translation capabilities in multiple languages, translating user input into the language they need.',
  101. 'mode': 'completion',
  102. 'model_config': AppModelConfig(
  103. provider='openai',
  104. model_id='gpt-3.5-turbo-instruct',
  105. configs={
  106. 'prompt_template': "Please translate the following text into {{target_language}}:\n",
  107. 'prompt_variables': [
  108. {
  109. "key": "target_language",
  110. "name": "Target Language",
  111. "description": "The language you want to translate into.",
  112. "type": "select",
  113. "default": "Chinese",
  114. 'options': [
  115. 'Chinese',
  116. 'English',
  117. 'Japanese',
  118. 'French',
  119. 'Russian',
  120. 'German',
  121. 'Spanish',
  122. 'Korean',
  123. 'Italian',
  124. ]
  125. }
  126. ],
  127. 'completion_params': {
  128. 'max_token': 1000,
  129. 'temperature': 0,
  130. 'top_p': 0,
  131. 'presence_penalty': 0.1,
  132. 'frequency_penalty': 0.1,
  133. }
  134. },
  135. opening_statement='',
  136. suggested_questions=None,
  137. pre_prompt="Please translate the following text into {{target_language}}:\n{{query}}\ntranslate:",
  138. model=json.dumps({
  139. "provider": "openai",
  140. "name": "gpt-3.5-turbo-instruct",
  141. "mode": "completion",
  142. "completion_params": {
  143. "max_tokens": 1000,
  144. "temperature": 0,
  145. "top_p": 0,
  146. "presence_penalty": 0.1,
  147. "frequency_penalty": 0.1
  148. }
  149. }),
  150. user_input_form=json.dumps([
  151. {
  152. "select": {
  153. "label": "Target Language",
  154. "variable": "target_language",
  155. "description": "The language you want to translate into.",
  156. "default": "Chinese",
  157. "required": True,
  158. 'options': [
  159. 'Chinese',
  160. 'English',
  161. 'Japanese',
  162. 'French',
  163. 'Russian',
  164. 'German',
  165. 'Spanish',
  166. 'Korean',
  167. 'Italian',
  168. ]
  169. }
  170. },{
  171. "paragraph": {
  172. "label": "Query",
  173. "variable": "query",
  174. "required": True,
  175. "default": ""
  176. }
  177. }
  178. ])
  179. )
  180. },
  181. {
  182. 'name': 'AI Front-end Interviewer',
  183. 'icon': '',
  184. 'icon_background': '',
  185. 'description': 'A simulated front-end interviewer that tests the skill level of front-end development through questioning.',
  186. 'mode': 'chat',
  187. 'model_config': AppModelConfig(
  188. provider='openai',
  189. model_id='gpt-3.5-turbo',
  190. configs={
  191. 'introduction': 'Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
  192. 'prompt_template': "You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
  193. 'prompt_variables': [],
  194. 'completion_params': {
  195. 'max_token': 300,
  196. 'temperature': 0.8,
  197. 'top_p': 0.9,
  198. 'presence_penalty': 0.1,
  199. 'frequency_penalty': 0.1,
  200. }
  201. },
  202. opening_statement='Hi, welcome to our interview. I am the interviewer for this technology company, and I will test your web front-end development skills. Next, I will ask you some technical questions. Please answer them as thoroughly as possible. ',
  203. suggested_questions=None,
  204. pre_prompt="You will play the role of an interviewer for a technology company, examining the user's web front-end development skills and posing 5-10 sharp technical questions.\n\nPlease note:\n- Only ask one question at a time.\n- After the user answers a question, ask the next question directly, without trying to correct any mistakes made by the candidate.\n- If you think the user has not answered correctly for several consecutive questions, ask fewer questions.\n- After asking the last question, you can ask this question: Why did you leave your last job? After the user answers this question, please express your understanding and support.\n",
  205. model=json.dumps({
  206. "provider": "openai",
  207. "name": "gpt-3.5-turbo",
  208. "mode": "chat",
  209. "completion_params": {
  210. "max_tokens": 300,
  211. "temperature": 0.8,
  212. "top_p": 0.9,
  213. "presence_penalty": 0.1,
  214. "frequency_penalty": 0.1
  215. }
  216. }),
  217. user_input_form=None
  218. )
  219. }
  220. ],
  221. 'zh-Hans': [
  222. {
  223. 'name': '翻译助手',
  224. 'icon': '',
  225. 'icon_background': '',
  226. 'description': '一个多语言翻译器,提供多种语言翻译能力,将用户输入的文本翻译成他们需要的语言。',
  227. 'mode': 'completion',
  228. 'model_config': AppModelConfig(
  229. provider='openai',
  230. model_id='gpt-3.5-turbo-instruct',
  231. configs={
  232. 'prompt_template': "请将以下文本翻译为{{target_language}}:\n",
  233. 'prompt_variables': [
  234. {
  235. "key": "target_language",
  236. "name": "目标语言",
  237. "description": "翻译的目标语言",
  238. "type": "select",
  239. "default": "中文",
  240. "options": [
  241. "中文",
  242. "英文",
  243. "日语",
  244. "法语",
  245. "俄语",
  246. "德语",
  247. "西班牙语",
  248. "韩语",
  249. "意大利语",
  250. ]
  251. }
  252. ],
  253. 'completion_params': {
  254. 'max_token': 1000,
  255. 'temperature': 0,
  256. 'top_p': 0,
  257. 'presence_penalty': 0.1,
  258. 'frequency_penalty': 0.1,
  259. }
  260. },
  261. opening_statement='',
  262. suggested_questions=None,
  263. pre_prompt="请将以下文本翻译为{{target_language}}:\n{{query}}\n翻译:",
  264. model=json.dumps({
  265. "provider": "openai",
  266. "name": "gpt-3.5-turbo-instruct",
  267. "mode": "completion",
  268. "completion_params": {
  269. "max_tokens": 1000,
  270. "temperature": 0,
  271. "top_p": 0,
  272. "presence_penalty": 0.1,
  273. "frequency_penalty": 0.1
  274. }
  275. }),
  276. user_input_form=json.dumps([
  277. {
  278. "select": {
  279. "label": "目标语言",
  280. "variable": "target_language",
  281. "description": "翻译的目标语言",
  282. "default": "中文",
  283. "required": True,
  284. 'options': [
  285. "中文",
  286. "英文",
  287. "日语",
  288. "法语",
  289. "俄语",
  290. "德语",
  291. "西班牙语",
  292. "韩语",
  293. "意大利语",
  294. ]
  295. }
  296. },{
  297. "paragraph": {
  298. "label": "文本内容",
  299. "variable": "query",
  300. "required": True,
  301. "default": ""
  302. }
  303. }
  304. ])
  305. )
  306. },
  307. {
  308. 'name': 'AI 前端面试官',
  309. 'icon': '',
  310. 'icon_background': '',
  311. 'description': '一个模拟的前端面试官,通过提问的方式对前端开发的技能水平进行检验。',
  312. 'mode': 'chat',
  313. 'model_config': AppModelConfig(
  314. provider='openai',
  315. model_id='gpt-3.5-turbo',
  316. configs={
  317. 'introduction': '你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
  318. 'prompt_template': "你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
  319. 'prompt_variables': [],
  320. 'completion_params': {
  321. 'max_token': 300,
  322. 'temperature': 0.8,
  323. 'top_p': 0.9,
  324. 'presence_penalty': 0.1,
  325. 'frequency_penalty': 0.1,
  326. }
  327. },
  328. opening_statement='你好,欢迎来参加我们的面试,我是这家科技公司的面试官,我将考察你的 Web 前端开发技能。接下来我会向您提出一些技术问题,请您尽可能详尽地回答。',
  329. suggested_questions=None,
  330. pre_prompt="你将扮演一个科技公司的面试官,考察用户作为候选人的 Web 前端开发水平,提出 5-10 个犀利的技术问题。\n\n请注意:\n- 每次只问一个问题\n- 用户回答问题后请直接问下一个问题,而不要试图纠正候选人的错误;\n- 如果你认为用户连续几次回答的都不对,就少问一点;\n- 问完最后一个问题后,你可以问这样一个问题:上一份工作为什么离职?用户回答该问题后,请表示理解与支持。\n",
  331. model=json.dumps({
  332. "provider": "openai",
  333. "name": "gpt-3.5-turbo",
  334. "mode": "chat",
  335. "completion_params": {
  336. "max_tokens": 300,
  337. "temperature": 0.8,
  338. "top_p": 0.9,
  339. "presence_penalty": 0.1,
  340. "frequency_penalty": 0.1
  341. }
  342. }),
  343. user_input_form=None
  344. )
  345. }
  346. ],
  347. }