model_template.py

import json

from models.model import App, AppModelConfig

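# Default app / model-config templates, keyed by '<app mode>_default'
# ('completion_default', 'chat_default'). Each entry carries the default
# App fields ('app') and AppModelConfig fields ('model_config') for that mode.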
model_templates = {
    # completion default mode
    'completion_default': {
        'app': {
            'mode': 'completion',
            'enable_site': True,
            'enable_api': True,
            'is_demo': False,
            'api_rpm': 0,
            'api_rph': 0,
            'status': 'normal'
        },
        'model_config': {
            'provider': 'openai',
            'model_id': 'gpt-3.5-turbo-instruct',
            'configs': {
                'prompt_template': '',
                'prompt_variables': [],
                'completion_params': {
                    'max_token': 512,
                    'temperature': 1,
                    'top_p': 1,
                    'presence_penalty': 0,
                    'frequency_penalty': 0,
                }
            },
            'model': json.dumps({
                "provider": "openai",
                "name": "gpt-3.5-turbo-instruct",
                "mode": "completion",
                "completion_params": {
                    "max_tokens": 512,
                    "temperature": 1,
                    "top_p": 1,
                    "presence_penalty": 0,
                    "frequency_penalty": 0
                }
            }),
            'user_input_form': json.dumps([
                {
                    "paragraph": {
                        "label": "Query",
                        "variable": "query",
                        "required": True,
                        "default": ""
                    }
                }
            ]),
            'pre_prompt': '{{query}}'
        }
    },

    # chat default mode
    'chat_default': {
        'app': {
            'mode': 'chat',
            'enable_site': True,
            'enable_api': True,
            'is_demo': False,
            'api_rpm': 0,
            'api_rph': 0,
            'status': 'normal'
        },
        'model_config': {
            'provider': 'openai',
            'model_id': 'gpt-3.5-turbo',
            'configs': {
                'prompt_template': '',
                'prompt_variables': [],
                'completion_params': {
                    'max_token': 512,
                    'temperature': 1,
                    'top_p': 1,
                    'presence_penalty': 0,
                    'frequency_penalty': 0,
                }
            },
            'model': json.dumps({
                "provider": "openai",
                "name": "gpt-3.5-turbo",
                "mode": "chat",
                "completion_params": {
                    "max_tokens": 512,
                    "temperature": 1,
                    "top_p": 1,
                    "presence_penalty": 0,
                    "frequency_penalty": 0
                }
            })
        }
    },
}
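

# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how these templates might be consumed when creating a new
# app. It assumes that App and AppModelConfig (imported above) accept the template
# fields directly as keyword arguments; the helper name `build_app_from_template`
# is hypothetical.
def build_app_from_template(name, mode='chat'):
    """Hypothetical helper: build App / AppModelConfig instances from a template."""
    template = model_templates[mode + '_default']  # 'chat_default' or 'completion_default'
    app = App(name=name, **template['app'])  # assumes these keys map to App columns
    app_model_config = AppModelConfig(**template['model_config'])  # assumes keys map to AppModelConfig columns
    return app, app_model_config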