# Model definition for gpt-3.5-turbo-16k-0613 (chat model, 16,385-token context).
model: gpt-3.5-turbo-16k-0613
label:
  zh_Hans: gpt-3.5-turbo-16k-0613
  en_US: gpt-3.5-turbo-16k-0613
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    # Upper bound matches the model's context window.
    max: 16385
  - name: response_format
    use_template: response_format
pricing:
  # Prices are USD per `unit` tokens; quoted so they stay strings, not floats.
  input: '0.003'
  output: '0.004'
  unit: '0.001'
  currency: USD