Просмотр исходного кода

feat: add internlm2.5-20b and qwen2.5-coder-7b model (#8862)

zhuhao 7 месяцев назад
Родитель
Commit
61c89a9168

+ 3 - 13
api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml

@@ -1,18 +1,17 @@
 - Qwen/Qwen2.5-72B-Instruct
-- Qwen/Qwen2.5-Math-72B-Instruct
 - Qwen/Qwen2.5-32B-Instruct
 - Qwen/Qwen2.5-14B-Instruct
 - Qwen/Qwen2.5-7B-Instruct
 - Qwen/Qwen2.5-Coder-7B-Instruct
-- deepseek-ai/DeepSeek-V2.5
+- Qwen/Qwen2.5-Math-72B-Instruct
 - Qwen/Qwen2-72B-Instruct
 - Qwen/Qwen2-57B-A14B-Instruct
 - Qwen/Qwen2-7B-Instruct
 - Qwen/Qwen2-1.5B-Instruct
+- deepseek-ai/DeepSeek-V2.5
 - deepseek-ai/DeepSeek-V2-Chat
 - deepseek-ai/DeepSeek-Coder-V2-Instruct
 - THUDM/glm-4-9b-chat
-- THUDM/chatglm3-6b
 - 01-ai/Yi-1.5-34B-Chat-16K
 - 01-ai/Yi-1.5-9B-Chat-16K
 - 01-ai/Yi-1.5-6B-Chat
@@ -26,13 +25,4 @@
 - google/gemma-2-27b-it
 - google/gemma-2-9b-it
 - mistralai/Mistral-7B-Instruct-v0.2
-- Pro/Qwen/Qwen2-7B-Instruct
-- Pro/Qwen/Qwen2-1.5B-Instruct
-- Pro/THUDM/glm-4-9b-chat
-- Pro/THUDM/chatglm3-6b
-- Pro/01-ai/Yi-1.5-9B-Chat-16K
-- Pro/01-ai/Yi-1.5-6B-Chat
-- Pro/internlm/internlm2_5-7b-chat
-- Pro/meta-llama/Meta-Llama-3.1-8B-Instruct
-- Pro/meta-llama/Meta-Llama-3-8B-Instruct
-- Pro/google/gemma-2-9b-it
+- mistralai/Mixtral-8x7B-Instruct-v0.1

+ 30 - 0
api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-20b-chat.yaml

@@ -0,0 +1,30 @@
+model: internlm/internlm2_5-20b-chat
+label:
+  en_US: internlm/internlm2_5-20b-chat
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 32768
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: max_tokens
+    use_template: max_tokens
+    type: int
+    default: 512
+    min: 1
+    max: 4096
+    help:
+      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+  - name: top_p
+    use_template: top_p
+  - name: frequency_penalty
+    use_template: frequency_penalty
+pricing:
+  input: '1'
+  output: '1'
+  unit: '0.000001'
+  currency: RMB

Разница между файлами не показана из-за большого размера файла
+ 74 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-coder-7b-instruct.yaml


Разница между файлами не показана из-за большого размера файла
+ 74 - 0
api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-math-72b-instruct.yaml