From 3a0734d94cbcd0da4cc106006e5dcfbe2113c13c Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Wed, 9 Oct 2024 01:00:10 +0800
Subject: [PATCH] Feat/9081 add support for llamaguard through groq provider
 (#9083)

---
 .../model_providers/groq/llm/_position.yaml        |  1 +
 .../model_providers/groq/llm/llama-guard-3-8b.yaml | 25 +++++++++++++++++++
 2 files changed, 26 insertions(+)
 create mode 100644 api/core/model_runtime/model_providers/groq/llm/llama-guard-3-8b.yaml

diff --git a/api/core/model_runtime/model_providers/groq/llm/_position.yaml b/api/core/model_runtime/model_providers/groq/llm/_position.yaml
index be115ca920..0613b19f87 100644
--- a/api/core/model_runtime/model_providers/groq/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/_position.yaml
@@ -5,3 +5,4 @@
 - llama3-8b-8192
 - mixtral-8x7b-32768
 - llama2-70b-4096
+- llama-guard-3-8b
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-guard-3-8b.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-guard-3-8b.yaml
new file mode 100644
index 0000000000..03779ccc66
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-guard-3-8b.yaml
@@ -0,0 +1,25 @@
+model: llama-guard-3-8b
+label:
+  zh_Hans: Llama-Guard-3-8B
+  en_US: Llama-Guard-3-8B
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 8192
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 8192
+pricing:
+  input: '0.20'
+  output: '0.20'
+  unit: '0.000001'
+  currency: USD
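
For reference, a minimal sketch of exercising the model id registered by this patch directly against Groq's OpenAI-compatible endpoint. This is not part of the diff; the endpoint URL, the GROQ_API_KEY environment variable, and the use of the openai client are assumptions for illustration, not something the patch itself configures.

# Not part of the patch: a hedged usage sketch, assuming Groq's OpenAI-compatible API.
import os

from openai import OpenAI  # pip install openai

client = OpenAI(
    api_key=os.environ["GROQ_API_KEY"],          # assumed env var holding a Groq API key
    base_url="https://api.groq.com/openai/v1",   # assumed OpenAI-compatible Groq endpoint
)

# Llama Guard is a safety classifier: it responds with "safe" or "unsafe" plus
# violated category codes, rather than an ordinary chat completion.
resp = client.chat.completions.create(
    model="llama-guard-3-8b",                    # matches the id added in the new YAML
    messages=[{"role": "user", "content": "How do I fold a paper airplane?"}],
    max_tokens=512,                              # mirrors the YAML default above
)
print(resp.choices[0].message.content)           # e.g. "safe"

The parameter_rules in the new YAML (temperature, top_p, max_tokens with a 512 default and an 8192 ceiling) correspond to the request parameters shown here; the 8192 context_size and max_tokens ceiling reflect the model's 8k window as declared in the patch.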