# evidence-seeker-demo/config/preprocessing_config.yaml
# Last commit 5455286 (scacean): "Changing to Llama 70b as preprocessor model."
---
# Preprocessing-model configuration.
# `used_model_key` selects the active entry from the `models` mapping below;
# its value must match one of the model keys exactly.

# Overall timeout (seconds) for the preprocessing step.
timeout: 1800

# Active model — must be a key of `models`.
used_model_key: Llama-3.3-70B-Instruct

models:
  Llama-3.2-3B-Instruct-Turbo:
    name: Llama-3.2-3B-Instruct-Turbo
    description: Llama-3.2-3B-Instruct-Turbo served by Together.ai over HuggingFace
    # NOTE(review): this entry uses the provider-scoped router path
    # (/together/v1) while the other entries use the generic /v1 path with a
    # ":together" suffix on `model` — both appear intentional; confirm whether
    # one convention should be standardized.
    base_url: https://router.huggingface.co/together/v1
    model: meta-llama/Llama-3.2-3B-Instruct-Turbo
    # NOTE(review): link points at the non-Turbo HF repo; presumably the Turbo
    # name is Together's serving alias for this base model — verify.
    model_link: https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct
    api_key_name: hf_debatelab_inference_provider
    backend_type: openai
    default_headers:
      X-HF-Bill-To: DebateLabKIT
    max_tokens: 1024
    temperature: 0.2
    # Per-request timeout (seconds) for this backend.
    timeout: 260

  Llama-3.3-70B-Instruct:
    name: Llama-3.3-70B-Instruct
    description: Llama-3.3-70B served by Together.ai over HuggingFace
    base_url: https://router.huggingface.co/v1
    # Provider is selected via the ":together" suffix on the model id.
    model: meta-llama/Llama-3.3-70B-Instruct:together
    model_link: https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct
    api_key_name: hf_debatelab_inference_provider
    backend_type: openai
    default_headers:
      X-HF-Bill-To: DebateLabKIT
    max_tokens: 1024
    temperature: 0.2
    # Per-request timeout (seconds) for this backend.
    timeout: 260

  Kimi-K2-Instruct:
    name: Kimi-K2-Instruct
    description: Kimi-K2-Instruct served by together.ai over HuggingFace
    base_url: https://router.huggingface.co/v1
    # Provider is selected via the ":together" suffix on the model id.
    model: moonshotai/Kimi-K2-Instruct:together
    model_link: https://huggingface.co/moonshotai/Kimi-K2-Instruct
    api_key_name: hf_debatelab_inference_provider
    backend_type: openai
    default_headers:
      X-HF-Bill-To: DebateLabKIT
    max_tokens: 1024
    temperature: 0.6
    # Per-request timeout (seconds) for this backend.
    timeout: 260