# mini-llama-200M / mergekit_config.yml
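# Passthrough merge ("frankenmerge"): the slices below are concatenated in
# order, so no weights are averaged; each layer is copied verbatim from its
# source checkpoint. With mergekit's end-exclusive ranges this stacks
# 5 + 6*4 + 3 = 32 decoder layers from eight 70M SFT checkpoints, which is
# how the ~200M-parameter model in the repo name is assembled.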
slices:
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-v2
        layer_range: [0, 5]  # layers 0-4 (layer_range is end-exclusive)
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-COT
        layer_range: [0, 4]  # layers 0-3
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-medical
        layer_range: [0, 4]
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-code
        layer_range: [0, 4]
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-math
        layer_range: [0, 4]
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-ifeval
        layer_range: [0, 4]
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT-v2
        layer_range: [0, 4]
  - sources:
      - model: rootxhacker/mini-Llama-70M-SFT
        layer_range: [0, 3]  # layers 0-2
merge_method: passthrough
dtype: bfloat16
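
# Example invocation (a sketch; assumes mergekit is installed, e.g. via
# `pip install mergekit`, and the output directory name is illustrative):
#
#   mergekit-yaml mergekit_config.yml ./mini-llama-200M --copy-tokenizer
#
# --copy-tokenizer carries the tokenizer over from the first source model;
# adding --cuda runs the merge on GPU, and --lazy-unpickle lowers peak RAM.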