bases (list, lengths 0–8) | model (string, lengths 3–28) | modular_file (string, lengths 42–92) |
|---|---|---|
[
"deepseek_v3",
"qwen3"
] | dots1 | src/transformers/models/dots1/modular_dots1.py |
[
"clip"
] | metaclip_2 | src/transformers/models/metaclip_2/modular_metaclip_2.py |
[
"ernie4_5_moe",
"glm4v",
"mixtral",
"qwen2_5_vl",
"qwen2_vl"
] | ernie4_5_vl_moe | src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py |
[
"glm4_moe",
"llama"
] | solar_open | src/transformers/models/solar_open/modular_solar_open.py |
[
"auto",
"sam2",
"sam2_video"
] | edgetam_video | src/transformers/models/edgetam_video/modular_edgetam_video.py |
[
"clip",
"llama",
"qwen2_vl"
] | mlcd | src/transformers/models/mlcd/modular_mlcd.py |
[
"gemma2",
"olmo2"
] | olmo3 | src/transformers/models/olmo3/modular_olmo3.py |
[
"bart",
"opt"
] | biogpt | src/transformers/models/biogpt/modular_biogpt.py |
[
"mistral"
] | ministral3 | src/transformers/models/ministral3/modular_ministral3.py |
[
"llama"
] | cohere | src/transformers/models/cohere/modular_cohere.py |
[] | cohere2_vision | src/transformers/models/cohere2_vision/modular_cohere2_vision.py |
[
"mistral"
] | starcoder2 | src/transformers/models/starcoder2/modular_starcoder2.py |
[
"llama",
"mixtral",
"qwen2"
] | gpt_oss | src/transformers/models/gpt_oss/modular_gpt_oss.py |
[
"gemma",
"llama",
"mixtral",
"qwen2_moe"
] | olmoe | src/transformers/models/olmoe/modular_olmoe.py |
[
"llama"
] | gemma | src/transformers/models/gemma/modular_gemma.py |
[
"mistral",
"qwen2"
] | ministral | src/transformers/models/ministral/modular_ministral.py |
[
"clip",
"llama",
"siglip"
] | aimv2 | src/transformers/models/aimv2/modular_aimv2.py |
[
"auto",
"mistral3",
"pixtral"
] | lighton_ocr | src/transformers/models/lighton_ocr/modular_lighton_ocr.py |
[
"llama",
"olmo"
] | olmo2 | src/transformers/models/olmo2/modular_olmo2.py |
[
"chameleon",
"llama",
"siglip"
] | emu3 | src/transformers/models/emu3/modular_emu3.py |
[
"auto",
"llava",
"sam"
] | got_ocr2 | src/transformers/models/got_ocr2/modular_got_ocr2.py |
[
"gemma",
"llama",
"mistral"
] | diffllama | src/transformers/models/diffllama/modular_diffllama.py |
[
"bamba",
"gemma2",
"gemma3",
"llama",
"mixtral",
"qwen2_moe",
"qwen3_moe"
] | qwen3_next | src/transformers/models/qwen3_next/modular_qwen3_next.py |
[
"llama",
"nemotron"
] | arcee | src/transformers/models/arcee/modular_arcee.py |
[
"llama"
] | gpt_neox | src/transformers/models/gpt_neox/modular_gpt_neox.py |
[
"wav2vec2"
] | wavlm | src/transformers/models/wavlm/modular_wavlm.py |
[
"gemma2",
"llama",
"olmo2"
] | exaone4 | src/transformers/models/exaone4/modular_exaone4.py |
[
"esm",
"llama"
] | evolla | src/transformers/models/evolla/modular_evolla.py |
[
"auto",
"llava"
] | perception_lm | src/transformers/models/perception_lm/modular_perception_lm.py |
[
"auto",
"sam2"
] | edgetam | src/transformers/models/edgetam/modular_edgetam.py |
[
"fastspeech2_conformer",
"llama"
] | parakeet | src/transformers/models/parakeet/modular_parakeet.py |
[
"llama"
] | granite | src/transformers/models/granite/modular_granite.py |
[
"gemma"
] | gemma2 | src/transformers/models/gemma2/modular_gemma2.py |
[
"mistral"
] | mixtral | src/transformers/models/mixtral/modular_mixtral.py |
[
"deformable_detr",
"detr"
] | conditional_detr | src/transformers/models/conditional_detr/modular_conditional_detr.py |
[
"llama",
"phi4_multimodal"
] | timesfm | src/transformers/models/timesfm/modular_timesfm.py |
[
"flex_olmo",
"glm4_moe",
"mixtral"
] | minimax_m2 | src/transformers/models/minimax_m2/modular_minimax_m2.py |
[
"auto",
"glm4v"
] | glm46v | src/transformers/models/glm46v/modular_glm46v.py |
[
"auto",
"deepseek_vl",
"idefics",
"sam"
] | deepseek_vl_hybrid | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py |
[
"llama",
"parakeet",
"t5"
] | lasr | src/transformers/models/lasr/modular_lasr.py |
[
"deepseek_v3"
] | longcat_flash | src/transformers/models/longcat_flash/modular_longcat_flash.py |
[
"llama"
] | olmo | src/transformers/models/olmo/modular_olmo.py |
[
"llama",
"mimi"
] | vibevoice_acoustic_tokenizer | src/transformers/models/vibevoice_acoustic_tokenizer/modular_vibevoice_acoustic_tokenizer.py |
[
"mistral",
"phi"
] | phi3 | src/transformers/models/phi3/modular_phi3.py |
[
"auto",
"qwen2_vl",
"siglip"
] | video_llama_3 | src/transformers/models/video_llama_3/modular_video_llama_3.py |
[
"gemma2",
"paligemma",
"siglip"
] | gemma3 | src/transformers/models/gemma3/modular_gemma3.py |
[
"colpali"
] | colqwen2 | src/transformers/models/colqwen2/modular_colqwen2.py |
[
"dinov2",
"mask2former",
"siglip",
"vit"
] | eomt | src/transformers/models/eomt/modular_eomt.py |
[
"glm",
"phi3"
] | glm4 | src/transformers/models/glm4/modular_glm4.py |
[
"llama",
"moonshine",
"wav2vec2"
] | moonshine_streaming | src/transformers/models/moonshine_streaming/modular_moonshine_streaming.py |
[
"auto",
"gemma3",
"siglip",
"t5gemma"
] | t5gemma2 | src/transformers/models/t5gemma2/modular_t5gemma2.py |
[
"auto",
"dac",
"pe_audio_video"
] | pe_audio | src/transformers/models/pe_audio/modular_pe_audio.py |
[
"chameleon",
"glm4v",
"glm4v_moe",
"qwen2_vl",
"siglip"
] | glm_image | src/transformers/models/glm_image/modular_glm_image.py |
[
"hunyuan_v1_dense",
"llama",
"mixtral"
] | hunyuan_v1_moe | src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py |
[
"conditional_detr",
"deformable_detr",
"detr"
] | rt_detr | src/transformers/models/rt_detr/modular_rt_detr.py |
[
"auto",
"resnet",
"rt_detr"
] | pp_doclayout_v3 | src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py |
[
"llama",
"mamba2",
"zamba"
] | zamba2 | src/transformers/models/zamba2/modular_zamba2.py |
[
"llama",
"mixtral"
] | jetmoe | src/transformers/models/jetmoe/modular_jetmoe.py |
[
"gemma2",
"llama",
"mistral"
] | qwen2 | src/transformers/models/qwen2/modular_qwen2.py |
[
"wav2vec2"
] | hubert | src/transformers/models/hubert/modular_hubert.py |
[
"sam"
] | sam_hq | src/transformers/models/sam_hq/modular_sam_hq.py |
[
"granite",
"jetmoe",
"llama",
"mixtral"
] | granitemoe | src/transformers/models/granitemoe/modular_granitemoe.py |
[
"gemma",
"granite",
"llama"
] | helium | src/transformers/models/helium/modular_helium.py |
[
"gemma2"
] | t5gemma | src/transformers/models/t5gemma/modular_t5gemma.py |
[
"deepseek_v3",
"exaone4",
"olmoe",
"qwen2_moe"
] | exaone_moe | src/transformers/models/exaone_moe/modular_exaone_moe.py |
[
"wav2vec2"
] | sew | src/transformers/models/sew/modular_sew.py |
[
"auto"
] | llava_next_video | src/transformers/models/llava_next_video/modular_llava_next_video.py |
[
"mamba"
] | falcon_mamba | src/transformers/models/falcon_mamba/modular_falcon_mamba.py |
[] | mask2former | src/transformers/models/mask2former/modular_mask2former.py |
[] | grounding_dino | src/transformers/models/grounding_dino/modular_grounding_dino.py |
[
"bamba",
"gemma2",
"llama"
] | lfm2 | src/transformers/models/lfm2/modular_lfm2.py |
[
"gemma",
"llama",
"qwen2"
] | qwen3 | src/transformers/models/qwen3/modular_qwen3.py |
[
"wav2vec2"
] | data2vec | src/transformers/models/data2vec/modular_data2vec_audio.py |
[
"roberta"
] | data2vec | src/transformers/models/data2vec/modular_data2vec_text.py |
[
"mixtral",
"olmo2",
"olmoe"
] | flex_olmo | src/transformers/models/flex_olmo/modular_flex_olmo.py |
[
"dinov3_vit",
"eomt"
] | eomt_dinov3 | src/transformers/models/eomt_dinov3/modular_eomt_dinov3.py |
[
"cohere",
"gemma2"
] | cohere2 | src/transformers/models/cohere2/modular_cohere2.py |
[
"deepseek_v3",
"llama",
"qwen3"
] | youtu | src/transformers/models/youtu/modular_youtu.py |
[
"llama",
"nemotron"
] | apertus | src/transformers/models/apertus/modular_apertus.py |
[] | dinov3_vit | src/transformers/models/dinov3_vit/modular_dinov3_vit.py |
[
"auto",
"qwen3"
] | pe_audio_video | src/transformers/models/pe_audio_video/modular_pe_audio_video.py |
[
"bamba",
"gemma2",
"granitemoeshared"
] | granitemoehybrid | src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py |
[
"wav2vec2"
] | unispeech_sat | src/transformers/models/unispeech_sat/modular_unispeech_sat.py |
[
"gemma2",
"mixtral"
] | minimax | src/transformers/models/minimax/modular_minimax.py |
[
"llama",
"mistral",
"mixtral"
] | jamba | src/transformers/models/jamba/modular_jamba.py |
[
"llama",
"mixtral"
] | phimoe | src/transformers/models/phimoe/modular_phimoe.py |
[
"roberta"
] | xlm_roberta | src/transformers/models/xlm_roberta/modular_xlm_roberta.py |
[
"bart",
"time_series_transformer"
] | informer | src/transformers/models/informer/modular_informer.py |
[
"align",
"gemma3"
] | modernbert | src/transformers/models/modernbert/modular_modernbert.py |
[
"beit"
] | dpt | src/transformers/models/dpt/modular_dpt.py |
[
"auto",
"qwen2_audio"
] | voxtral | src/transformers/models/voxtral/modular_voxtral.py |
[
"glm",
"llama",
"whisper"
] | moonshine | src/transformers/models/moonshine/modular_moonshine.py |
[] | colpali | src/transformers/models/colpali/modular_colpali.py |
[
"llama",
"qwen2_vl"
] | qwen2_5_vl | src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py |
[
"llama",
"mixtral"
] | doge | src/transformers/models/doge/modular_doge.py |
[
"llava"
] | lfm2_vl | src/transformers/models/lfm2_vl/modular_lfm2_vl.py |
[
"auto",
"idefics",
"janus"
] | deepseek_vl | src/transformers/models/deepseek_vl/modular_deepseek_vl.py |
[
"llama",
"qwen2_moe"
] | deepseek_v2 | src/transformers/models/deepseek_v2/modular_deepseek_v2.py |
[
"auto",
"blip",
"blip_2",
"chameleon",
"idefics",
"llama",
"siglip"
] | janus | src/transformers/models/janus/modular_janus.py |
[
"t5"
] | switch_transformers | src/transformers/models/switch_transformers/modular_switch_transformers.py |
End of preview. Expand in Data Studio.
No dataset card yet.
Downloads last month: 2