{
  "models_auto": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 226,
    "skipped": 10,
    "time_spent": "3.79, 5.93, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215208",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215147"
    }
  },
  "models_bert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 527,
    "skipped": 211,
    "time_spent": "0:01:47, 0:01:50, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215196",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215175"
    }
  },
  "models_clip": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 660,
    "skipped": 934,
    "time_spent": "0:02:15, 0:02:11, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215674",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215699"
    }
  },
  "models_detr": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 177,
    "skipped": 271,
    "time_spent": "0:01:07, 0:01:11, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216030",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216008"
    }
  },
  "models_gemma3": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 507,
    "skipped": 320,
    "time_spent": "0:09:30, 0:09:28, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_model_parallelism",
          "trace": "(line 925) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216642",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216593"
    }
  },
  "models_gemma3n": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 288,
    "skipped": 703,
    "time_spent": "0:02:15, 0:02:15, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_sdpa_padding_matches_padding_free_with_position_ids",
          "trace": "(line 4243) AssertionError: Tensor-likes are not close!"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216605",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216660"
    }
  },
  "models_got_ocr2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 257,
    "skipped": 333,
    "time_spent": "0:01:49, 0:01:49, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216911",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216742"
    }
  },
  "models_gpt2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 487,
    "skipped": 229,
    "time_spent": "0:02:11, 0:02:01, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216717",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216759"
    }
  },
  "models_internvl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 355,
    "skipped": 241,
    "time_spent": "0:04:33, 0:04:31, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
          "trace": "(line 439) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfResources: out of resource: shared memory, Required: 106496, Hardware limit: 101376. Reducing block sizes or `num_stages` may help."
        }
      ],
      "single": [
        {
          "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
          "trace": "(line 439) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfResources: out of resource: shared memory, Required: 106496, Hardware limit: 101376. Reducing block sizes or `num_stages` may help."
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217017",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217056"
    }
  },
  "models_llama": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 481,
    "skipped": 253,
    "time_spent": "0:03:43, 0:03:37, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217239",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217242"
    }
  },
  "models_llava": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 349,
    "skipped": 159,
    "time_spent": "0:08:59, 0:09:11, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217250",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217263"
    }
  },
  "models_mistral3": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 283,
    "skipped": 267,
    "time_spent": "0:09:53, 0:09:40, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215108",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215124"
    }
  },
  "models_modernbert": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 174,
    "skipped": 218,
    "time_spent": "0:01:27, 0:01:24, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215158",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215123"
    }
  },
  "models_qwen2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 443,
    "skipped": 251,
    "time_spent": "0:02:16, 0:02:16, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215909",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215891"
    }
  },
  "models_qwen2_5_omni": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 278,
    "skipped": 159,
    "time_spent": "0:02:55, 0:03:00, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_model_parallelism",
          "trace": "(line 675) AssertionError: Items in the second set but not the first:"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215907",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215896"
    }
  },
  "models_qwen2_5_vl": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 1 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 309,
    "skipped": 141,
    "time_spent": "0:03:13, 0:03:14, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 675) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ],
      "single": [
        {
          "line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
          "trace": "(line 675) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215945",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301215911"
    }
  },
  "models_smolvlm": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 497,
    "skipped": 269,
    "time_spent": "0:01:33, 0:01:36, ",
    "failures": {},
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216282",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216321"
    }
  },
  "models_t5": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 1, "multi": 2 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 592,
    "skipped": 535,
    "time_spent": "0:03:13, 0:02:52, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
        },
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        }
      ],
      "single": [
        {
          "line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
          "trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216565",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216464"
    }
  },
  "models_vit": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 0, "multi": 0 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 217,
    "skipped": 199,
    "time_spent": "2.03, 1.28, ",
    "failures": {},
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216869",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216833"
    }
  },
  "models_wav2vec2": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 4, "multi": 4 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 672,
    "skipped": 438,
    "time_spent": "0:03:37, 0:03:36, ",
    "failures": {
      "multi": [
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_inference_mms_1b_all",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_invalid_pool",
          "trace": "(line 675) AssertionError: Traceback (most recent call last):"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_pool",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        }
      ],
      "single": [
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_inference_mms_1b_all",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_invalid_pool",
          "trace": "(line 675) AssertionError: Traceback (most recent call last):"
        },
        {
          "line": "tests/models/wav2vec2/test_modeling_wav2vec2.py::Wav2Vec2ModelIntegrationTest::test_wav2vec2_with_lm_pool",
          "trace": "(line 989) RuntimeError: Dataset scripts are no longer supported, but found common_voice_11_0.py"
        }
      ]
    },
    "job_link": {
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216956",
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216929"
    }
  },
  "models_whisper": {
    "failed": {
      "PyTorch": { "unclassified": 0, "single": 5, "multi": 6 },
      "TensorFlow": { "unclassified": 0, "single": 0, "multi": 0 },
      "Flax": { "unclassified": 0, "single": 0, "multi": 0 },
      "Tokenizers": { "unclassified": 0, "single": 0, "multi": 0 },
      "Pipelines": { "unclassified": 0, "single": 0, "multi": 0 },
      "Trainer": { "unclassified": 0, "single": 0, "multi": 0 },
      "ONNX": { "unclassified": 0, "single": 0, "multi": 0 },
      "Auto": { "unclassified": 0, "single": 0, "multi": 0 },
      "Quantization": { "unclassified": 0, "single": 0, "multi": 0 },
      "Unclassified": { "unclassified": 0, "single": 0, "multi": 0 }
    },
    "errors": 0,
    "success": 1014,
    "skipped": 475,
    "time_spent": "0:11:09, 0:11:47, ",
    "failures": {
      "single": [
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
        }
      ],
      "multi": [
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelTest::test_multi_gpu_data_parallel_forward",
          "trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
          "trace": "(line 756) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[272 chars]ting of classics, Sicilian, nade door variatio[8147 chars]le!'] != [\" Fo[272 chars]ting a classic Sicilian, nade door variation o[8150 chars]le!']"
        },
        {
          "line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
          "trace": "(line 675) AssertionError: Lists differ: [\" Fo[268 chars]ating, so soft, it would make JD power and her[196 chars]ke.\"] != [\" Fo[268 chars]ating so soft, it would make JD power and her [195 chars]ke.\"]"
        }
      ]
    },
    "job_link": {
      "single": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301216943",
      "multi": "https://github.com/huggingface/transformers/actions/runs/16712955100/job/47301217012"
    }
  }
} |
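
For reference, a minimal Python sketch of how a report with this structure can be summarized, assuming the JSON above is saved to a file (the filename ci_results.json and the summarize helper are hypothetical; only the key layout -- failed, success, skipped, failures, job_link -- is taken from the report itself):

import json

def summarize(path: str) -> None:
    # Load the per-model CI report (one top-level key per model suite).
    with open(path) as f:
        report = json.load(f)

    total_success = total_skipped = total_failed = 0
    for model, entry in report.items():
        # "failed" maps framework -> {"unclassified"/"single"/"multi": count};
        # sum every counter to get the suite's total failure count.
        n_failed = sum(
            count
            for framework_counts in entry["failed"].values()
            for count in framework_counts.values()
        )
        total_success += entry["success"]
        total_skipped += entry["skipped"]
        total_failed += n_failed
        if n_failed:
            print(f"{model}: {n_failed} failed")
            # "failures" maps "single"/"multi" to lists of {"line", "trace"}.
            for device, cases in entry["failures"].items():
                for case in cases:
                    print(f"  [{device}] {case['line']}")
                    print(f"      {case['trace']}")
                print(f"  job: {entry['job_link'].get(device, 'n/a')}")

    print(f"\ntotals: {total_success} passed, {total_failed} failed, {total_skipped} skipped")

if __name__ == "__main__":
    summarize("ci_results.json")  # hypothetical filename

Run against the report above, this would flag the gemma3, gemma3n, internvl, qwen2_5_omni, qwen2_5_vl, t5, wav2vec2, and whisper suites, which are the only entries with non-empty "failures".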