{
"models_auto": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 80,
"skipped": 2,
"time_spent": "0.99, 2.41, ",
"failures": {},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329937",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330183"
}
},
"models_bert": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 239,
"skipped": 111,
"time_spent": "8.85, 0:01:00, ",
"failures": {},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329946",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330199"
}
},
"models_clip": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 288,
"skipped": 590,
"time_spent": "0:01:55, 0:01:58, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330217",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329991"
}
},
"models_detr": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 77,
"skipped": 159,
"time_spent": "4.40, 6.77, ",
"failures": {},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330035",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330267"
}
},
"models_gemma3": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 6,
"multi": 7
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 349,
"skipped": 260,
"time_spent": "0:11:14, 0:11:08, ",
"failures": {
"single": [
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
"trace": "(line 715) AssertionError: Lists differ: ['Wri[57 chars]s, a silent stream,\\nInto the neural net, a wa[42 chars],\\n'] != ['Wri[57 chars]s, a river deep,\\nWith patterns hidden, secret[46 chars]ing']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
"trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [264 chars]cow\"] != ['use[114 chars]rown and white cow standing on a sandy beach n[272 chars]ach']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
"trace": "(line 715) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\nHe[678 chars]h a'] != ['user\\nYou are a helpful assistant.\\n\\nHe[658 chars]h a']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
"trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [55 chars]ike'] != ['use[114 chars]rown and white cow standing on a sandy beach w[68 chars]oks']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
"trace": "(line 715) AssertionError: Lists differ: [\"use[251 chars]. There's a blue sky with some white clouds in the background\"] != [\"use[251 chars]. There's a bright blue sky with some white clouds in the\"]"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
"trace": "(line 715) AssertionError: Lists differ: [\"use[122 chars]n\\n**Main Features:**\\n\\n* **Chinese Archway[19 chars]ent\"] != [\"use[122 chars]n\\n**Overall Scene:**\\n\\nIt looks like a stree[18 chars]nt,\"]"
}
],
"multi": [
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3Vision2TextModelTest::test_model_parallelism",
"trace": "(line 925) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
"trace": "(line 715) AssertionError: Lists differ: ['Wri[57 chars]s, a silent stream,\\nInto the neural net, a wa[42 chars],\\n'] != ['Wri[57 chars]s, a river deep,\\nWith patterns hidden, secret[46 chars]ing']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
"trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [264 chars]cow\"] != ['use[114 chars]rown and white cow standing on a sandy beach n[272 chars]ach']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
"trace": "(line 715) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\nHe[678 chars]h a'] != ['user\\nYou are a helpful assistant.\\n\\nHe[658 chars]h a']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
"trace": "(line 715) AssertionError: Lists differ: ['use[114 chars]rown cow standing on a sandy beach with clear [55 chars]ike'] != ['use[114 chars]rown and white cow standing on a sandy beach w[68 chars]oks']"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
"trace": "(line 715) AssertionError: Lists differ: [\"use[251 chars]. There's a blue sky with some white clouds in the background\"] != [\"use[251 chars]. There's a bright blue sky with some white clouds in the\"]"
},
{
"line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
"trace": "(line 715) AssertionError: Lists differ: [\"use[122 chars]n\\n**Main Features:**\\n\\n* **Chinese Archway[19 chars]ent\"] != [\"use[122 chars]n\\n**Overall Scene:**\\n\\nIt looks like a stree[18 chars]nt,\"]"
}
]
},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330061",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330319"
}
},
"models_gemma3n": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 197,
"skipped": 635,
"time_spent": "0:01:06, 0:01:08, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330294",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330077"
}
},
"models_got_ocr2": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 147,
"skipped": 163,
"time_spent": "0:01:03, 0:01:01, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330314",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330094"
}
},
"models_gpt2": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 249,
"skipped": 99,
"time_spent": "0:02:01, 0:01:46, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330311",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330113"
}
},
"models_internvl": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 253,
"skipped": 107,
"time_spent": "0:01:50, 0:02:00, ",
"failures": {
"multi": [
{
"line": "tests/models/internvl/test_modeling_internvl.py::InternVLLlamaIntegrationTest::test_llama_small_model_integration_forward",
"trace": "(line 727) AssertionError: False is not true : Actual logits: tensor([ -9.8750, -0.4885, 1.4668, -10.3359, -10.3359], dtype=torch.float16)"
}
],
"single": [
{
"line": "tests/models/internvl/test_modeling_internvl.py::InternVLLlamaIntegrationTest::test_llama_small_model_integration_forward",
"trace": "(line 727) AssertionError: False is not true : Actual logits: tensor([ -9.8750, -0.4885, 1.4668, -10.3359, -10.3359], dtype=torch.float16)"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330361",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330105"
}
},
"models_llama": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 235,
"skipped": 101,
"time_spent": "0:03:15, 0:02:51, ",
"failures": {
"multi": [
{
"line": "tests/models/llama/test_modeling_llama.py::LlamaIntegrationTest::test_model_7b_logits_bf16",
"trace": "(line 727) AssertionError: False is not true"
}
],
"single": [
{
"line": "tests/models/llama/test_modeling_llama.py::LlamaIntegrationTest::test_model_7b_logits_bf16",
"trace": "(line 727) AssertionError: False is not true"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330531",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330138"
}
},
"models_llava": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 206,
"skipped": 124,
"time_spent": "0:03:58, 0:04:34, ",
"failures": {
"multi": [
{
"line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
"trace": "(line 399) importlib.metadata.PackageNotFoundError: No package metadata was found for bitsandbytes"
}
],
"single": [
{
"line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
"trace": "(line 399) importlib.metadata.PackageNotFoundError: No package metadata was found for bitsandbytes"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330406",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330161"
}
},
"models_mistral3": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 199,
"skipped": 105,
"time_spent": "0:04:34, 0:04:39, ",
"failures": {
"single": [
{
"line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_generate",
"trace": "(line 715) AssertionError: 'The [14 chars] two cats lying on a pink surface, which appea[21 chars] bed' != 'The [14 chars] two tabby cats lying on a pink surface, which[23 chars]n or'"
}
],
"multi": [
{
"line": "tests/models/mistral3/test_modeling_mistral3.py::Mistral3IntegrationTest::test_mistral3_integration_generate",
"trace": "(line 715) AssertionError: 'The [14 chars] two cats lying on a pink surface, which appea[21 chars] bed' != 'The [14 chars] two tabby cats lying on a pink surface, which[23 chars]n or'"
}
]
},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330418",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329678"
}
},
"models_modernbert": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 142,
"skipped": 102,
"time_spent": "0:01:03, 9.02, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329712",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330429"
}
},
"models_qwen2": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 217,
"skipped": 113,
"time_spent": "0:01:08, 0:01:05, ",
"failures": {
"multi": [
{
"line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
"trace": "(line 715) AssertionError: Lists differ: ['My [35 chars], organic, gluten free, vegan, and vegetarian. I love to use'] != ['My [35 chars], organic, gluten free, vegan, and free from preservatives. I']"
}
],
"single": [
{
"line": "tests/models/qwen2/test_modeling_qwen2.py::Qwen2IntegrationTest::test_export_static_cache",
"trace": "(line 715) AssertionError: Lists differ: ['My [35 chars], organic, gluten free, vegan, and vegetarian. I love to use'] != ['My [35 chars], organic, gluten free, vegan, and free from preservatives. I']"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329761",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330508"
}
},
"models_qwen2_5_omni": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 2,
"multi": 2
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 167,
"skipped": 141,
"time_spent": "0:02:23, 0:01:53, ",
"failures": {
"multi": [
{
"line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_model_parallelism",
"trace": "(line 715) AssertionError: Items in the second set but not the first:"
},
{
"line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
"trace": "(line 715) AssertionError: Lists differ: [\"sys[293 chars]s shattering, and the dog appears to be a Labrador Retriever.\"] != [\"sys[293 chars]s shattering, and the dog is a Labrador Retriever.\"]"
}
],
"single": [
{
"line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test",
"trace": "(line 700) requests.exceptions.ConnectionError: HTTPSConnectionPool(host='qianwen-res.oss-accelerate-overseas.aliyuncs.com', port=443): Max retries exceeded with url: /Qwen2-VL/demo_small.jpg (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7cb8c91d02f0>: Failed to establish a new connection: [Errno -2] Name or service not known'))"
},
{
"line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch",
"trace": "(line 715) AssertionError: Lists differ: [\"sys[109 chars]d is a glass shattering, and the dog is a Labr[187 chars]er.\"] != [\"sys[109 chars]d is glass shattering, and the dog is a Labrad[185 chars]er.\"]"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329806",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330503"
}
},
"models_qwen2_5_vl": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 1,
"multi": 1
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 205,
"skipped": 113,
"time_spent": "0:02:32, 0:02:29, ",
"failures": {
"multi": [
{
"line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
"trace": "(line 715) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
}
],
"single": [
{
"line": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_different_resolutions",
"trace": "(line 715) AssertionError: Lists differ: ['sys[314 chars]ion\\n addCriterion\\n\\n addCriterion\\n\\n addCri[75 chars]n\\n'] != ['sys[314 chars]ion\\nThe dog in the picture appears to be a La[81 chars] is']"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329760",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330498"
}
},
"models_smolvlm": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 323,
"skipped": 231,
"time_spent": "0:01:08, 0:01:13, ",
"failures": {},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330553",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329835"
}
},
"models_t5": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 2,
"multi": 3
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 254,
"skipped": 325,
"time_spent": "0:01:50, 0:01:40, ",
"failures": {
"multi": [
{
"line": "tests/models/t5/test_modeling_t5.py::T5ModelTest::test_multi_gpu_data_parallel_forward",
"trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
},
{
"line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
"trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
},
{
"line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_small_integration_test",
"trace": "(line 727) AssertionError: False is not true"
}
],
"single": [
{
"line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_export_t5_summarization",
"trace": "(line 687) AttributeError: 'dict' object has no attribute 'batch_size'"
},
{
"line": "tests/models/t5/test_modeling_t5.py::T5ModelIntegrationTests::test_small_integration_test",
"trace": "(line 727) AssertionError: False is not true"
}
]
},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329815",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330559"
}
},
"models_vit": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 135,
"skipped": 93,
"time_spent": "9.85, 7.74, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329875",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330596"
}
},
"models_wav2vec2": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 292,
"skipped": 246,
"time_spent": "0:01:56, 0:01:54, ",
"failures": {},
"job_link": {
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329877",
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330632"
}
},
"models_whisper": {
"failed": {
"PyTorch": {
"unclassified": 0,
"single": 40,
"multi": 42
},
"TensorFlow": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Flax": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Tokenizers": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Pipelines": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Trainer": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"ONNX": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Auto": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Quantization": {
"unclassified": 0,
"single": 0,
"multi": 0
},
"Unclassified": {
"unclassified": 0,
"single": 0,
"multi": 0
}
},
"errors": 0,
"success": 537,
"skipped": 337,
"time_spent": "0:03:23, 0:03:02, ",
"failures": {
"single": [
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_distil_token_timestamp_generation",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_forced_decoder_ids",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_prompt_ids",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_prompt_ids_task_language",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_language_detection",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_generation_multilingual",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_en_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_token_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_speculative_decoding_distil",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_speculative_decoding_non_distil",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_en_batched_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_en_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_specaugment_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_static_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_static_generation_long_form",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_batch_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_generation_longform",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_empty_longform",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_no_speech_detection",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_prompt_ids",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch_beam",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_multi_batch_hard_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
}
],
"multi": [
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelTest::test_multi_gpu_data_parallel_forward",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_distil_token_timestamp_generation",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_forced_decoder_ids",
"trace": "(line 2938) Failed: (subprocess)"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_prompt_ids",
"trace": "(line 131) TypeError: EncoderDecoderCache.__init__() missing 1 required positional argument: 'cross_attention_cache'"
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_generate_with_prompt_ids_task_language",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_language_detection",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_batched_generation_multilingual",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_generation_multilingual",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_large_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_en_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_longform_timestamps_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_small_token_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_speculative_decoding_distil",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_speculative_decoding_non_distil",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_en_batched_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_en_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_logits_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_longform_timestamps_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_specaugment_librispeech",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_static_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_static_generation_long_form",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_batch_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_generation",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_tiny_token_timestamp_generation_longform",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_empty_longform",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_empty_longform_multi_gpu",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_hard_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_multi_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_no_speech_detection",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_prompt_ids",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch_beam",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_longform_single_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_multi_batch_hard_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
},
{
"line": "tests/models/whisper/test_modeling_whisper.py::WhisperModelIntegrationTests::test_whisper_shortform_single_batch_prev_cond",
"trace": "(line 172) ImportError: To support decoding audio data, please install 'torchcodec'."
}
]
},
"job_link": {
"single": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301330636",
"multi": "https://github.com/huggingface/transformers/actions/runs/16712966867/job/47301329883"
}
}
}