import pytest
import torch

from megablocks.ops.binned_gather import BinnedGatherOp

binned_gather_triton = BinnedGatherOp.apply


def set_seeds(seed=0):
    # Make the random routing below reproducible on CPU and all CUDA devices.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


def make_stress_expert_capacity_tests():
    # Build (seq_len, hidden_size, num_experts, top_k, expert_capacity) cases
    # covering tiny shapes, realistic model shapes, and very large expert
    # capacities.
    tests = []

    # Tiny shapes that are easy to inspect by hand when a case fails.
    for seq_len, hidden_size, num_experts, top_k in [
        (4, 2, 2, 1),
        (4, 2, 2, 2),
        (4, 2, 2, 4),
    ]:
        for expert_capacity in [1, 2, 4]:
            tests.append((seq_len, hidden_size, num_experts, top_k, expert_capacity))

    # Realistic transformer shapes with increasing expert counts.
    for seq_len, hidden_size, num_experts, top_k in [
        (1024, 1536, 4, 1),
        (1024, 1536, 4, 2),
        (1024, 1536, 4, 4),
        (1024, 1536, 64, 1),
        (1024, 1536, 64, 2),
        (1024, 1536, 64, 4),
        (1024, 1536, 128, 1),
        (1024, 1536, 128, 2),
        (1024, 1536, 128, 4),
    ]:
        for expert_capacity in [1, 2, 4, 128, 1024]:
            tests.append((seq_len, hidden_size, num_experts, top_k, expert_capacity))

    # Very large expert capacities, at and above 65535 (2**16 - 1), to stress
    # capacity handling.
    for seq_len, hidden_size, num_experts, top_k in [
        (4096, 768, 32, 4),
    ]:
        for expert_capacity in [65535, 70000, 90000]:
            tests.append((seq_len, hidden_size, num_experts, top_k, expert_capacity))

    return tuple(tests)


BINNED_GATHER_TESTS = make_stress_expert_capacity_tests()
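# 3 * 3 + 9 * 5 + 1 * 3 = 57 parameter combinations in total.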


@pytest.mark.parametrize(('seq_len', 'hidden_size', 'num_experts', 'top_k', 'expert_capacity'), BINNED_GATHER_TESTS)
def test_binned_gather(seq_len: int, hidden_size: int, num_experts: int, top_k: int, expert_capacity: int):
    set_seeds(42)

    # A simple ramp of input values; the PyTorch reference below gathers from
    # the same x, so the forward check can compare for exact equality.
    x = torch.arange(seq_len * hidden_size, device='cuda', dtype=torch.half).view(seq_len, hidden_size)
    x.requires_grad_(True)

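    # Simulate a top_k router: assign each of the seq_len * top_k (token, k)
    # slots to a random expert, then sort so `indices` is the permutation that
    # groups slots by expert, and `bins[i]` is the exclusive end offset of
    # expert i's slots in that sorted order.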
    top_expert = torch.randint(0, num_experts, (seq_len * top_k,), device='cuda', dtype=torch.int)
    _, indices = torch.sort(top_expert)
    bins = torch.cumsum(torch.bincount(top_expert, minlength=num_experts), 0).to(torch.int32)

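    # Reference gather: for each expert, copy up to expert_capacity of its
    # routed rows (in sorted order) into a dense
    # (num_experts, expert_capacity, hidden_size) buffer. Overflow rows are
    # dropped and unused slots stay zero; dividing a sorted slot index by
    # top_k recovers the source token row in x.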
    def binned_gather_pytorch(
        x: torch.Tensor,
        indices: torch.Tensor,
        bins: torch.Tensor,
        expert_capacity: int,
        top_k: int,
    ):
        start = 0
        out = torch.zeros((num_experts, expert_capacity, hidden_size), dtype=x.dtype, device=x.device)
        for i in range(num_experts):
            end = int(bins[i])
            num_tokens = min(expert_capacity, end - start)
            if num_tokens > 0:
                idx = indices[start:start + num_tokens] // top_k
                out[i, :num_tokens, :] = x[idx, :]
            start = end
        return out

    # Forward: the Triton op should match the PyTorch reference exactly.
    out = binned_gather_triton(x, indices, bins, expert_capacity, top_k)
    expected_out = binned_gather_pytorch(x, indices, bins, expert_capacity, top_k)
    assert torch.all(torch.eq(out, expected_out))

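    # Drive the backward pass with an arange-valued gradient (in the output's
    # dtype) instead of all-ones, so mis-routed gradients show up clearly in
    # the diagnostics below.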
    grad_output = torch.arange(out.numel(), device=out.device, dtype=out.dtype).view_as(out)
    out.backward(grad_output)

    assert x.grad is not None, "Gradients should be computed for input x"
    assert x.grad.shape == x.shape, f"Gradient shape {x.grad.shape} should match input shape {x.shape}"

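    # The backward of a binned gather is a binned scatter: each gathered row's
    # gradient is accumulated back onto its source token (a token routed to
    # several experts under top_k receives the sum of their contributions).
    # The reference below mirrors that, with the optional per-slot weights
    # disabled (None).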
    def binned_scatter_pytorch(
        x: torch.Tensor,
        indices: torch.Tensor,
        weights: torch.Tensor,
        bins: torch.Tensor,
        top_k: int,
    ):
        out = torch.zeros((seq_len, hidden_size), device=x.device, dtype=x.dtype)
        start = 0
        for i in range(num_experts):
            end = int(bins[i])
            num_tokens = min(expert_capacity, end - start)
            for j in range(num_tokens):
                index = int(indices[start + j])
                scale = weights[index] if weights is not None else 1.0
                token_pos = index // top_k
                # Accumulate, since with top_k > 1 a token can appear in
                # several experts' bins.
                out[token_pos, :] += scale * x[i, j, :]
            start = end
        return out

    expected_grad = binned_scatter_pytorch(grad_output, indices, None, bins, top_k)
    print(f"x.grad: {x.grad}")
    print(f"expected_grad: {expected_grad}")

    if torch.allclose(x.grad, expected_grad, rtol=1e-3, atol=1e-3):
        print("✅ Success: Gradients match!")
    else:
        print("❌ Gradients don't match")

        print("Checking if values match when sorted...")
        grad_sorted = torch.sort(x.grad.flatten())[0]
        expected_sorted = torch.sort(expected_grad.flatten())[0]
        if torch.allclose(grad_sorted, expected_sorted, rtol=1e-3, atol=1e-3):
            print("✅ Same values, different order - routing issue!")
        else:
            print("❌ Different values entirely")

        print(f"\nTriton Output Shape: {x.grad.shape}")
        print(f"PyTorch Output Shape: {expected_grad.shape}")