python_code | repo_name | file_path
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
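# Note on the numbers used below: x is ones(100), so y.sum() == 100 and
# y.norm() == sqrt(100) == 10; each element then receives a gradient of 1/10,
# hence x.grad.sum() == 10. The cuda_sleep call (when a CUDA stream is
# involved) presumably keeps the source stream busy so that Wait has real
# pending work to synchronize against.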
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
    device = get_device(prev_stream)

    with use_stream(prev_stream):
        if is_cuda(prev_stream):
            cuda_sleep(0.5)
        x = torch.ones(100, device=device, requires_grad=True)

    (y,) = Copy.apply(prev_stream, next_stream, x)
    (y,) = Wait.apply(prev_stream, next_stream, x)

    with use_stream(next_stream):
        assert torch.allclose(y.sum(), torch.tensor(100.0, device=device))
        y.norm().backward()

    with use_stream(prev_stream):
        assert torch.allclose(x.grad.sum(), torch.tensor(10.0, device=device))

def test_copy_wait_cpu_cpu():
    prev_stream = CPUStream
    next_stream = CPUStream
    _test_copy_wait(prev_stream, next_stream)

@skip_if_no_cuda
def test_copy_wait_cpu_cuda(cuda_sleep):
    prev_stream = CPUStream
    next_stream = current_stream(torch.device("cuda"))
    _test_copy_wait(prev_stream, next_stream, cuda_sleep)

@skip_if_no_cuda
def test_copy_wait_cuda_cpu(cuda_sleep):
    prev_stream = current_stream(torch.device("cuda"))
    next_stream = CPUStream
    _test_copy_wait(prev_stream, next_stream, cuda_sleep)

@skip_if_no_cuda
def test_copy_wait_cuda_cuda(cuda_sleep):
    prev_stream = current_stream(torch.device("cuda"))
    next_stream = new_stream(torch.device("cuda"))
    _test_copy_wait(prev_stream, next_stream, cuda_sleep)

def test_wait_multiple_tensors():
    a = torch.rand(1, requires_grad=True)
    b = torch.rand(1, requires_grad=True)

    a, b = Wait.apply(CPUStream, CPUStream, a, b)

    assert a.grad_fn is b.grad_fn
    assert a.grad_fn.__class__ is Wait._backward_cls
pytorch-master | test/distributed/pipeline/sync/test_copy.py
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# tests/__init__.py lets pytest import the application without a custom sys.path or PYTHONPATH.
# See also: https://docs.pytest.org/en/latest/goodpractices.html
pytorch-master | test/distributed/pipeline/sync/__init__.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import weakref
import pytest
import torch
from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_fork_join():
    logs = []

    class Log(torch.autograd.Function):
        @staticmethod
        def forward(ctx, number, tensor):
            ctx.number = number
            return tensor.detach()

        @staticmethod
        def backward(ctx, grad):
            logs.append(ctx.number)
            return None, grad

    a = torch.rand(1, device="cpu", requires_grad=True)
    b = torch.rand(1, device="cuda", requires_grad=True)

    a = Log.apply(1, a)

    a, phony = fork(a)
    b = join(a, phony)

    b = Log.apply(2, b)
    b = b.to("cpu")

    (a + b).backward()

    assert logs == [2, 1]

def test_fork_join_enable_grad():
    x = torch.rand(1, requires_grad=True)

    with torch.enable_grad():
        x2, p = fork(x)

    assert p.requires_grad
    assert x2 is not x
    x = x2

    assert x.requires_grad
    assert p.requires_grad
    assert x.grad_fn.__class__ is Fork._backward_cls
    assert p.grad_fn.__class__ is Fork._backward_cls

    with torch.enable_grad():
        x2 = join(x, p)

    assert x2 is not x
    x = x2

    assert x.requires_grad
    assert x.grad_fn.__class__ is Join._backward_cls

def test_fork_join_no_grad(monkeypatch):
    def do_not_apply(*args):
        raise AssertionError("Function.apply called")

    monkeypatch.setattr("torch.autograd.Function.apply", do_not_apply)

    x = torch.rand(1, requires_grad=True)

    with torch.no_grad():
        x2, p = fork(x)

    assert not p.requires_grad
    assert x2 is x
    x = x2

    with torch.no_grad():
        x2 = join(x, p)

    assert x2 is x
    x = x2

def test_fork_leak():
    leak = None

    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            nonlocal leak
            leak = weakref.ref(ctx)
            return grad

    x = torch.rand(1, requires_grad=True)
    x = F.apply(x)
    x, phony = fork(x)
    x = join(x, phony)

    x.backward()
    del x, phony

    assert leak() is None

def test_join_when_fork_not_requires_grad():
    x = torch.rand(2, 1)
    a, b = x.chunk(2)

    assert not a.requires_grad
    a, p = fork(a)
    assert not a.requires_grad
    assert not p.requires_grad

    assert not b.requires_grad
    b = join(b, p)
    assert not b.requires_grad

def test_join_when_fork_requires_grad():
    x = torch.rand(2, 1)
    a, b = x.chunk(2)

    a.requires_grad_()
    assert a.requires_grad
    a, p = fork(a)
    assert a.requires_grad
    assert p.requires_grad

    assert not b.requires_grad
    b = join(b, p)
    assert b.requires_grad
pytorch-master | test/distributed/pipeline/sync/test_dependency.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.cuda
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
def test_batch_atomic():
    x = torch.tensor(42)
    b = Batch(x)

    assert b.atomic

    assert b.tensor is x
    with pytest.raises(AttributeError):
        b.tensors

    assert list(b) == [x]
    assert len(b) == 1
    assert b[0] is x

def test_batch_non_atomic():
    x, y = torch.tensor(42), torch.tensor(21)
    b = Batch((x, y))

    assert not b.atomic

    with pytest.raises(AttributeError):
        b.tensor

    assert list(b) == [x, y]
    assert len(b) == 2
    assert b[0] is x
    assert b[1] is y

def test_batch_call():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    def f(x):
        return x

    def g(x, y):
        return x, y

    assert a.call(f).atomic
    assert not b.call(g).atomic

def test_batch_setitem_by_index():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    a[0] = torch.tensor(0)
    b[0] = torch.tensor(0)

    assert a.atomic
    assert a[0].item() == 0

    assert not b.atomic
    assert len(b) == 2
    assert b[0].item() == 0
    assert b[1].item() == 21

def test_batch_setitem_by_slice():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    a[:] = (torch.tensor(0),)
    b[:] = (torch.tensor(0),)

    assert a.atomic
    assert a[0].item() == 0

    assert not b.atomic
    assert len(b) == 1
    assert b[0].item() == 0

def test_check():
    check(torch.device("cpu"), torch.tensor(42))
    check(torch.device("cpu"), torch.tensor(4), torch.tensor(2))

    with pytest.raises(TypeError):
        check(torch.device("cpu"), 42)

    with pytest.raises(TypeError):
        check(torch.device("cpu"), "str")

    with pytest.raises(TypeError):
        check(torch.device("cpu"), (torch.tensor(4), 2))

def test_gather_tensors():
    a = torch.zeros(1, 1)
    b = torch.zeros(1, 1)

    ab = gather([Batch(a), Batch(b)])

    assert ab.size() == (2, 1)

def test_gather_tuples():
    a = (torch.zeros(1, 1), torch.zeros(2, 2))
    b = (torch.zeros(1, 1), torch.zeros(2, 2))

    ab = gather([Batch(a), Batch(b)])

    assert isinstance(ab, tuple)
    assert ab[0].size() == (2, 1)
    assert ab[1].size() == (4, 2)

def test_scatter_tensor():
    ab = torch.zeros(2, 1)

    a, b = scatter(ab, chunks=2)

    assert a.tensor.size() == (1, 1)
    assert b.tensor.size() == (1, 1)

def test_scatter_multiple_tensors():
    ab = (torch.zeros(2, 1), torch.zeros(4, 2))

    a, b = scatter(*ab, chunks=2)

    assert list(a)[0].size() == (1, 1)
    assert list(b)[0].size() == (1, 1)
    assert list(a)[1].size() == (2, 2)
    assert list(b)[1].size() == (2, 2)
pytorch-master | test/distributed/pipeline/sync/test_microbatch.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
from itertools import chain
import pytest
import torch
from torch import nn, optim
from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm
CHUNKS = 4
def tilt_dist(input):
# Tilt variance by channel.
rgb = input.transpose(0, 1)
rgb[0] *= 1
rgb[1] *= 10
rgb[2] *= 100
# Tilt the mean of each sample in the batch by a different amount.
for i, single in enumerate(input):
single += 2 ** i
return input
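# Note: tilt_dist skews the per-channel variance and shifts every sample's mean
# by a different amount, so statistics computed per micro-batch genuinely differ
# from statistics over the whole mini-batch, which is the gap DeferredBatchNorm
# is meant to close. chunked_forward below mimics Pipe's micro-batching by
# running the module chunk by chunk and concatenating the outputs.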
def chunked_forward(model, input, chunks=CHUNKS):
output_chunks = []
for chunk in input.chunk(chunks):
output_chunks.append(model(chunk))
return torch.cat(output_chunks)
@pytest.mark.parametrize("chunks", [1, 4])
@pytest.mark.parametrize("input_requires_grad", [True, False])
def test_transparency(chunks, input_requires_grad):
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=chunks)
input1 = torch.rand(16, 3, 224, 224)
input1 = tilt_dist(input1)
input2 = input1.clone()
input1.requires_grad = input_requires_grad
input2.requires_grad = input_requires_grad
output1 = chunked_forward(bn, input1, chunks=chunks)
output2 = chunked_forward(dbn, input2, chunks=chunks)
assert torch.allclose(output1, output2, atol=1e-4)
output1.mean().backward()
output2.mean().backward()
assert torch.allclose(bn.weight.grad, dbn.weight.grad, atol=1e-4)
if input_requires_grad:
assert input1.grad is not None
assert input2.grad is not None
assert torch.allclose(input1.grad, input2.grad, atol=1e-4)
@pytest.mark.parametrize("momentum", [0.1, None])
def test_running_stats(momentum):
bn = nn.BatchNorm2d(3, momentum=momentum)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
bn(input)
chunked_forward(dbn, input)
assert torch.allclose(bn.running_mean, dbn.running_mean, atol=1e-4)
assert torch.allclose(bn.running_var, dbn.running_var, atol=1e-4)
def test_convert_deferred_batch_norm():
bn = nn.BatchNorm2d(3, track_running_stats=False)
bn = DeferredBatchNorm.convert_deferred_batch_norm(bn, chunks=CHUNKS)
assert type(bn) is nn.BatchNorm2d # because of track_running_stats=False
dbn = DeferredBatchNorm(3, chunks=CHUNKS)
dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS)
assert dbn is dbn_again
dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS + 1)
assert dbn is not dbn_again # because of different chunks
def test_eval():
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
bn(input)
chunked_forward(dbn, input)
bn.eval()
dbn.eval()
assert torch.allclose(bn(input), dbn(input), atol=1e-4)
def test_optimize():
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=1.0)
for i in range(5):
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
# train
y = bn(input)
a = y.sum()
a.backward()
y = chunked_forward(dbn, input)
b = y.sum()
b.backward()
opt.step()
# eval
bn.eval()
dbn.eval()
with torch.no_grad():
assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10 ** i))
def test_conv_bn():
bn = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=0.1)
# 1st step
a = bn(input)
b = chunked_forward(dbn, input)
# Outputs are different. (per-mini-batch vs. per-micro-batch)
assert not torch.allclose(a, b)
a.sum().backward()
b.sum().backward()
opt.step()
opt.zero_grad()
# Conv layers are also trained differently because of their different outputs.
assert not torch.allclose(bn[0].weight, dbn[0].weight)
# But BNs track identical running stats.
assert torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
assert torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)
# 2nd step
a = bn(input)
b = chunked_forward(dbn, input)
a.sum().backward()
b.sum().backward()
# BNs can't track identical running stats due to the different conv layers.
assert not torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
assert not torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)
def test_input_requiring_grad():
dbn = DeferredBatchNorm(3, chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
input.requires_grad = True
chunked_forward(dbn, input)
assert not dbn.sum.requires_grad
assert dbn.sum.grad_fn is None
pytorch-master | test/distributed/pipeline/sync/test_deferred_batch_norm.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
class fake_device:
    """A test double for :class:`torch.device`. Every fake device instance is
    distinct from every other one.
    """

    type = "fake"
    index = None
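# Note: spawn_workers keys worker threads and queues by device, and each
# fake_device instance stands for a distinct device, so each one gets its own
# worker thread and queue pair, which is what the tests below rely on.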
def test_compute_multithreading():
    """Task.compute should be executed on multiple threads."""
    thread_ids = set()

    def log_thread_id():
        thread_id = threading.current_thread().ident
        thread_ids.add(thread_id)
        return Batch(())

    with spawn_workers([fake_device() for _ in range(2)]) as (in_queues, out_queues):
        for i in range(2):
            t = Task(CPUStream, compute=log_thread_id, finalize=None)
            in_queues[i].put(t)
        for i in range(2):
            out_queues[i].get()

    assert len(thread_ids) == 2

def test_compute_success():
    """Task.compute returns (True, (task, batch)) on success."""

    def _42():
        return Batch(torch.tensor(42))

    with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
        t = Task(CPUStream, compute=_42, finalize=None)
        in_queues[0].put(t)
        ok, (task, batch) = out_queues[0].get()

        assert ok
        assert task is t
        assert isinstance(batch, Batch)
        assert batch[0].item() == 42

def test_compute_exception():
    """Task.compute returns (False, exc_info) on failure."""

    def zero_div():
        0 / 0

    with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
        t = Task(CPUStream, compute=zero_div, finalize=None)
        in_queues[0].put(t)
        ok, exc_info = out_queues[0].get()

        assert not ok
        assert isinstance(exc_info, tuple)
        assert issubclass(exc_info[0], ZeroDivisionError)

@pytest.mark.parametrize("grad_mode", [True, False])
def test_grad_mode(grad_mode):
    def detect_grad_enabled():
        x = torch.rand(1, requires_grad=torch.is_grad_enabled())
        return Batch(x)

    with torch.set_grad_enabled(grad_mode):
        with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
            task = Task(CPUStream, compute=detect_grad_enabled, finalize=None)
            in_queues[0].put(task)
            ok, (_, batch) = out_queues[0].get()

            assert ok
            assert batch[0].requires_grad == grad_mode

def test_worker_per_device():
    cpu = torch.device("cpu")
    cpu0 = torch.device("cpu", index=0)
    fake1 = fake_device()
    fake2 = fake_device()

    with spawn_workers([cpu, cpu, cpu0, fake1, fake2]) as (in_queues, out_queues):
        assert len(in_queues) == len(out_queues) == 5

        # 0: cpu, 1: cpu, 2: cpu0
        assert in_queues[0] is in_queues[1] is in_queues[2]
        assert out_queues[0] is out_queues[1] is out_queues[2]

        # 3: fake1, 4: fake2
        assert in_queues[3] is not in_queues[4]
        assert out_queues[3] is not out_queues[4]
pytorch-master | test/distributed/pipeline/sync/test_worker.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.distributed.pipeline.sync.phony import get_phony
def test_phony_size():
    p = get_phony(torch.device("cpu"), requires_grad=False)
    assert p.size() == (0,)

def test_phony_requires_grad():
    p1 = get_phony(torch.device("cpu"), requires_grad=True)
    p2 = get_phony(torch.device("cpu"), requires_grad=False)
    assert p1.requires_grad
    assert not p2.requires_grad

def test_cached_phony():
    p1 = get_phony(torch.device("cpu"), requires_grad=True)
    p2 = get_phony(torch.device("cpu"), requires_grad=True)
    assert p1 is p2

    p3 = get_phony(torch.device("cpu"), requires_grad=False)
    p4 = get_phony(torch.device("cpu"), requires_grad=False)
    assert p3 is p4

    assert p1 is not p3

def test_phony_in_autograd_function():
    class Phonify(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            phony = get_phony(input.device, requires_grad=False)
            return phony.detach()

    x = torch.rand(1, requires_grad=True)

    p1 = Phonify.apply(x)
    p2 = get_phony(torch.device("cpu"), requires_grad=True)

    assert p1 is not p2
    assert p1.grad_fn is not None
    assert p2.grad_fn is None
pytorch-master | test/distributed/pipeline/sync/test_phony.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
import torch.nn.functional as F
from torch.distributed.pipeline.sync import Pipe
def test_python_autograd_function(setup_rpc):
# A Python autograd function might fail with this error:
#
# RuntimeError: Returning Variables sharing storage with other Variables
# that require grad is not supported in Python functions. Please submit a
# feature request if you hit this error.
#
# It doesn't look like an essential restriction, but it happens on the
# current PyTorch version. To avoid it, identity autograd functions such as
# Wait, Fork, and Join should detach the tensor before returning it.
#
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad):
return grad
class M(nn.Module):
def forward(self, input):
return Identity.apply(input)
model = nn.Sequential(M(), M())
model = Pipe(model, checkpoint="always")
x = torch.rand(42)
y = model(x)
assert torch.allclose(x, y.local_value())
def test_exception_no_hang(setup_rpc):
# In v0.0.2, once a failed partition received a normal (non-closing) message
# for the next micro-batch, a hang occurred. The reason was that a failed
# partition didn't call in_queue.task_done() on a normal message, so the
# preceding partition stayed blocked at out_queue.join() for the micro-batch
# after next.
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Raise())
model = Pipe(model, chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="2 cuda devices required")
def test_tuple_wait(cuda_sleep, setup_rpc):
# In v0.0.3, Wait was applied only to the first tensor of a micro-batch.
# Under that behavior, if checkpointing was disabled, gradient accumulations
# on the other tensors might not be synchronized properly with the copy
# stream.
class Sleep(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.detach()
@staticmethod
def backward(ctx, grad):
with torch.cuda.device(grad.device):
cuda_sleep(0.05)
return grad
class Layer1(nn.Module):
def __init__(self):
super().__init__()
self.ones = nn.Parameter(torch.ones(32, 3, 32, 32, requires_grad=True))
def forward(self, a, b):
a = a * self.ones
return a * 1, b * 2, b * 3
class Layer2(nn.Module):
def __init__(self):
super().__init__()
self.ones = nn.Parameter(torch.ones(32, 3, 32, 32, requires_grad=True))
def forward(self, a, b, c):
a = a * self.ones
b = Sleep.apply(b)
return a + b + c
model = nn.Sequential(Layer1().cuda(0), Layer2().cuda(1))
model = Pipe(model, chunks=32, checkpoint="never")
a = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
b = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
y = model(a, b)
y.local_value().norm().backward()
torch.cuda.synchronize(0)
torch.cuda.synchronize(1)
assert torch.isclose(b.grad.norm().cpu(), torch.tensor(5.000))
def test_parallel_randoms(setup_rpc):
class Dropouts(nn.Module):
def forward(self, x):
for _ in range(100):
x = F.dropout(x, p=0.001)
return x
model = nn.Sequential(Dropouts(), Dropouts())
x = torch.rand(10, 10, requires_grad=True)
model = Pipe(model, chunks=10, checkpoint="always")
y = model(x)
y = y.local_value()
y.norm().backward()
assert y.to(torch.bool).tolist() == x.grad.to(torch.bool).tolist()
pytorch-master | test/distributed/pipeline/sync/test_bugs.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
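# Note: blockpartition.solve splits a sequence of per-layer costs into the
# requested number of contiguous blocks whose sums are as balanced as
# possible; in the case below, [1, 2, 3, 4] and [5, 6] sum to 10 and 11.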
def test_blockpartition():
assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
def test_blockpartition_zeros():
assert blockpartition.solve([0, 0], partitions=2) == [[0], [0]]
def test_blockpartition_non_positive_partitions():
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=0)
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=-1)
def test_blockpartition_short_sequence():
with pytest.raises(ValueError):
blockpartition.solve([], partitions=1)
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=2)
@pytest.mark.parametrize("device", devices)
@pytest.mark.skip(reason="Flaky due to time.sleep()")
def test_balance_by_time(device):
class Delay(nn.Module):
def __init__(self, seconds):
super().__init__()
self.seconds = seconds
def forward(self, x):
time.sleep(self.seconds)
return x
model = nn.Sequential(*[Delay(i / 10) for i in [1, 2, 3, 4, 5, 6]])
sample = torch.rand(1)
balance = balance_by_time(2, model, sample, device=device)
assert balance == [4, 2]
def test_balance_by_time_loop_resets_input():
# nn.Flatten was introduced at PyTorch 1.2.0.
class Flatten(nn.Module):
def forward(self, x):
return x.flatten(1)
model = nn.Sequential(nn.Conv2d(3, 2, 1), Flatten(), nn.Linear(128, 10))
sample = torch.rand(10, 3, 8, 8)
balance = balance_by_time(2, model, sample, device="cpu")
assert balance == [1, 2]
@skip_if_no_cuda
def test_balance_by_size_latent():
class Expand(nn.Module):
def __init__(self, times):
super().__init__()
self.times = times
def forward(self, x):
for i in range(self.times):
x = x + torch.rand_like(x, requires_grad=True)
return x
sample = torch.rand(10, 100, 100)
model = nn.Sequential(*[Expand(i) for i in [1, 2, 3, 4, 5, 6]])
balance = balance_by_size(2, model, sample)
assert balance == [4, 2]
model = nn.Sequential(*[Expand(i) for i in [6, 5, 4, 3, 2, 1]])
balance = balance_by_size(2, model, sample)
assert balance == [2, 4]
@skip_if_no_cuda
def test_balance_by_size_param():
model = nn.Sequential(*[nn.Linear(i + 1, i + 2) for i in range(6)])
sample = torch.rand(7, 1)
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [4, 2]
model = nn.Sequential(*[nn.Linear(i + 2, i + 1) for i in reversed(range(6))])
sample = torch.rand(1, 7)
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [2, 4]
@skip_if_no_cuda
def test_balance_by_size_param_scale():
class Tradeoff(nn.Module):
def __init__(self, param_size, latent_size):
super().__init__()
self.fc = nn.Linear(param_size, param_size)
self.latent_size = latent_size
def forward(self, x):
for i in range(self.latent_size):
x = x + torch.rand_like(x, requires_grad=True)
return x
model = nn.Sequential(
Tradeoff(param_size=1, latent_size=6),
Tradeoff(param_size=2, latent_size=5),
Tradeoff(param_size=3, latent_size=4),
Tradeoff(param_size=4, latent_size=3),
Tradeoff(param_size=5, latent_size=2),
Tradeoff(param_size=6, latent_size=1),
)
sample = torch.rand(1, requires_grad=True)
balance = balance_by_size(2, model, sample, param_scale=0)
assert balance == [2, 4]
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [4, 2]
@pytest.mark.parametrize("device", devices)
def test_layerwise_sandbox(device):
model = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
model.eval()
for layer in layerwise_sandbox(model, torch.device(device)):
assert layer.training
assert all(p.device.type == device for p in layer.parameters())
assert all(not l.training for l in model)
assert all(p.device.type == "cpu" for p in model.parameters())
@pytest.mark.parametrize("device", devices)
def test_sandbox_during_profiling(device):
model = nn.Sequential(nn.BatchNorm2d(3))
before = {k: v.clone() for k, v in model.state_dict().items()}
sample = torch.rand(1, 3, 10, 10)
balance_by_time(1, model, sample, device=device)
after = model.state_dict()
assert before.keys() == after.keys()
for key, value in before.items():
assert torch.allclose(after[key], value), key
def test_not_training():
class AssertTraining(nn.Module):
def forward(self, x):
assert self.training
return x
model = nn.Sequential(AssertTraining())
model.eval()
assert not model.training
sample = torch.rand(1)
balance_by_time(1, model, sample, device="cpu")
assert not model.training
def test_balance_by_time_tuple():
class Twin(nn.Module):
def forward(self, x):
return x, x.detach()
class Add(nn.Module):
def forward(self, a, b):
return a + b
model = nn.Sequential(Twin(), Add())
sample = torch.rand(1, requires_grad=True)
balance_by_time(1, model, sample, device="cpu")
@skip_if_no_cuda
def test_balance_by_size_tuple():
class Twin(nn.Module):
def forward(self, x):
return x, x.detach()
class Add(nn.Module):
def forward(self, a, b):
return a + b
model = nn.Sequential(Twin(), Add())
sample = torch.rand(1, requires_grad=True)
balance_by_size(1, model, sample)
def test_already_has_grad():
model = nn.Sequential(nn.Conv2d(3, 3, 1))
sample = torch.rand(1, 3, 32, 32)
model(sample).norm().backward()
with pytest.raises(ValueError, match="some parameter already has gradient"):
balance_by_time(1, model, sample, device="cpu")
pytorch-master | test/distributed/pipeline/sync/test_balance.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_pipe_without_rpc():
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
pipe = Pipe(model, chunks=1)
def test_parameters(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=1)
assert list(pipe.parameters()) != []
def test_public_attrs(setup_rpc):
class MyString:
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=42.000, checkpoint=MyString("always"))
assert pipe.devices == [torch.device("cpu")]
assert pipe.chunks == 42
assert isinstance(pipe.chunks, int)
assert pipe.checkpoint == "always"
assert isinstance(pipe.checkpoint, str)
def test_sequential_like(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
assert len(model) == 2
assert list(model) == [a, b]
assert model[0] is a
assert model[1] is b
with pytest.raises(IndexError):
_ = model[2]
assert model[-1] is b
assert model[-2] is a
def test_chunks_less_than_1(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError):
Pipe(model, chunks=0)
with pytest.raises(ValueError):
Pipe(model, chunks=-1)
def test_batch_size_indivisible(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=4)
with pytest.warns(None) as record:
model(torch.rand(7, 1))
# Indivisible batch size is legal.
assert not record
def test_batch_size_small(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=4)
with pytest.warns(None) as record:
model(torch.rand(2, 1))
# Batch size smaller than chunks is legal.
assert not record
def test_checkpoint_mode(setup_rpc):
def count_grad_fn(grad_fn, name, visited=None):
if visited is None:
visited = set()
if grad_fn in visited:
return 0
visited.add(grad_fn)
if grad_fn is None:
return 0
if grad_fn.__class__.__name__ == name:
return 1
counter = 0
for next_grad_fn, _ in grad_fn.next_functions:
counter += count_grad_fn(next_grad_fn, name, visited=visited)
return counter
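# Note: with chunks=2 there are two micro-batches, so "always" checkpoints
# both (2 CheckpointBackward nodes), "except_last" checkpoints all but the
# last one (1), and "never" checkpoints none (0), matching the counts
# asserted below.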
model = nn.Sequential(nn.Linear(1, 1))
input = torch.rand(2, 1)
always = Pipe(model, chunks=2, checkpoint="always")
except_last = Pipe(model, chunks=2, checkpoint="except_last")
never = Pipe(model, chunks=2, checkpoint="never")
always_output = always(input)
except_last_output = except_last(input)
never_output = never(input)
assert count_grad_fn(always_output.local_value().grad_fn, "CheckpointBackward") == 2
assert count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.local_value().grad_fn, "CheckpointBackward") == 0
def test_checkpoint_mode_invalid(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
Pipe(model, chunks=2, checkpoint="INVALID_CHECKPOINT")
def test_checkpoint_mode_when_chunks_1(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
# All checkpoint modes are fine.
Pipe(model, chunks=1, checkpoint="except_last")
Pipe(model, chunks=1, checkpoint="always")
Pipe(model, chunks=1, checkpoint="never")
def test_checkpoint_eval(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=2)
input = torch.rand(2, 1)
def find_grad_fn(grad_fn, name):
if grad_fn is None:
return False
if grad_fn.__class__.__name__ == name:
return True
for next_grad_fn, _ in grad_fn.next_functions:
if find_grad_fn(next_grad_fn, name):
return True
return False
model.train()
train_output = model(input)
assert find_grad_fn(train_output.local_value().grad_fn, "CheckpointBackward")
assert find_grad_fn(train_output.local_value().grad_fn, "RecomputeBackward")
model.eval()
eval_output = model(input)
assert not find_grad_fn(eval_output.local_value().grad_fn, "CheckpointBackward")
assert not find_grad_fn(eval_output.local_value().grad_fn, "RecomputeBackward")
def test_checkpoint_non_float_input(setup_rpc):
class ForkNonFloat(nn.Module):
def forward(self, input):
return (input * 2, torch.tensor([False]))
class JoinNonFloat(nn.Module):
def forward(self, input, non_float):
return input * 2
model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
model = Pipe(model, chunks=1, checkpoint="always")
input = torch.rand(1, requires_grad=True)
output = model(input)
output.backward()
def test_no_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=2)
input = torch.rand(2, 1)
latent = None
def hook(module, input, output):
_ = module
_ = input
nonlocal latent
latent = output
partition = model.partitions[0]
partition.register_forward_hook(hook)
with torch.no_grad():
model(input)
assert latent.grad_fn is None
def test_exception(setup_rpc):
class ExpectedException(Exception):
pass
class Raise(nn.Module):
def forward(self, *_):
raise ExpectedException()
model = nn.Sequential(Raise())
model = Pipe(model, chunks=1)
with pytest.raises(ExpectedException):
model(torch.rand(1))
def test_exception_early_stop_asap(setup_rpc):
"""Even the first partitions have finished to process, the partition before
the failed partition should be killed as soon as possible.
"""
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
counter = 0
class Counter(nn.Module):
def forward(self, x):
time.sleep(0.1)
nonlocal counter
counter += 1
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
model = Pipe(model, chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
# The Raise partition fails on the first micro-batch, so early stopping
# should prevent the third micro-batch from ever reaching Counter. If the
# early stop doesn't work, counter would be 3 instead of 2.
assert counter == 2
def test_nested_input(setup_rpc):
class NestedInput(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, inp):
return inp
model = nn.Sequential(NestedInput())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
# TypeError: expected Tensor, but got tuple
with pytest.raises(TypeError):
model((a, (a, b))).local_value()
# TypeError: expected Tensor, but got list
with pytest.raises(TypeError):
model((a, [a, b])).local_value()
def test_input_pair(setup_rpc):
class Two(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, a, b):
return (self.fc_a(a), self.fc_b(b))
model = nn.Sequential(Two())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
a_out, b_out = model(a, b).local_value()
loss = (a_out + b_out).mean()
loss.backward()
assert a.grad is not None
assert b.grad is not None
def test_multi_sequence_input(setup_rpc):
class MultiSeq(nn.Module):
def forward(self, tup1, tup2):
return tup1, tup2
model = Pipe(nn.Sequential(MultiSeq()))
with pytest.raises(TypeError):
model(
[torch.rand(10), torch.rand(10)],
[torch.rand(10), torch.rand(10)]
)
def test_input_singleton(setup_rpc):
class One(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(1, 1)
def forward(self, a):
return (self.fc(a),)
model = nn.Sequential(One())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
(a_out,) = model(a).local_value()
loss = a_out.mean()
loss.backward()
assert all(p.grad is not None for p in model.parameters())
assert a.grad is not None
def test_input_varargs(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model)
a = torch.rand(1)
b = torch.rand(1)
# TypeError: forward() takes 2 positional arguments but 3 were given
with pytest.raises(TypeError):
model(a, b)
def test_non_tensor(setup_rpc):
class NonTensor(nn.Module):
def forward(self, _):
return "hello"
model = nn.Sequential(NonTensor())
model = Pipe(model)
x = torch.rand(1)
with pytest.raises(TypeError):
model(x)
with pytest.raises(TypeError):
model("hello")
def test_non_tensor_sequence(setup_rpc):
class NonTensorTuple(nn.Module):
def forward(self, x):
return (x, "hello")
class NonTensorArgs(nn.Module):
def forward(self, x: str, y: bool):
return x, y
model = nn.Sequential(NonTensorTuple())
model = Pipe(model)
x = torch.rand(1)
with pytest.raises(TypeError):
model((x, "hello"))
with pytest.raises(TypeError):
model([x, "hello"])
model = nn.Sequential(NonTensorArgs())
model = Pipe(model)
with pytest.raises(TypeError):
# Need at least one Tensor.
model("hello", True)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_valid_non_tensor(checkpoint, setup_rpc):
class NonTensor1(nn.Module):
def forward(self, a: int, b: Tensor, c: bool, d: Tensor):
res = b + a if c else b * a
if d is not None:
res += d
return res, c, a, b, "hello", d
class NonTensor2(nn.Module):
def forward(self, a: Tensor, b: bool, c: int, d: Tensor, e: str, f: Tensor):
res = a * c if b else a + c
res += d
return c, res, a, d + f if f is not None else d, b, e, f
model = Pipe(nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint)
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
d = torch.rand(10, 10)
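# Note: with chunks=5, non-tensor values (ints, bools, strings, None) are
# replicated once per micro-batch and come back as lists of length 5, while
# tensor outputs are concatenated back into full-batch tensors, which is what
# the assertions below check.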
res = model(a, b, c, d).local_value()
assert 7 == len(res)
assert [a] * 5 == res[0]
if c:
assert torch.allclose(((b + a + d) * a) + b, res[1])
assert torch.allclose(b + a + d, res[2])
else:
assert torch.allclose(((b * a) + d + a) + b, res[1])
assert torch.allclose(b * a + d, res[2])
assert torch.allclose(b + d, res[3])
assert [c] * 5 == res[4]
assert ["hello"] * 5 == res[5]
assert torch.allclose(d, res[6])
# Test one of the tensors can be None
res = model(a, b, c, None).local_value()
assert 7 == len(res)
assert [a] * 5 == res[0]
if c:
assert torch.allclose(((b + a) * a) + b, res[1])
assert torch.allclose(b + a, res[2])
else:
assert torch.allclose(((b * a) + a) + b, res[1])
assert torch.allclose(b * a, res[2])
assert torch.allclose(b, res[3])
assert [c] * 5 == res[4]
assert ["hello"] * 5 == res[5]
assert [None] * 5 == res[6]
# Need at least one tensor.
with pytest.raises(TypeError):
model(a, None, c, None)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_no_tensor_output(checkpoint, setup_rpc):
class Model1(nn.Module):
def forward(self, a: int, b: Tensor, c: bool):
return a, c, "hello"
class Model2(nn.Module):
def forward(self, a: int, b: bool, c: str):
return a, c, b
model = Pipe(nn.Sequential(Model1(), Model2()), chunks=5)
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
# Need at least one tensor across partitions too.
with pytest.raises(TypeError):
res = model(a, b, c).local_value()
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_uneven_batch_size(checkpoint, setup_rpc):
class Model(nn.Module):
def forward(self, a: Tensor, b: int, c: Tensor):
return a, b, c
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(3, 10)
b = random.randint(0, 10)
c = torch.rand(6, 10)
res = model(a, b, c).local_value()
assert torch.allclose(a, res[0])
assert [b] * 3 == res[1]  # a (size 3) and c (size 6) each split into 3 chunks, so b is replicated 3 times
assert torch.allclose(c, res[2])
# Two tensors producing uneven chunks would fail.
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(3, 10)
b = random.randint(0, 10)
c = torch.rand(4, 10)
with pytest.raises(RuntimeError, match='Found different number of chunks'):
model(a, b, c)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_no_chunk(checkpoint, setup_rpc):
class Model(nn.Module):
def forward(self, a: Tensor, b: int, c: Tensor):
return a, b, c
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(10, 10)
b = random.randint(0, 10)
c = torch.rand(10, 10)
res = model(a, b, NoChunk(c)).local_value()
assert torch.allclose(a, res[0])
assert [b] * 5 == res[1]
# c gets replicated due to NoChunk and the same tensor gets concatenated 5
# times in the output.
assert torch.allclose(torch.cat((c, c, c, c, c)), res[2])
# Test invalid type for NoChunk
with pytest.raises(TypeError, match='NoChunk only supported for tensors'):
NoChunk(b)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_deferred_batch_norm(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), chunks=2, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).local_value().mean().backward()
bn(x).mean().backward()
assert torch.allclose(pipe[0].running_mean, bn.running_mean, atol=1e-4)
assert torch.allclose(pipe[0].running_var, bn.running_var, atol=1e-4)
@pytest.mark.parametrize("checkpoint", ["never", "always"])
def test_deferred_batch_norm_params(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).local_value().mean().backward()
bn(x).mean().backward()
assert pipe[0].weight.grad is not None
assert pipe[0].bias.grad is not None
assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)
def test_devices(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
c = nn.Linear(1, 1)
# There are two extra devices.
model = nn.Sequential(a, b, c)
model = Pipe(model)
cpu = torch.device("cpu")
# Extra devices must be discarded.
assert model.devices == [cpu, cpu, cpu]
def test_partitions(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
assert isinstance(model.partitions, nn.ModuleList)
assert isinstance(model.partitions[0], nn.Sequential)
assert isinstance(model.partitions[1], nn.Sequential)
assert "partitions.0.0.weight" in model.state_dict()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_merged_partitions(setup_rpc):
a = nn.Linear(1, 1).to(0)
b = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 2)).to(0)
c = nn.Linear(1, 1)
d = nn.Linear(1, 2)
model = nn.Sequential(a, b, c, d)
model = Pipe(model)
assert isinstance(model.partitions, nn.ModuleList)
assert isinstance(model.partitions[0], PipeSequential)
assert isinstance(model.partitions[1], PipeSequential)
assert list(model.partitions[0]) == [a, b[0], b[1]]
assert list(model.partitions[1]) == [c]
assert list(model.partitions[2]) == [d]
def test_deny_moving(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
# Moving is denied.
with pytest.raises(TypeError):
model.cuda()
with pytest.raises(TypeError):
model.cpu()
with pytest.raises(TypeError):
model.to(torch.device("cuda"))
with pytest.raises(TypeError):
model.to(0)
with pytest.raises(TypeError):
model.to("cuda")
with pytest.raises(TypeError):
model.to(device=0)
with pytest.raises(TypeError):
model.to(torch.rand(1))
with pytest.raises(TypeError):
model.to(tensor=torch.rand(1))
# Casting is allowed.
model.half()
model.to(torch.double)
model.to(dtype=torch.float)
def test_empty_module(setup_rpc):
# Empty sequential module is not illegal.
model = nn.Sequential()
model = Pipe(model)
assert model(torch.tensor(42)).local_value() == torch.tensor(42)
# But only a tensor or a sequence of tensors is a legal input to Pipe.
with pytest.raises(TypeError):
model(42)
def test_named_children(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
model = Pipe(model)
names = set(n for n, _ in model.named_modules())
assert "partitions.0.0" in names
assert "partitions.1.0" in names
# Pipe doesn't forward __getattr__ to its children. Unlike nn.Sequential,
# Pipe needs several methods of its own in its namespace.
with pytest.raises(AttributeError):
model.a
def test_verify_module_non_sequential(setup_rpc):
with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
Pipe(nn.Module())
def test_verify_module_duplicate_children(setup_rpc):
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(conv, conv)
with pytest.raises(ValueError, match="module with duplicate children is not supported"):
Pipe(model)
@skip_if_no_cuda
def test_verify_module_params_on_same_device(setup_rpc):
class Surrogate(nn.Module):
def __init__(self, param1, param2):
super().__init__()
self.param1 = param1
self.param2 = param2
conv1 = nn.Conv2d(3, 3, 1)
conv2 = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv1, conv2.cuda()))
with pytest.raises(
ValueError,
match=r'should have all parameters on a single device, please use .to\(\)'
' to place the module on a single device'):
Pipe(model)
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least two GPUs")
def test_verify_nested_modules(setup_rpc):
model = nn.Sequential(
nn.Sequential(
nn.Linear(32, 16).cuda(0),
nn.Linear(16, 8).cuda(0)
),
nn.Sequential(
nn.Linear(8, 4).cuda(1),
nn.Linear(4, 2).cuda(1)
),
)
pipe = Pipe(model)
out = pipe(torch.rand(10, 32).cuda(0))
assert out.local_value().device == torch.device("cuda:1")
assert out.local_value().size() == torch.Size([10, 2])
def test_verify_module_duplicate_parameters_on_same_device(setup_rpc):
class Surrogate(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv), Surrogate(conv))
Pipe(model)
def test_forward_lockstep(setup_rpc):
timeline = []
class DelayedLog(nn.Module):
def __init__(self, j, seconds):
super().__init__()
self.i = 0
self.j = j
self.seconds = seconds
def forward(self, x):
time.sleep(self.seconds)
timeline.append((self.i, self.j))
self.i += 1
return x
model = nn.Sequential(DelayedLog(0, seconds=0), DelayedLog(1, seconds=0.1))
model = Pipe(model, chunks=3)
model(torch.rand(3, 1))
# Expected timeline: (Logs are recorded at !)
#
# Partition #0: 0! 1! 2!
# Partition #1: 000! 111! 222!
#
assert timeline == [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (2, 1)]
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
@skip_if_no_cuda
def test_multiple_inputs(checkpoint, setup_rpc):
class Module1(nn.Module):
def forward(self, a, b, c):
return a + b + c, a * b * c
class Module2(nn.Module):
def forward(self, a, b):
return a + b
model = Pipe(nn.Sequential(Module1().cuda(0), Module2().cuda(0)), chunks=2, checkpoint=checkpoint)
t = torch.rand(10)
res = model(t, t, t).local_value()
assert torch.equal(res, (t + t + t) + (t * t * t))
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least two GPUs")
def test_inputs_wrong_device(setup_rpc):
class Module1(nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(5))
def forward(self, a, b):
return a + b + self.param, b
# Start inputs on wrong device and ensure Pipe moves them correctly.
a = torch.rand(10).cuda(1)
b = torch.rand(10).cuda(1)
model = Pipe(nn.Sequential(Module1().cuda(0), Module1().cuda(1)), chunks=2)
with pytest.raises(ValueError, match='All inputs should be on the same device as the first partition'):
model(a, b)
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least two GPUs")
def test_with_device_wrapper(setup_rpc):
fc1 = nn.Linear(16, 8).cuda(0)
fc2 = nn.Linear(8, 4).cuda(1)
dropout = nn.Dropout()
model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
model = nn.Sequential(fc1, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
model = nn.Sequential(fc1, WithDevice(fc2, 'cuda:0'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:0') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0')] == model.devices
assert torch.device('cuda:0') == fc2.weight.device
pytorch-master | test/distributed/pipeline/sync/test_pipe.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.pipeline.sync.pipeline import _clock_cycles
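# Note: as the cases below suggest, _clock_cycles(m, n) appears to yield one
# list per clock tick containing the (micro-batch index, partition index)
# pairs that can run concurrently, producing the familiar pipeline-parallel
# "diagonal wavefront" schedule for m micro-batches over n partitions.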
def test_clock_cycles():
    assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
    assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
    assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]

    assert list(_clock_cycles(3, 3)) == [
        [(0, 0)],
        [(1, 0), (0, 1)],
        [(2, 0), (1, 1), (0, 2)],
        [(2, 1), (1, 2)],
        [(2, 2)],
    ]

    assert list(_clock_cycles(4, 2)) == [
        [(0, 0)],
        [(1, 0), (0, 1)],
        [(2, 0), (1, 1)],
        [(3, 0), (2, 1)],
        [(3, 1)],
    ]
pytorch-master | test/distributed/pipeline/sync/test_pipeline.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import pytest
import torch
from torch import nn
import torch.cuda
from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.microbatch import Batch
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
@pytest.mark.parametrize("device", devices)
def test_serial_checkpoints(device):
# Copied from https://github.com/pytorch/pytorch/pull/18568.
timeline = []
class Log(torch.autograd.Function):
@staticmethod
def forward(ctx, name, x):
ctx.name = name
timeline.append(f"{name}:forward")
return x.detach()
@staticmethod
def backward(ctx, grad_output):
name = ctx.name
timeline.append(f"{name}:backward")
return None, grad_output
a = torch.rand(1, device=device, requires_grad=True)
b = torch.rand(1, device=device, requires_grad=True)
# Increase the next function sequence number.
_ = a + 1 + 2 + 3 + 4 + 5
a = checkpoint(partial(Log.apply, "a"), a)
a, phony = fork(a)
b = join(b, phony)
b = checkpoint(partial(Log.apply, "b"), b)
c = torch.cat((a, b))
out = c.sum()
# +--> {a} --Checkpoint(Log)--> {a}
# {out} --Sum--> {c} --Cat ^-----------------------------+
# +--> {b} --Checkpoint(Log)--> {b} --First--> {b}
out.backward()
assert timeline == ["a:forward", "b:forward", "b:forward", "b:backward", "a:forward", "a:backward"]
# |----------------------| |-----------------------| |-----------------------|
# forward pass Checkpoint(Log[b]) Checkpoint(Log[a])
def test_not_requires_grad():
x = Batch(torch.rand(1, requires_grad=False))
assert not x[0].requires_grad
def f(x):
return x * 2
chk = Checkpointing(f, x)
x = chk.checkpoint()
assert x[0].requires_grad
chk.recompute(x)
assert x[0].requires_grad
x.tensor.backward()
def test_not_requires_grad_with_parameter():
x = torch.rand(1, requires_grad=False)
a = torch.rand(1, requires_grad=True)
def f(x):
return x * a
y = checkpoint(f, x)
y.backward()
assert a.grad is not None
@pytest.mark.parametrize("device", devices)
def test_random_in_checkpoint(device):
dropout = nn.Dropout(p=0.5)
torch.manual_seed(0)
x = torch.randn(3, 3, device=device, requires_grad=True)
y = dropout(x)
y.norm().backward()
torch.manual_seed(0)
chk_x = torch.randn(3, 3, device=device, requires_grad=True)
chk_y = checkpoint(dropout, chk_x)
chk_y.norm().backward()
assert torch.allclose(x.grad, chk_x.grad)
def test_detect_checkpointing_recomputing():
logs = []
class Detect(nn.Module):
def forward(self, input):
logs.append((is_checkpointing(), is_recomputing()))
return input
model = Detect()
input = torch.rand(1, requires_grad=True)
output = checkpoint(model, input)
output.backward()
assert logs == [(True, False), (False, True)]
def test_detect_checkpointing_recomputing_without_checkpoint():
logs = []
class Detect(nn.Module):
def forward(self, input):
logs.append((is_checkpointing(), is_recomputing()))
return input
model = Detect()
input = torch.rand(1, requires_grad=True)
output = model(input)
output.backward()
assert logs == [(False, False)]
def test_non_grad_output():
class ForkNonGrad(nn.Module):
def forward(self, input):
return (input * 2, torch.rand(1))
model = ForkNonGrad()
input = torch.rand(1, requires_grad=True)
output = checkpoint(model, input)
output[0].backward()
pytorch-master | test/distributed/pipeline/sync/test_checkpoint.py
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestNewStream:
def test_new_stream_cpu(self):
stream = new_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_new_stream_cuda(self):
stream = new_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream != torch.cuda.default_stream()
class TestCurrentStream:
def test_current_stream_cpu(self):
stream = current_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_current_stream_cuda(self):
stream = current_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream == torch.cuda.current_stream()
class TestDefaultStream:
def test_default_stream_cpu(self):
stream = default_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_default_stream_cuda(self):
stream = default_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream == torch.cuda.default_stream()
class TestUseDevice:
def test_use_device_cpu(self):
with use_device(torch.device("cpu")):
pass
@skip_if_no_cuda
def test_use_device_cuda(self):
with use_device(torch.device("cuda")):
pass
class TestUseStream:
def test_use_stream_cpu(self):
with use_stream(CPUStream):
pass
@skip_if_no_cuda
def test_use_stream_cuda(self):
stream = new_stream(torch.device("cuda"))
with use_stream(stream):
assert current_stream(torch.device("cuda")) == stream
class TestGetDevice:
def test_get_device_cpu(self):
assert get_device(CPUStream).type == "cpu"
@skip_if_no_cuda
def test_get_device_cuda(self):
stream = current_stream(torch.device("cuda"))
assert get_device(stream).type == "cuda"
class TestWaitStream:
def _test_wait_stream(self, source, target, cuda_sleep=None):
with use_stream(target):
if is_cuda(target):
cuda_sleep(0.5)
x = torch.ones(100, 100, device=get_device(target))
wait_stream(source, target)
with use_stream(source):
assert x.sum().item() == 10000
def test_wait_stream_cpu_cpu(self):
source = CPUStream
target = CPUStream
self._test_wait_stream(source, target)
@skip_if_no_cuda
def test_wait_stream_cpu_cuda(self, cuda_sleep):
source = CPUStream
target = new_stream(torch.device("cuda"))
self._test_wait_stream(source, target, cuda_sleep)
@skip_if_no_cuda
def test_wait_stream_cuda_cpu(self, cuda_sleep):
source = new_stream(torch.device("cuda"))
target = CPUStream
self._test_wait_stream(source, target, cuda_sleep)
@skip_if_no_cuda
def test_wait_stream_cuda_cuda(self, cuda_sleep):
source = current_stream(torch.device("cuda"))
target = new_stream(torch.device("cuda"))
self._test_wait_stream(source, target, cuda_sleep)
class TestRecordStream:
def test_record_stream_cpu(self):
# It should silently ignore CPU tensors.
x = torch.rand(1, device=torch.device("cpu"))
record_stream(x, CPUStream)
@skip_if_no_cuda
def test_record_stream_cuda(self, cuda_sleep):
# This test detects unexpected block reallocation. For a reliable test,
# the stream used to allocate tensors is isolated: the caching allocator
# will not reuse free blocks that were allocated on another stream.
stream_alloc = new_stream(torch.device("cuda"))
with torch.cuda.stream(stream_alloc):
x = torch.rand(1, device=torch.device("cuda"))
stream = new_stream(torch.device("cuda"))
record_stream(x, stream)
with use_stream(stream):
cuda_sleep(0.5)
# 'x' is deleted from Python's perspective, but its block is still
# required by 'stream', so 'y' shouldn't be allocated into that block.
data_ptr = x.data_ptr()
del x
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
y = torch.rand(1, device=torch.device("cuda"))
assert y.data_ptr() != data_ptr
# Pause Python until 'stream' finishes its queued tasks. Now the block of
# 'x' is free to be reallocated.
wait_stream(CPUStream, stream)
with torch.cuda.stream(stream_alloc):
z = torch.rand(1, device=torch.device("cuda"))
assert z.data_ptr() == data_ptr
@skip_if_no_cuda
def test_record_stream_shifted_view(self, cuda_sleep):
# Issue: https://github.com/pytorch/pytorch/issues/27366
stream_alloc = new_stream(torch.device("cuda"))
with torch.cuda.stream(stream_alloc):
x = torch.rand(2, device=torch.device("cuda"))
y = x[1:]
assert y.data_ptr() > x.data_ptr()
stream = new_stream(torch.device("cuda"))
with use_stream(stream):
cuda_sleep(0.5)
record_stream(y, stream)
data_ptr = x.data_ptr()
del x, y
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
z = torch.rand(2, device=torch.device("cuda"))
assert z.data_ptr() != data_ptr
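# A minimal sketch (not part of the original tests) of the same caching-allocator
# bookkeeping expressed with the public ``Tensor.record_stream`` API; it assumes
# a CUDA device is available.
def _record_stream_sketch():
    alloc_stream = torch.cuda.Stream()
    work_stream = torch.cuda.Stream()
    with torch.cuda.stream(alloc_stream):
        x = torch.rand(1, device="cuda")
    # Tell the allocator that 'x' is also in use on 'work_stream', so its memory
    # block is not handed out again until 'work_stream' has passed this point.
    x.record_stream(work_stream)
    with torch.cuda.stream(work_stream):
        y = x * 2
    del x  # the block stays reserved for 'work_stream' despite the del
    return y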
|
pytorch-master
|
test/distributed/pipeline/sync/test_stream.py
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/pipeline/sync/skip/__init__.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
skip_tracker = SkipTracker()
with use_skip_tracker(skip_tracker):
yield skip_tracker
def test_stash(skip_tracker):
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
l1 = Stash()
assert len(skip_tracker.tensors) == 0
with use_skip_tracker(skip_tracker):
l1(torch.tensor(42))
assert len(skip_tracker.tensors) == 1
def test_pop():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
output = l2(l1(torch.tensor(42)))
assert output.item() == 42
def test_declare_but_not_use():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
return input * 3
l1 = Stash()
l2 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(torch.tensor(42))
def test_stash_not_declared():
@skippable()
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
l1 = Stash()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_pop_not_declared():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
@skippable()
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
latent = l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(latent)
def test_pop_not_stashed():
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
yield pop("foo")
l1 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_stash_none():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", None)
return input * 2 # noqa: B901
l1 = Stash()
l1(torch.tensor(42))
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_stash_pop.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from queue import Queue
import threading
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
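    # current_skip_tracker() is thread-local, so a fresh worker thread should
    # see the plain SkipTracker default rather than a Pipe-specific tracker.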
def f():
q.put(current_skip_tracker())
t = threading.Thread(target=f)
t.start()
t.join()
skip_tracker = q.get()
assert type(skip_tracker) is SkipTracker
assert type(skip_tracker) is not SkipTrackerThroughPotals
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_default_skip_tracker_by_data_parallel():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
model = nn.Sequential(Stash(), Pop())
model = nn.DataParallel(model, device_ids=[0, 0], output_device=0)
input = torch.rand(10, device=0)
output = model(input)
assert torch.allclose(output, input)
def test_reuse_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", a)
portal = skip_tracker.portals[(None, "test")]
skip_tracker.save(batch, None, "test", b)
assert portal is skip_tracker.portals[(None, "test")]
def test_no_copy_no_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "copy", a)
skip_tracker.save(batch, None, "not_copy", b)
assert (None, "copy") in skip_tracker.portals
assert (None, "copy") not in skip_tracker.tensors
assert (None, "not_copy") in skip_tracker.tensors
assert (None, "not_copy") not in skip_tracker.portals
def test_tensor_life_without_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
tensor = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 1
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
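# With checkpointing, the budget is larger: the stashed tensor has to survive
# both the checkpointed forward pass and its recomputation during backward, so
# (as the assertions below show) save() starts tensor_life at 2 and each load()
# spends one unit.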
def test_tensor_life_with_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
tensor = torch.tensor([2.0])
with enable_checkpointing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 2
with enable_checkpointing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 1
with enable_recomputing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
with enable_recomputing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 0
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_tracker.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
def test_matching():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2()))
def test_stash_not_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "no module declared 'foo' as poppable but stashed" in str(e.value)
def test_pop_unknown():
@skippable(pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' as poppable but it was not stashed" in str(e.value)
def test_stash_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(stash=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'1' redeclared 'foo' as stashable" in str(e.value)
def test_pop_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'2' redeclared 'foo' as poppable" in str(e.value)
def test_stash_pop_together_different_names():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"], stash=["bar"])
class Layer2(nn.Module):
pass
@skippable(pop=["bar"])
class Layer3(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
def test_stash_pop_together_same_name():
@skippable(stash=["foo"], pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' both as stashable and as poppable" in str(e.value)
def test_double_stash_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3(), Layer4()))
assert "'2' redeclared 'foo' as stashable" in str(e.value)
assert "'3' redeclared 'foo' as poppable" in str(e.value)
def test_double_stash_pop_but_isolated():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
ns1 = Namespace()
ns2 = Namespace()
verify_skippables(
nn.Sequential(Layer1().isolate(ns1), Layer2().isolate(ns1), Layer3().isolate(ns2), Layer4().isolate(ns2),)
)
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_verify_skippables.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_copy_returns_on_next_device():
portal = Portal(torch.rand(1), tensor_life=1)
prev_stream = default_stream(torch.device("cpu"))
next_stream = default_stream(torch.device("cuda"))
phony = torch.zeros(0, requires_grad=True)
assert phony.device.type == "cpu"
phony = portal.copy(prev_stream, next_stream, phony)
assert phony.device.type == "cuda"
def test_blue_orange():
tensor1 = torch.rand(1, requires_grad=True)
tensor2 = torch.rand(1, requires_grad=True)
    # Same as: output = tensor1*2 + tensor2
#
# +----------------------+
# | |
# tensor2 -- PortalBlue -+ +- PortalOrange -+
# | | |
# tensor1 ------------ Join -- Fork --- Mul --- Add -- output
#
main = tensor1
portal = Portal(tensor2, tensor_life=2)
phony = portal.blue()
main = join(main, phony)
main, phony = fork(main)
sub = portal.orange(phony)
output = main * 2 + sub
output.backward()
assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
assert torch.allclose(tensor2.grad, torch.tensor([1.0]))
def test_blue_orange_not_requires_grad():
tensor1 = torch.rand(1, requires_grad=True)
tensor2 = torch.rand(1)
    # Same as: output = tensor1*2 + tensor2
#
# +----------------------+
# | |
# tensor2 -- PortalBlue -+ +- PortalOrange -+
# | | |
# tensor1 ------------ Join -- Fork --- Mul --- Add -- output
#
main = tensor1
portal = Portal(tensor2, tensor_life=2)
phony = portal.blue()
main = join(main, phony)
main, phony = fork(main)
sub = portal.orange(phony)
output = main * 2 + sub
output.backward()
assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
assert tensor2.grad is None
def test_use_grad():
tensor = torch.rand(1, requires_grad=True)
portal = Portal(tensor, tensor_life=1)
portal.put_grad(tensor)
assert portal.use_grad() is tensor
# Gradient in a portal is ephemeral.
with pytest.raises(RuntimeError):
portal.use_grad()
class TestTensorLife:
@pytest.fixture
def new_portal(self):
portal = None
def new_portal(tensor_life):
nonlocal portal
tensor = torch.rand(1, requires_grad=True)
portal = Portal(tensor, tensor_life)
return portal, tensor
yield new_portal
# A test using this fixture must exhaust the tensor in the portal.
with pytest.raises(RuntimeError):
portal.check_tensor_life()
assert portal.tensor is None
def test_tensor_life_0(self, new_portal):
portal, tensor = new_portal(0)
assert portal.tensor is None
def test_tensor_life_1(self, new_portal):
portal, tensor = new_portal(1)
assert portal.tensor is tensor
portal.blue()
def test_tensor_life_2(self, new_portal):
portal, tensor = new_portal(2)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
def test_tensor_life_3(self, new_portal):
portal, tensor = new_portal(3)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
def test_tensor_life_4(self, new_portal):
portal, tensor = new_portal(4)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
portal.blue()
def test_tensor_life_3_plus_1(self, new_portal):
portal, tensor = new_portal(3)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
another_tensor = torch.rand(1, requires_grad=True)
portal.put_tensor(another_tensor, tensor_life=1)
portal.blue()
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_portal.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
def forward(self, input):
yield stash("skip", input)
return input # noqa: B901
@skippable(pop=["skip"])
class Pop(nn.Module):
def forward(self, input):
skip = yield pop("skip")
return input + skip
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
@pytest.mark.parametrize("checkpoint", ["always", "except_last", "never"])
def test_delete_portal_tensor(train, checkpoint, setup_rpc):
# Without checkpointing:
# +- Stash --+ +--- Pop ----+ - - - layers
# | 2,blue,1 |--| 1,orange,0 | - - - tensor_life and portal function
# +----------+ +------------+
#
# With checkpointing:
# +- Stash --+ +--- Pop ----+ +--- Pop'----+ +- Stash'--+
# | 3,blue,2 |--| 2,orange,1 |--| 1,orange,0 |--| 1,blue,0 |
# +----------+ +------------+ +------------+ +----------+
def portal_tensor_life_is(tensor_life, skip_tracker=None):
if skip_tracker is None:
skip_tracker = current_skip_tracker()
# Get the current portal.
portal = list(skip_tracker.portals.values())[0]
if tensor_life == 0:
return portal.tensor_life == 0 and portal.tensor is None
else:
return portal.tensor_life == tensor_life and portal.tensor is not None
# Check the portal tensor after 'Stash'.
stash_ = Stash()
@stash_.register_forward_hook
def check_portal_tensor_after_stash(*_):
if is_checkpointing():
assert portal_tensor_life_is(2)
elif is_recomputing():
assert portal_tensor_life_is(0)
else:
assert portal_tensor_life_is(1)
pop_ = Pop()
@pop_.register_forward_hook
def check_portal_tensor_after_pop(*_):
if is_checkpointing():
assert portal_tensor_life_is(1)
elif is_recomputing():
assert portal_tensor_life_is(0)
else:
assert portal_tensor_life_is(0)
class NoPortalTensorAtBackward(nn.Module):
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.skip_tracker = current_skip_tracker()
return input.detach()
@staticmethod
def backward(ctx, grad):
assert portal_tensor_life_is(0, skip_tracker=ctx.skip_tracker)
return grad
def forward(self, input):
return self.F.apply(input)
model = nn.Sequential(NoPortalTensorAtBackward(), stash_, pop_)
model = Pipe(model, chunks=2, checkpoint=checkpoint)
input = torch.rand(10, requires_grad=True)
if train:
model.train()
output = model(input).local_value()
output.norm().backward()
else:
model.eval()
with torch.no_grad():
model(input)
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
def test_no_portal_without_pipe(train, monkeypatch, setup_rpc):
def deny(*args, **kwargs):
raise AssertionError("tried to create Portal without Pipe")
monkeypatch.setattr("torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny)
model = nn.Sequential(Stash(), Pop())
input = torch.rand(10, requires_grad=True)
if train:
model.train()
output = model(input)
output.norm().backward()
else:
model.eval()
with torch.no_grad():
model(input)
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_leak.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, stash
def test_namespace_difference():
ns1 = Namespace()
ns2 = Namespace()
assert ns1 != ns2
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
def test_skippable_repr():
@skippable(stash=["hello"])
class Hello(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
yield stash("hello", x)
return self.conv(x) # noqa: B901
m = Hello()
assert (
repr(m)
== """
@skippable(Hello(
(conv): Conv2d(1, 1, kernel_size=(1, 1), stride=(1, 1))
))
""".strip()
)
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_api.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import inspect_skip_layout
class Pass(nn.Module):
def forward(self, input):
return input
@skippable(stash=["foo"])
class StashFoo(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input # noqa: B901
@skippable(pop=["foo"])
class PopFoo(nn.Module):
def forward(self, input):
foo = yield stash("foo")
return input + foo
@skippable(stash=["bar"])
class StashBar(nn.Module):
def forward(self, input):
yield stash("bar", input)
return input # noqa: B901
@skippable(pop=["bar"])
class PopBar(nn.Module):
def forward(self, input):
bar = yield pop("bar")
return input + bar
def test_no_skippables():
p1 = nn.Sequential(Pass())
p2 = nn.Sequential(Pass())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], []]
def test_inner_partition():
p1 = nn.Sequential(StashFoo(), PopFoo())
p2 = nn.Sequential(Pass())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], []]
def test_adjoining_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(PopFoo())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], [(0, None, "foo")]]
def test_far_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(Pass())
p3 = nn.Sequential(PopFoo())
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
assert policy == [[], [], [(0, None, "foo")]]
def test_pop_2_from_different_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(StashBar())
p3 = nn.Sequential(PopBar(), PopFoo())
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
# p3 pops 'bar' before 'foo', but the plan is sorted by source partition index.
assert policy == [[], [], [(0, None, "foo"), (1, None, "bar")]]
def test_namespace():
ns1 = Namespace()
ns2 = Namespace()
p1 = nn.Sequential(StashFoo().isolate(ns1))
p2 = nn.Sequential(StashFoo().isolate(ns2))
p3 = nn.Sequential(PopFoo().isolate(ns2), PopFoo().isolate(ns1))
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
    # p3 pops ns2's 'foo' before ns1's 'foo', but the plan is sorted by source partition index.
assert policy == [[], [], [(0, ns1, "foo"), (1, ns2, "foo")]]
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_inspect_skip_layout.py
|
# Owner(s): ["oncall: distributed"]
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.portal import PortalBlue, PortalCopy, PortalOrange
from torch.distributed.pipeline.sync.utils import partition_model
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"])
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_1to3(balance, checkpoint, setup_rpc):
if torch.cuda.device_count() < len(balance):
pytest.skip("at least %d cuda devices required" % len(balance))
@skippable(stash=["1to3"])
class Layer1(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
yield stash("1to3", input)
output = self.conv(input)
return output # noqa: B901
class Layer2(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
output = self.conv(input)
return output
@skippable(pop=["1to3"])
class Layer3(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
skip_1to3 = yield pop("1to3")
output = self.conv(input) + skip_1to3
return output
model = nn.Sequential(Layer1(), Layer2(), Layer3())
model = partition_model(model, balance)
model = Pipe(model, chunks=3, checkpoint=checkpoint)
in_device = model.devices[0]
out_device = model.devices[-1]
input = torch.rand(30, 3, 224, 224, device=in_device, requires_grad=True)
output = model(input)
loss = output.local_value().mean()
loss.backward()
assert torch.allclose(output.local_value().norm(), torch.tensor(1039.0, device=out_device), atol=6e-1)
assert torch.allclose(input.grad.norm(), torch.tensor(0.0004533053, device=in_device))
def test_none_skip(setup_rpc):
@skippable(stash=["none"])
class Stash(nn.Module):
def forward(self, input):
yield stash("none", None)
return input # noqa: B901
@skippable(pop=["none"])
class Pop(nn.Module):
def forward(self, input):
none = yield pop("none")
assert none is None
return input
model = nn.Sequential(Stash(), Pop())
model = Pipe(model, chunks=5)
input = torch.rand(10, requires_grad=True)
output = model(input)
def assert_grad_fn_is_not_portal(grad_fn, visited=None):
if visited is None:
visited = set()
if grad_fn in visited or grad_fn is None:
return
assert not isinstance(grad_fn, PortalBlue._backward_cls)
assert not isinstance(grad_fn, PortalCopy._backward_cls)
assert not isinstance(grad_fn, PortalOrange._backward_cls)
visited.add(grad_fn)
for next_grad_fn, _ in grad_fn.next_functions:
assert_grad_fn_is_not_portal(next_grad_fn, visited)
assert_grad_fn_is_not_portal(output.local_value().grad_fn)
output.local_value().sum().backward()
assert input.grad.mean().item() == 1
|
pytorch-master
|
test/distributed/pipeline/sync/skip/test_gpipe.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/launcher/__init__.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import runpy
import shutil
import subprocess
import sys
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
self._test_launch_user_script_python()
def _test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "spawn"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails, the launcher raises an
        exception to indicate that the worker process failed.
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
        self.assertEqual(min_nodes, max_nodes)
        self.assertEqual(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
        self.assertEqual(2, min_nodes)
        self.assertEqual(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
out_file = f"{os.path.join(self.test_dir, 'out')}"
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=1",
"--monitor_interval=1",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
]
)
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
def test_is_not_torchelastic_launched(self):
# launch test script without torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns False
out_file = f"{os.path.join(self.test_dir, 'out')}"
# need to run the script with runpy in the same interpreter
# as the test because otherwise (depending on the environment)
# it will not find torch as a dependency
with patch.object(
sys,
"argv",
[
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("False", is_torchelastic_launched)
def test_init_method_tcp(self):
port = get_free_port()
with patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
"--rank=0",
"--world_size=1",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
]
)
# nothing to validate, just make sure it runs
def test_init_method_env(self):
port = get_free_port()
with patch.dict(
os.environ,
{
"RANK": "0",
"WORLD_SIZE": "1",
"MASTER_ADDR": "localhost",
"MASTER_PORT": str(port),
},
), patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
"--init_method=env://",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
"--init_method=env://",
]
)
# nothing to validate, just make sure it runs
|
pytorch-master
|
test/distributed/launcher/run_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import shutil
import signal
import sys
import tempfile
import time
import unittest
import uuid
from contextlib import closing
from typing import Any, Dict, Optional
from unittest import mock
from unittest.mock import MagicMock, Mock, patch
import torch
import torch.distributed as dist
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.api import SignalException
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.distributed.launcher.api import (
LaunchConfig,
_get_entrypoint_name,
elastic_launch,
launch_agent,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def simple_rank_scale():
rank = int(os.environ["RANK"])
return 10 + rank
def function_with_bug():
raise RuntimeError("test error")
def get_test_launch_config(
rdzv_endpoint: str,
min_nodes: int,
max_nodes: int,
nproc_per_node: int,
run_id: str = "",
rdzv_backend: str = "etcd",
config: Optional[Dict[str, Any]] = None,
) -> LaunchConfig:
rdzv_configs = {}
if config:
rdzv_configs.update(config)
return LaunchConfig(
min_nodes=min_nodes,
max_nodes=max_nodes,
nproc_per_node=nproc_per_node,
run_id=run_id,
rdzv_endpoint=rdzv_endpoint,
monitor_interval=1,
rdzv_backend=rdzv_backend,
start_method="spawn",
max_restarts=0,
rdzv_configs=rdzv_configs,
)
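# A usage sketch (not part of the original tests) for the config helper above:
# launch a plain Python function in-process and collect the per-rank return
# values. The rdzv_endpoint argument is a placeholder; the assertion-backed
# version of this pattern is ``test_launch_function`` below.
def _elastic_launch_sketch(rdzv_endpoint: str):
    config = get_test_launch_config(
        rdzv_endpoint, min_nodes=1, max_nodes=1, nproc_per_node=2
    )
    results = elastic_launch(config, simple_rank_scale)()
    # 'results' maps local rank to return value, e.g. {0: 10, 1: 11}.
    return sorted(results.values())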
def elastic_launch_wrapper(
test_dir: str,
rdzv_endpoint: str,
min_nodes: int,
max_nodes: int,
nproc_per_node: int,
run_id: str,
):
"""A wrapper function for class `elastic_launch.` in order to make multiprocess returns correct exit code."""
elastic_launch(
get_test_launch_config(
rdzv_endpoint, min_nodes, max_nodes, nproc_per_node, run_id
),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={test_dir}")
def _dist_sum(wait=0):
rank = int(os.environ["RANK"])
dist.init_process_group(backend="gloo")
t = torch.tensor(rank)
time.sleep(wait)
dist.all_reduce(t, op=dist.reduce_op.SUM)
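    # every rank now holds sum(range(world_size)), e.g. 0 + 1 + 2 + 3 = 6 for 4 ranks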
return t.item()
ELASTIC_AGENT_RUN = "torch.distributed.launcher.api.LocalElasticAgent.run"
EVENTS_RECORD = "torch.distributed.launcher.api.events.record"
GET_RDZV_HANDLER = (
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
)
class MockException(Exception):
pass
def short_hash():
return str(uuid.uuid4()).split("-")[0]
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests.
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server.
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables.
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc.
# this should be present on the child and gets
# asserted in ``bin/test_script.py``.
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
os.environ["OMP_NUM_THREADS"] = str(1)
def tearDown(self):
shutil.rmtree(self.test_dir)
def check_works_ran(self, world_size: int):
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_python(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={self.test_dir}")
# make sure all the workers ran.
# each worker touches a file with its global rank as the name.
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_python_local_rank_transfer(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={self.test_dir}")
# make sure all the workers ran.
# each worker touches a file with its global rank as the name.
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_script_bash(self):
nnodes = 1
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
path("bin/test_script.sh"),
)(f"{self.test_dir}")
world_size = nnodes * nproc_per_node
self.check_works_ran(world_size)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_function(self):
nnodes = 1
nproc_per_node = 4
res = elastic_launch(
get_test_launch_config(self._etcd_endpoint, nnodes, nnodes, nproc_per_node),
simple_rank_scale,
)()
expected_res = [10, 11, 12, 13]
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_dist_sum_with_static_rdzv(self):
nnodes = 1
nproc_per_node = 4
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
rdzv_endpoint = f"127.0.0.1:{master_port}"
rank = 0
rdzv_config = {
"rank": rank,
}
res = elastic_launch(
get_test_launch_config(
rdzv_endpoint,
nnodes,
nnodes,
nproc_per_node,
rdzv_backend="static",
config=rdzv_config,
),
_dist_sum,
)()
expected_res = [sum(range(nproc_per_node))] * nproc_per_node
actual_res = sorted(value for value in res.values())
self.assertEqual(expected_res, actual_res)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
nproc_per_node = 4
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={self.test_dir}")
world_size = nproc_per_node
self.check_works_ran(world_size)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails, the launcher raises an
        exception to indicate that the worker process failed.
"""
nproc_per_node = 4
with self.assertRaises(ChildFailedError):
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, nproc_per_node),
sys.executable,
)("-u", path("bin/test_script.py"), "--fail")
record_mock.assert_called_once()
@mock.patch("torch.distributed.elastic.events.record")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception.
"""
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 2, 4),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={self.test_dir}")
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
ctx = mp.get_context("spawn")
for _ in range(nnodes - 1):
p = ctx.Process(
target=elastic_launch_wrapper,
args=(
self.test_dir,
self._etcd_endpoint,
min_nodes,
max_nodes,
nproc_per_node,
run_id,
),
)
procs.append(p)
p.start()
elastic_launch_wrapper(
self.test_dir,
self._etcd_endpoint,
min_nodes,
max_nodes,
nproc_per_node,
run_id,
)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
world_size = nnodes * nproc_per_node
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
elastic_launch(
get_test_launch_config(self._etcd_endpoint, 1, 1, 4),
sys.executable,
)("-u", path("bin/test_script.py"), f"--touch_file_dir={self.test_dir}")
rdzv_handler_mock.shutdown.assert_called_once()
def test_get_entrypoint_name(self):
self.assertEqual(
"simple_rank_scale", _get_entrypoint_name(simple_rank_scale, [])
)
self.assertEqual("", _get_entrypoint_name(sys.executable, []))
self.assertEqual("", _get_entrypoint_name(sys.executable, ["-u"]))
self.assertEqual(
"test_script.py",
_get_entrypoint_name(sys.executable, ["-u", "test_script.py"]),
)
self.assertEqual("", _get_entrypoint_name(None, []))
@patch(ELASTIC_AGENT_RUN)
@patch(GET_RDZV_HANDLER)
def test_rdzv_handler_shutdown_on_agent_signal(self, mock_get_rdzv, mock_agent_run):
config = get_test_launch_config(
self._etcd_endpoint, min_nodes=1, max_nodes=1, nproc_per_node=1
)
for sigval in [signal.SIGTERM, signal.SIGINT]:
with patch(EVENTS_RECORD) as record_event_mock:
rdzv_handler_mock = MagicMock()
rdzv_handler_mock.get_run_id.return_value = short_hash()
mock_get_rdzv.return_value = rdzv_handler_mock
mock_agent_run.side_effect = SignalException("test", sigval)
with self.assertRaises(SignalException):
launch_agent(config, simple_rank_scale, [])
rdzv_handler_mock.shutdown.assert_not_called()
record_event_mock.assert_called_once()
@patch(ELASTIC_AGENT_RUN)
@patch(GET_RDZV_HANDLER)
def test_rdzv_handler_shutdown_on_agent_error(self, mock_get_rdzv, mock_agent_run):
config = get_test_launch_config(
self._etcd_endpoint, min_nodes=1, max_nodes=1, nproc_per_node=1
)
with patch(EVENTS_RECORD) as record_event_mock:
rdzv_handler_mock = MagicMock()
rdzv_handler_mock.get_run_id.return_value = short_hash()
mock_get_rdzv.return_value = rdzv_handler_mock
mock_agent_run.side_effect = RuntimeError("any other exception")
with self.assertRaises(RuntimeError):
launch_agent(config, simple_rank_scale, [])
rdzv_handler_mock.shutdown.assert_called_once()
record_event_mock.assert_called_once()
|
pytorch-master
|
test/distributed/launcher/api_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from contextlib import closing
import torch.distributed.launch as launch
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
class LaunchTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
@sandcastle_skip_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_without_env(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
path("bin/test_script_local_rank.py"),
]
launch.main(args)
@sandcastle_skip_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_with_env(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
"--use_env",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
|
pytorch-master
|
test/distributed/launcher/launch_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--local_rank",
type=int,
required=True,
help="The rank of the node for multi-node distributed " "training",
)
return parser.parse_args()
def main():
print("Start execution")
args = parse_args()
expected_rank = int(os.environ["LOCAL_RANK"])
actual_rank = args.local_rank
if expected_rank != actual_rank:
raise RuntimeError(
"Parameters passed: --local_rank that has different value "
f"from env var: expected: {expected_rank}, got: {actual_rank}"
)
print("End execution")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/distributed/launcher/bin/test_script_local_rank.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This test script is launched by the test cases in run_test.py to validate the
correctness of ``torch.distributed.is_torchelastic_launched()``. To do so, the
script is run with and without torchelastic, and the boolean value written to
out_file is checked against the expectation: False when not launched with
torchelastic, True when launched with it.
The script itself is not a test case, hence no assertions are made in this script.
see: - test/distributed/launcher/run_test.py#test_is_torchelastic_launched()
- test/distributed/launcher/run_test.py#test_is_not_torchelastic_launched()
"""
import argparse
import torch.distributed as dist
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--out_file",
help="file to write indicating whether this script was launched with torchelastic",
)
return parser.parse_args()
def main():
args = parse_args()
with open(args.out_file, "w") as out:
out.write(f"{dist.is_torchelastic_launched()}")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/distributed/launcher/bin/test_script_is_torchelastic_launched.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from pathlib import Path
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--fail",
default=False,
action="store_true",
help="forces the script to throw a RuntimeError",
)
# file is used for assertions
parser.add_argument(
"--touch_file_dir",
type=str,
help="dir to touch a file with global rank as the filename",
)
return parser.parse_args()
def main():
args = parse_args()
env_vars = [
"LOCAL_RANK",
"RANK",
"GROUP_RANK",
"ROLE_RANK",
"ROLE_NAME",
"LOCAL_WORLD_SIZE",
"WORLD_SIZE",
"ROLE_WORLD_SIZE",
"MASTER_ADDR",
"MASTER_PORT",
"TORCHELASTIC_RESTART_COUNT",
"TORCHELASTIC_MAX_RESTARTS",
"TORCHELASTIC_RUN_ID",
"OMP_NUM_THREADS",
"TEST_SENTINEL_PARENT",
"TORCHELASTIC_ERROR_FILE",
]
print("Distributed env vars set by agent:")
for env_var in env_vars:
value = os.environ[env_var]
print(f"{env_var} = {value}")
if args.fail:
raise RuntimeError("raising exception since --fail flag was set")
else:
file = os.path.join(args.touch_file_dir, os.environ["RANK"])
Path(file).touch()
print(f"Success, created {file}")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/distributed/launcher/bin/test_script.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torch
import torch.distributed as dist
import torch.nn.functional as F
def parse_args():
parser = argparse.ArgumentParser(description="test script")
parser.add_argument(
"--init_method",
type=str,
required=True,
help="init_method to pass to `dist.init_process_group()` (e.g. env://)",
)
parser.add_argument(
"--world_size",
type=int,
default=os.getenv("WORLD_SIZE", -1),
help="world_size to pass to `dist.init_process_group()`",
)
parser.add_argument(
"--rank",
type=int,
default=os.getenv("RANK", -1),
help="rank to pass to `dist.init_process_group()`",
)
return parser.parse_args()
def main():
args = parse_args()
dist.init_process_group(
backend="gloo",
init_method=args.init_method,
world_size=args.world_size,
rank=args.rank,
)
rank = dist.get_rank()
world_size = dist.get_world_size()
# one hot (by rank) tensor of size world_size
# example:
# rank 0, world_size 4 => [1, 0, 0, 0]
# rank 1, world_size 4 => [0, 1, 0, 0]
# ...
t = F.one_hot(torch.tensor(rank), num_classes=world_size)
    # after all_reduce, t == torch.ones(world_size)
dist.all_reduce(t)
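    # worked example for world_size = 4 (each rank contributes its one-hot row):
    #   [1, 0, 0, 0] + [0, 1, 0, 0] + [0, 0, 1, 0] + [0, 0, 0, 1] = [1, 1, 1, 1]
    #   sum([1, 1, 1, 1]) == 4 == world_size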
    # the sum of all elements in t should equal world_size
derived_world_size = torch.sum(t).item()
if derived_world_size != world_size:
raise RuntimeError(
f"Wrong world size derived. Expected: {world_size}, Got: {derived_world_size}"
)
print("Done")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/distributed/launcher/bin/test_script_init_method.py
|
pytorch-master
|
test/distributed/nn/jit/__init__.py
|
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import pathlib
import sys
from typing import Tuple
import torch
from torch import Tensor, nn
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.nn.jit import instantiator
from torch.testing._internal.common_utils import TestCase, run_tests
@torch.jit.interface
class MyModuleInterface:
def forward(
self, tensor: Tensor, number: int, word: str = "default"
) -> Tuple[Tensor, int, str]:
pass
class MyModule(nn.Module):
pass
def create_module():
return MyModule()
class TestInstantiator(TestCase):
def test_get_arg_return_types_from_interface(self):
(
args_str,
arg_types_str,
return_type_str,
) = instantiator.get_arg_return_types_from_interface(MyModuleInterface)
self.assertEqual(args_str, "tensor, number, word")
self.assertEqual(arg_types_str, "tensor: Tensor, number: int, word: str")
self.assertEqual(return_type_str, "Tuple[Tensor, int, str]")
def test_instantiate_scripted_remote_module_template(self):
dir_path = pathlib.Path(instantiator.INSTANTIATED_TEMPLATE_DIR_PATH)
# Cleanup.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
for file_path in file_paths:
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_before = len(list(file_paths))
self.assertEqual(num_files_before, 0)
generated_module = instantiator.instantiate_scriptable_remote_module_template(
MyModuleInterface
)
self.assertTrue(hasattr(generated_module, "_remote_forward"))
self.assertTrue(hasattr(generated_module, "_generated_methods"))
# Check after run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_after = len(list(file_paths))
self.assertEqual(num_files_after, 1)
def test_instantiate_non_scripted_remote_module_template(self):
dir_path = pathlib.Path(instantiator.INSTANTIATED_TEMPLATE_DIR_PATH)
# Cleanup.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
for file_path in file_paths:
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_before = len(list(file_paths))
self.assertEqual(num_files_before, 0)
generated_module = (
instantiator.instantiate_non_scriptable_remote_module_template()
)
self.assertTrue(hasattr(generated_module, "_remote_forward"))
self.assertTrue(hasattr(generated_module, "_generated_methods"))
# Check after run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_after = len(list(file_paths))
self.assertEqual(num_files_after, 1)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/nn/jit/test_instantiator.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
def main():
print("Success, smoke test")
if __name__ == "__main__":
main()
|
pytorch-master
|
test/distributed/bin/test_script.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/metrics/__init__.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import unittest.mock as mock
from torch.distributed.elastic.metrics.api import (
MetricData,
MetricHandler,
MetricStream,
_get_metric_name,
prof,
)
from torch.testing._internal.common_utils import run_tests, TestCase
def foo_1():
pass
class TestMetricsHandler(MetricHandler):
def __init__(self):
self.metric_data = {}
def emit(self, metric_data: MetricData):
self.metric_data[metric_data.name] = metric_data
class Parent(abc.ABC):
@abc.abstractmethod
def func(self):
raise NotImplementedError()
def base_func(self):
self.func()
class Child(Parent):
# need to decorate the implementation not the abstract method!
@prof
def func(self):
pass
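# A sketch (not part of the original test) of why the comment above matters:
# decorating the abstract method instead, e.g.
#
#     class Parent(abc.ABC):
#         @prof
#         @abc.abstractmethod
#         def func(self):
#             raise NotImplementedError()
#
# would attach the profiling wrapper to the abstract stub only; Child's
# override replaces ``func`` entirely, so ``c.base_func()`` would dispatch to
# the undecorated ``Child.func`` and no ``Child.func.*`` metrics would be
# emitted.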
class MetricsApiTest(TestCase):
def foo_2(self):
pass
@prof
def bar(self):
pass
@prof
def throw(self):
raise RuntimeError()
@prof(group="torchelastic")
def bar2(self):
pass
def test_get_metric_name(self):
        # Note: since pytorch uses the main method to launch tests,
        # the module will be different between fb and oss; this
        # allows keeping the module name consistent.
foo_1.__module__ = "api_test"
self.assertEqual("api_test.foo_1", _get_metric_name(foo_1))
self.assertEqual("MetricsApiTest.foo_2", _get_metric_name(self.foo_2))
def test_profile(self):
handler = TestMetricsHandler()
stream = MetricStream("torchelastic", handler)
# patch instead of configure to avoid conflicts when running tests in parallel
with mock.patch(
"torch.distributed.elastic.metrics.api.getStream", return_value=stream
):
self.bar()
self.assertEqual(1, handler.metric_data["MetricsApiTest.bar.success"].value)
self.assertNotIn("MetricsApiTest.bar.failure", handler.metric_data)
self.assertIn("MetricsApiTest.bar.duration.ms", handler.metric_data)
with self.assertRaises(RuntimeError):
self.throw()
self.assertEqual(
1, handler.metric_data["MetricsApiTest.throw.failure"].value
)
self.assertNotIn("MetricsApiTest.bar_raise.success", handler.metric_data)
self.assertIn("MetricsApiTest.throw.duration.ms", handler.metric_data)
self.bar2()
self.assertEqual(
"torchelastic",
handler.metric_data["MetricsApiTest.bar2.success"].group_name,
)
def test_inheritance(self):
handler = TestMetricsHandler()
stream = MetricStream("torchelastic", handler)
# patch instead of configure to avoid conflicts when running tests in parallel
with mock.patch(
"torch.distributed.elastic.metrics.api.getStream", return_value=stream
):
c = Child()
c.base_func()
self.assertEqual(1, handler.metric_data["Child.func.success"].value)
self.assertIn("Child.func.duration.ms", handler.metric_data)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/metrics/api_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from base64 import b64encode
from datetime import timedelta
from typing import ClassVar, cast, Callable
from unittest import TestCase, mock
from torch.distributed import TCPStore, FileStore
from torch.distributed.elastic.rendezvous import (
RendezvousConnectionError,
RendezvousParameters,
RendezvousError)
from torch.distributed.elastic.rendezvous.c10d_rendezvous_backend import (
C10dRendezvousBackend,
create_backend,
)
from rendezvous_backend_test import RendezvousBackendTestMixin
class TCPStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[TCPStore]
@classmethod
def setUpClass(cls) -> None:
cls._store = TCPStore("localhost", 0, is_master=True) # type: ignore[call-arg]
def setUp(self) -> None:
# Make sure we have a clean slate.
self._store.delete_key("torch.rendezvous.dummy_run_id")
self._backend = C10dRendezvousBackend(self._store, "dummy_run_id")
def _corrupt_state(self) -> None:
self._store.set("torch.rendezvous.dummy_run_id", "non_base64")
class FileStoreBackendTest(TestCase, RendezvousBackendTestMixin):
_store: ClassVar[FileStore]
def setUp(self) -> None:
_, path = tempfile.mkstemp()
self._path = path
# Currently, filestore doesn't implement a delete_key method, so a new
# filestore has to be initialized for every test in order to have a
# clean slate.
self._store = FileStore(path)
self._backend = C10dRendezvousBackend(self._store, "dummy_run_id")
def tearDown(self) -> None:
os.remove(self._path)
def _corrupt_state(self) -> None:
self._store.set("torch.rendezvous.dummy_run_id", "non_base64")
class CreateBackendTest(TestCase):
def setUp(self) -> None:
        # For testing, the default parameters are for a TCP store. If a test
        # needs file store parameters, it sets self._params to
        # self._params_filestore.
self._params = RendezvousParameters(
backend="dummy_backend",
endpoint="localhost:29300",
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
is_host="true",
store_type="tCp",
read_timeout="10",
)
_, tmp_path = tempfile.mkstemp()
# Parameters for filestore testing.
self._params_filestore = RendezvousParameters(
backend="dummy_backend",
endpoint=tmp_path,
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
store_type="fIlE",
)
self._expected_endpoint_file = tmp_path
self._expected_temp_dir = tempfile.gettempdir()
self._expected_endpoint_host = "localhost"
self._expected_endpoint_port = 29300
self._expected_store_type = TCPStore
self._expected_read_timeout = timedelta(seconds=10)
def tearDown(self) -> None:
os.remove(self._expected_endpoint_file)
def _run_test_with_store(self, store_type: str, test_to_run: Callable):
"""
Use this function to specify the store type to use in a test. If
not used, the test will default to TCPStore.
"""
if store_type == "file":
self._params = self._params_filestore
self._expected_store_type = FileStore
self._expected_read_timeout = timedelta(seconds=300)
test_to_run()
def _assert_create_backend_returns_backend(self) -> None:
backend, store = create_backend(self._params)
self.assertEqual(backend.name, "c10d")
self.assertIsInstance(store, self._expected_store_type)
typecast_store = cast(self._expected_store_type, store)
self.assertEqual(typecast_store.timeout, self._expected_read_timeout) # type: ignore[attr-defined]
        if self._expected_store_type == TCPStore:
self.assertEqual(typecast_store.host, self._expected_endpoint_host) # type: ignore[attr-defined]
self.assertEqual(typecast_store.port, self._expected_endpoint_port) # type: ignore[attr-defined]
        if self._expected_store_type == FileStore:
if self._params.endpoint:
self.assertEqual(typecast_store.path, self._expected_endpoint_file) # type: ignore[attr-defined]
else:
self.assertTrue(typecast_store.path.startswith(self._expected_temp_dir)) # type: ignore[attr-defined]
backend.set_state(b"dummy_state")
state = store.get("torch.rendezvous." + self._params.run_id)
self.assertEqual(state, b64encode(b"dummy_state"))
def test_create_backend_returns_backend(self) -> None:
for store_type in ["tcp", "file"]:
with self.subTest(store_type=store_type):
self._run_test_with_store(store_type, self._assert_create_backend_returns_backend)
def test_create_backend_returns_backend_if_is_host_is_false(self) -> None:
store = TCPStore( # type: ignore[call-arg] # noqa: F841
self._expected_endpoint_host, self._expected_endpoint_port, is_master=True
)
self._params.config["is_host"] = "false"
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_is_host_is_not_specified(self) -> None:
del self._params.config["is_host"]
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_is_host_is_not_specified_and_store_already_exists(
self,
) -> None:
store = TCPStore( # type: ignore[call-arg] # noqa: F841
self._expected_endpoint_host, self._expected_endpoint_port, is_master=True
)
del self._params.config["is_host"]
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_endpoint_port_is_not_specified(self) -> None:
self._params.endpoint = self._expected_endpoint_host
self._expected_endpoint_port = 29400
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_endpoint_file_is_not_specified(self) -> None:
self._params_filestore.endpoint = ""
self._run_test_with_store("file", self._assert_create_backend_returns_backend)
def test_create_backend_returns_backend_if_store_type_is_not_specified(self) -> None:
del self._params.config["store_type"]
self._expected_store_type = TCPStore
        if not self._params.get("read_timeout"):
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = timedelta(seconds=60)
self._assert_create_backend_returns_backend()
def test_create_backend_raises_error_if_store_is_unreachable(self) -> None:
self._params.config["is_host"] = "false"
self._params.config["read_timeout"] = "2"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for details.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_endpoint_is_invalid(self) -> None:
for is_host in [True, False]:
with self.subTest(is_host=is_host):
self._params.config["is_host"] = str(is_host)
self._params.endpoint = "dummy_endpoint"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for "
r"details.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_store_type_is_invalid(self) -> None:
self._params.config["store_type"] = "dummy_store_type"
with self.assertRaisesRegex(
ValueError, r"^Invalid store type given. Currently only supports file and tcp.$"
):
create_backend(self._params)
def test_create_backend_raises_error_if_read_timeout_is_invalid(self) -> None:
for read_timeout in ["0", "-10"]:
with self.subTest(read_timeout=read_timeout):
self._params.config["read_timeout"] = read_timeout
with self.assertRaisesRegex(
ValueError, r"^The read timeout must be a positive integer.$"
):
create_backend(self._params)
@mock.patch("tempfile.mkstemp")
def test_create_backend_raises_error_if_tempfile_creation_fails(self, tempfile_mock) -> None:
tempfile_mock.side_effect = OSError("test error")
# Set the endpoint to empty so it defaults to creating a temp file
self._params_filestore.endpoint = ""
with self.assertRaisesRegex(
RendezvousError,
r"The file creation for C10d store has failed. See inner exception for details."
):
create_backend(self._params_filestore)
@mock.patch("torch.distributed.elastic.rendezvous.c10d_rendezvous_backend.FileStore")
def test_create_backend_raises_error_if_file_path_is_invalid(self, filestore_mock) -> None:
filestore_mock.side_effect = RuntimeError("test error")
self._params_filestore.endpoint = "bad file path"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to the C10d store has failed. See inner exception for "
r"details.$",
):
create_backend(self._params_filestore)
|
pytorch-master
|
test/distributed/elastic/rendezvous/c10d_rendezvous_backend_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
import socket
from datetime import timedelta
from typing import List
from unittest import TestCase
from torch.distributed.elastic.rendezvous.utils import (
_PeriodicTimer,
_delay,
_matches_machine_hostname,
_parse_rendezvous_config,
_try_parse_port,
parse_rendezvous_endpoint,
)
class UtilsTest(TestCase):
def test_parse_rendezvous_config_returns_dict(self) -> None:
expected_config = {
"a": "dummy1",
"b": "dummy2",
"c": "dummy3=dummy4",
"d": "dummy5/dummy6",
}
config = _parse_rendezvous_config(
" b= dummy2 ,c=dummy3=dummy4, a =dummy1,d=dummy5/dummy6"
)
self.assertEqual(config, expected_config)
def test_parse_rendezvous_returns_empty_dict_if_str_is_empty(self) -> None:
config_strs = ["", " "]
for config_str in config_strs:
with self.subTest(config_str=config_str):
config = _parse_rendezvous_config(config_str)
self.assertEqual(config, {})
def test_parse_rendezvous_raises_error_if_str_is_invalid(self) -> None:
config_strs = [
"a=dummy1,",
"a=dummy1,,c=dummy2",
"a=dummy1, ,c=dummy2",
"a=dummy1,= ,c=dummy2",
"a=dummy1, = ,c=dummy2",
"a=dummy1, =,c=dummy2",
" , ",
]
for config_str in config_strs:
with self.subTest(config_str=config_str):
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration string must be in format "
r"<key1>=<value1>,...,<keyN>=<valueN>.$",
):
_parse_rendezvous_config(config_str)
def test_parse_rendezvous_raises_error_if_value_is_empty(self) -> None:
config_strs = [
"b=dummy1,a,c=dummy2",
"b=dummy1,c=dummy2,a",
"b=dummy1,a=,c=dummy2",
" a ",
]
for config_str in config_strs:
with self.subTest(config_str=config_str):
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration option 'a' must have a value specified.$",
):
_parse_rendezvous_config(config_str)
def test_try_parse_port_returns_port(self) -> None:
port = _try_parse_port("123")
self.assertEqual(port, 123)
def test_try_parse_port_returns_none_if_str_is_invalid(self) -> None:
port_strs = [
"",
" ",
" 1",
"1 ",
" 1 ",
"abc",
]
for port_str in port_strs:
with self.subTest(port_str=port_str):
port = _try_parse_port(port_str)
self.assertIsNone(port)
def test_parse_rendezvous_endpoint_returns_tuple(self) -> None:
endpoints = [
"dummy.com:0",
"dummy.com:123",
"dummy.com:65535",
"dummy-1.com:0",
"dummy-1.com:123",
"dummy-1.com:65535",
"123.123.123.123:0",
"123.123.123.123:123",
"123.123.123.123:65535",
"[2001:db8::1]:0",
"[2001:db8::1]:123",
"[2001:db8::1]:65535",
]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
expected_host, expected_port = endpoint.rsplit(":", 1)
if expected_host[0] == "[" and expected_host[-1] == "]":
expected_host = expected_host[1:-1]
self.assertEqual(host, expected_host)
self.assertEqual(port, int(expected_port))
def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_has_no_port(
self,
) -> None:
endpoints = ["dummy.com", "dummy-1.com", "123.123.123.123", "[2001:db8::1]"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
expected_host = endpoint
if expected_host[0] == "[" and expected_host[-1] == "]":
expected_host = expected_host[1:-1]
self.assertEqual(host, expected_host)
self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_is_empty(self) -> None:
endpoints = ["", " "]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint("", default_port=123)
self.assertEqual(host, "localhost")
self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_raises_error_if_hostname_is_invalid(
self,
) -> None:
endpoints = ["~", "dummy.com :123", "~:123", ":123"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The hostname of the rendezvous endpoint '{endpoint}' must be a "
r"dot-separated list of labels, an IPv4 address, or an IPv6 address.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_invalid(self) -> None:
endpoints = ["dummy.com:", "dummy.com:abc", "dummy.com:-123", "dummy.com:-"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_too_big(self) -> None:
endpoints = ["dummy.com:65536", "dummy.com:70000"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_matches_machine_hostname_returns_true_if_hostname_is_loopback(
self,
) -> None:
hosts = [
"localhost",
"127.0.0.1",
"::1",
"0000:0000:0000:0000:0000:0000:0000:0001",
]
for host in hosts:
with self.subTest(host=host):
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_hostname(
self,
) -> None:
host = socket.gethostname()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_fqdn(
self,
) -> None:
host = socket.getfqdn()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_address(
self,
) -> None:
addr_list = socket.getaddrinfo(socket.gethostname(), None, proto=socket.IPPROTO_TCP)
for addr in (addr_info[4][0] for addr_info in addr_list):
with self.subTest(addr=addr):
self.assertTrue(_matches_machine_hostname(addr))
def test_matches_machine_hostname_returns_false_if_hostname_does_not_match(
self,
) -> None:
hosts = ["dummy", "0.0.0.0", "::2"]
for host in hosts:
with self.subTest(host=host):
self.assertFalse(_matches_machine_hostname(host))
def test_delay_suspends_thread(self) -> None:
for seconds in 0.2, (0.2, 0.4):
with self.subTest(seconds=seconds):
time1 = time.monotonic()
_delay(seconds) # type: ignore[arg-type]
time2 = time.monotonic()
self.assertGreaterEqual(time2 - time1, 0.2)
class PeriodicTimerTest(TestCase):
def test_start_can_be_called_only_once(self) -> None:
timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
timer.start()
with self.assertRaisesRegex(RuntimeError, r"^The timer has already started.$"):
timer.start()
timer.cancel()
def test_cancel_can_be_called_multiple_times(self) -> None:
timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
timer.start()
timer.cancel()
timer.cancel()
def test_cancel_stops_background_thread(self) -> None:
name = "PeriodicTimer_CancelStopsBackgroundThreadTest"
timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
timer.set_name(name)
timer.start()
self.assertTrue(any(t.name == name for t in threading.enumerate()))
timer.cancel()
self.assertTrue(all(t.name != name for t in threading.enumerate()))
def test_delete_stops_background_thread(self) -> None:
name = "PeriodicTimer_DeleteStopsBackgroundThreadTest"
timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
timer.set_name(name)
timer.start()
self.assertTrue(any(t.name == name for t in threading.enumerate()))
del timer
self.assertTrue(all(t.name != name for t in threading.enumerate()))
def test_set_name_cannot_be_called_after_start(self) -> None:
timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
timer.start()
with self.assertRaisesRegex(RuntimeError, r"^The timer has already started.$"):
timer.set_name("dummy_name")
timer.cancel()
def test_timer_calls_background_thread_at_regular_intervals(self) -> None:
timer_begin_time: float
# Call our function every 200ms.
call_interval = 0.2
# Keep the log of intervals between each consecutive call.
actual_call_intervals: List[float] = []
# Keep the number of times the function was called.
call_count = 0
        # In order to prevent a flaky test, instead of asserting that the
        # function was called an exact number of times, we use a lower bound
        # that is guaranteed to hold for a correct implementation.
min_required_call_count = 4
timer_stop_event = threading.Event()
def log_call(self):
nonlocal timer_begin_time, call_count
actual_call_intervals.append(time.monotonic() - timer_begin_time)
call_count += 1
if call_count == min_required_call_count:
timer_stop_event.set()
timer_begin_time = time.monotonic()
timer = _PeriodicTimer(timedelta(seconds=call_interval), log_call, self)
timer_begin_time = time.monotonic()
timer.start()
# Although this is theoretically non-deterministic, if our timer, which
# has a 200ms call interval, does not get called 4 times in 60 seconds,
# there is very likely something else going on.
timer_stop_event.wait(60)
timer.cancel()
self.longMessage = False
self.assertGreaterEqual(
call_count,
min_required_call_count,
f"The function has been called {call_count} time(s) but expected to be called at least "
f"{min_required_call_count} time(s).",
)
for actual_call_interval in actual_call_intervals:
self.assertGreaterEqual(
actual_call_interval,
call_interval,
f"The interval between two function calls was {actual_call_interval} second(s) but "
f"expected to be at least {call_interval} second(s).",
)
|
pytorch-master
|
test/distributed/elastic/rendezvous/utils_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import unittest
import etcd
from torch.distributed.elastic.rendezvous.etcd_rendezvous import (
EtcdRendezvous,
EtcdRendezvousHandler,
)
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
class EtcdServerTest(unittest.TestCase):
def test_etcd_server_start_stop(self):
server = EtcdServer()
server.start()
try:
port = server.get_port()
host = server.get_host()
self.assertGreater(port, 0)
self.assertEqual("localhost", host)
self.assertEqual(f"{host}:{port}", server.get_endpoint())
self.assertIsNotNone(server.get_client().version)
finally:
server.stop()
def test_etcd_server_with_rendezvous(self):
server = EtcdServer()
server.start()
client = etcd.Client(server.get_host(), server.get_port())
rdzv = EtcdRendezvous(
client=client,
prefix="test",
run_id=1,
num_min_workers=1,
num_max_workers=1,
timeout=60,
last_call_timeout=30,
)
rdzv_handler = EtcdRendezvousHandler(rdzv)
store, rank, world_size = rdzv_handler.next_rendezvous()
self.assertIsNotNone(store)
self.assertEqual(0, rank)
self.assertEqual(1, world_size)
|
pytorch-master
|
test/distributed/elastic/rendezvous/etcd_server_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import unittest
import uuid
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous import create_rdzv_handler
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
class EtcdRendezvousTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def test_etcd_rdzv_basic_params(self):
"""
Check that we can create the handler with a minimum set of
params
"""
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=f"{uuid.uuid4()}",
min_nodes=1,
max_nodes=1,
)
etcd_rdzv = create_rdzv_handler(rdzv_params)
self.assertIsNotNone(etcd_rdzv)
def test_etcd_rdzv_additional_params(self):
run_id = str(uuid.uuid4())
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=run_id,
min_nodes=1,
max_nodes=1,
timeout=60,
last_call_timeout=30,
protocol="http",
)
etcd_rdzv = create_rdzv_handler(rdzv_params)
self.assertIsNotNone(etcd_rdzv)
self.assertEqual(run_id, etcd_rdzv.get_run_id())
def test_get_backend(self):
run_id = str(uuid.uuid4())
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=run_id,
min_nodes=1,
max_nodes=1,
timeout=60,
last_call_timeout=30,
protocol="http",
)
etcd_rdzv = create_rdzv_handler(rdzv_params)
self.assertEqual("etcd", etcd_rdzv.get_backend())
|
pytorch-master
|
test/distributed/elastic/rendezvous/etcd_rendezvous_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/rendezvous/__init__.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import pickle
import socket
import threading
import time
from abc import ABC, abstractmethod
from base64 import b64encode
from datetime import datetime, timedelta
from typing import Callable, Optional, Tuple, cast
from unittest import TestCase
from unittest.mock import MagicMock, Mock, call, patch
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousClosedError,
RendezvousError,
RendezvousParameters,
RendezvousStateError,
RendezvousTimeoutError,
)
from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
DynamicRendezvousHandler,
RendezvousBackend,
RendezvousSettings,
RendezvousTimeout,
Token,
_Action,
_BackendRendezvousStateHolder,
_DistributedRendezvousOpExecutor,
_NodeDesc,
_NodeDescGenerator,
_RendezvousCloseOp,
_RendezvousContext,
_RendezvousExitOp,
_RendezvousJoinOp,
_RendezvousKeepAliveOp,
_RendezvousState,
_RendezvousStateHolder,
create_handler,
)
class CustomAssertMixin:
assertDictEqual: Callable
def assert_state_equal(self, actual: _RendezvousState, expected: _RendezvousState) -> None:
self.assertDictEqual(vars(actual), vars(expected))
def assert_state_empty(self, actual: _RendezvousState) -> None:
self.assertDictEqual(vars(actual), vars(_RendezvousState()))
class RendezvousTimeoutTest(TestCase):
def test_init_initializes_timeout(self) -> None:
timeout = RendezvousTimeout(
timedelta(seconds=50),
timedelta(seconds=60),
timedelta(seconds=70),
timedelta(seconds=80),
)
self.assertEqual(timeout.join, timedelta(seconds=50))
self.assertEqual(timeout.last_call, timedelta(seconds=60))
self.assertEqual(timeout.close, timedelta(seconds=70))
self.assertEqual(timeout.heartbeat, timedelta(seconds=80))
def test_init_initializes_timeout_if_no_timeout_is_specified(self) -> None:
timeout = RendezvousTimeout()
self.assertEqual(timeout.join, timedelta(seconds=600))
self.assertEqual(timeout.last_call, timedelta(seconds=30))
self.assertEqual(timeout.close, timedelta(seconds=30))
self.assertEqual(timeout.heartbeat, timedelta(seconds=5))
def test_init_raises_error_if_timeout_is_not_positive(self) -> None:
join_timeouts = [timedelta(seconds=0), timedelta(seconds=-1)]
for join_timeout in join_timeouts:
with self.subTest(join_timeout=join_timeout):
with self.assertRaisesRegex(
ValueError, rf"^The join timeout \({join_timeout}\) must be positive.$"
):
timeout = RendezvousTimeout(join_timeout)
class NodeDescTest(TestCase):
def test_repr(self) -> None:
desc = _NodeDesc("dummy_fqdn", 3, 5)
self.assertEqual(repr(desc), "dummy_fqdn_3_5")
def test_hash(self) -> None:
desc1 = _NodeDesc("dummy_fqdn", 2, 4)
desc2 = _NodeDesc("dummy_fqdn", 3, 5)
descs = {desc1, desc2}
self.assertIn(desc1, descs)
self.assertIn(desc2, descs)
class NodeDescGeneratorTest(TestCase):
def test_generate(self) -> None:
desc_generator = _NodeDescGenerator()
fqdn = socket.getfqdn()
pid = os.getpid()
for local_id in range(4):
with self.subTest(fqdn=fqdn, pid=pid, local_id=local_id):
desc = desc_generator.generate()
self.assertEqual(repr(desc), f"{fqdn}_{pid}_{local_id}")
class RendezvousStateTest(TestCase):
def test_encoded_size_is_within_expected_limit(self) -> None:
state = _RendezvousState()
state.round = 1
state.complete = True
state.deadline = datetime.utcnow()
state.closed = True
# fmt: off
expected_max_sizes = (
( 5, 2 * (2 ** 10),), # 10 machines <= 2KB # noqa: E201, E241, E262
( 50, 16 * (2 ** 10),), # 100 machines <= 16KB # noqa: E201, E241, E262
( 500, 160 * (2 ** 10),), # 1000 machines <= 160KB # noqa: E201, E241, E262
(5000, 1600 * (2 ** 10),), # 10000 machines <= 1.6MB # noqa: E201, E241, E262
)
# fmt: on
for num_nodes, max_byte_size in expected_max_sizes:
with self.subTest(num_nodes=num_nodes, max_byte_size=max_byte_size):
for i in range(num_nodes):
node_running = _NodeDesc(f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i)
node_waiting = _NodeDesc(f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i)
state.participants[node_running] = i
state.wait_list.add(node_waiting)
state.last_heartbeats[node_running] = datetime.utcnow()
state.last_heartbeats[node_waiting] = datetime.utcnow()
bits = pickle.dumps(state)
base64_bits = b64encode(bits)
self.assertLessEqual(len(base64_bits), max_byte_size)
class FakeRendezvousBackend(RendezvousBackend):
_state: Optional[bytes]
_token: int
def __init__(self) -> None:
self._state = None
self._token = 0
@property
def name(self) -> str:
return "fake_backend"
def get_state(self) -> Optional[Tuple[bytes, Token]]:
if self._token == 0:
return None
return self._state, self._token # type: ignore[return-value]
def set_state(
self, state: bytes, token: Optional[Token] = None
) -> Optional[Tuple[bytes, Token, bool]]:
if token is None:
token = 0
if token == self._token:
self._state = state
self._token += 1
has_set = True
else:
has_set = False
return self._state, self._token, has_set # type: ignore[return-value]
def get_state_internal(self) -> _RendezvousState:
return pickle.loads(cast(bytes, self._state))
def set_state_internal(self, state: _RendezvousState) -> None:
self._state = pickle.dumps(state)
self._token += 1
def corrupt_state(self) -> None:
self._state = b"corrupt_state"
self._token += 1
class BackendRendezvousStateHolderTest(TestCase, CustomAssertMixin):
def setUp(self) -> None:
self._backend = FakeRendezvousBackend()
mock_get_state = MagicMock(wraps=self._backend.get_state)
mock_set_state = MagicMock(wraps=self._backend.set_state)
self._mock_backend = Mock()
self._mock_backend.get_state = mock_get_state
self._mock_backend.set_state = mock_set_state
setattr(self._backend, "get_state", mock_get_state) # noqa: B010
setattr(self._backend, "set_state", mock_set_state) # noqa: B010
self._settings = RendezvousSettings(
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
timeout=RendezvousTimeout(),
keep_alive_interval=timedelta(seconds=30),
keep_alive_max_attempt=3,
)
self._cache_duration = 0
self._now = datetime(2000, 1, 1, hour=0, minute=0)
self._datetime_patch = patch(
"torch.distributed.elastic.rendezvous.dynamic_rendezvous.datetime"
)
mock_datetime = self._datetime_patch.start()
mock_datetime.utcnow.return_value = self._now
def tearDown(self) -> None:
self._datetime_patch.stop()
def _create_state(self) -> _RendezvousState:
state = _RendezvousState()
state.round = 999
state.complete = True
state.deadline = self._now
state.closed = True
state.participants = {
_NodeDesc("dummy1", 1, 1): 0,
_NodeDesc("dummy2", 1, 1): 1,
_NodeDesc("dummy3", 1, 1): 2,
}
state.wait_list = {
_NodeDesc("dummy4", 1, 1),
_NodeDesc("dummy5", 1, 1),
}
state.last_heartbeats = {
_NodeDesc("dummy1", 1, 1): self._now,
_NodeDesc("dummy2", 1, 1): self._now - timedelta(seconds=15),
_NodeDesc("dummy3", 1, 1): self._now - timedelta(seconds=30),
_NodeDesc("dummy4", 1, 1): self._now - timedelta(seconds=60),
_NodeDesc("dummy5", 1, 1): self._now - timedelta(seconds=90),
}
return state
def _create_state_holder(self) -> _BackendRendezvousStateHolder:
return _BackendRendezvousStateHolder(self._backend, self._settings, self._cache_duration)
def test_init_initializes_state_holder(self) -> None:
state_holder = self._create_state_holder()
self.assert_state_empty(state_holder.state)
self._mock_backend.assert_not_called()
def test_sync_gets_empty_state_if_backend_state_does_not_exist(self) -> None:
state_holder = self._create_state_holder()
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assert_state_empty(state_holder.state)
self.assertEqual(self._mock_backend.get_state.call_count, 1)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
def test_sync_gets_backend_state_if_local_state_is_clean(self) -> None:
state_holder = self._create_state_holder()
expected_state = self._create_state()
for attempt in range(1, 4):
with self.subTest(attempt=attempt):
expected_state.round = attempt
self._backend.set_state_internal(expected_state)
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assert_state_equal(state_holder.state, expected_state)
self.assertEqual(self._mock_backend.get_state.call_count, 1)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
self._mock_backend.reset_mock()
def test_sync_gets_backend_state_if_local_state_is_old_and_dirty(self) -> None:
state_holder = self._create_state_holder()
expected_state = self._create_state()
for attempt in range(1, 4):
with self.subTest(attempt=attempt):
self._backend.set_state_internal(expected_state) # Increment token.
state_holder.state.round = attempt
state_holder.mark_dirty()
has_set = state_holder.sync()
self.assertFalse(has_set)
self.assert_state_equal(state_holder.state, expected_state)
self.assertEqual(self._mock_backend.get_state.call_count, 0)
self.assertEqual(self._mock_backend.set_state.call_count, 1)
self._mock_backend.reset_mock()
def test_sync_sets_backend_state_if_local_state_is_new_and_dirty(self) -> None:
state_holder = self._create_state_holder()
for attempt in range(1, 4):
with self.subTest(attempt=attempt):
state_holder.state.round = attempt
state_holder.mark_dirty()
has_set = state_holder.sync()
self.assertTrue(has_set)
expected_state = self._backend.get_state_internal()
self.assert_state_equal(state_holder.state, expected_state)
self.assertEqual(self._mock_backend.get_state.call_count, 0)
self.assertEqual(self._mock_backend.set_state.call_count, 1)
self._mock_backend.reset_mock()
def test_sync_uses_cached_state_if_cache_duration_is_specified(self) -> None:
state = self._create_state()
self._backend.set_state_internal(state)
with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
for cache_duration in [1, 5, 10]:
with self.subTest(cache_duration=cache_duration):
self._cache_duration = cache_duration
state_holder = self._create_state_holder()
mock_time.monotonic.return_value = 5
state_holder.sync()
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assertEqual(self._mock_backend.get_state.call_count, 1)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
mock_time.monotonic.return_value = 5 + self._cache_duration
state_holder.sync()
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assertEqual(self._mock_backend.get_state.call_count, 1)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
self._mock_backend.get_state.reset_mock()
def test_sync_gets_backend_state_if_cached_state_has_expired(self) -> None:
state = self._create_state()
self._backend.set_state_internal(state)
with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time") as mock_time:
self._cache_duration = 1
state_holder = self._create_state_holder()
mock_time.monotonic.return_value = 5
state_holder.sync()
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assertEqual(self._mock_backend.get_state.call_count, 1)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
mock_time.monotonic.return_value = 5 + self._cache_duration + 0.01
state_holder.sync()
has_set = state_holder.sync()
self.assertIsNone(has_set)
self.assertEqual(self._mock_backend.get_state.call_count, 2)
self.assertEqual(self._mock_backend.set_state.call_count, 0)
def test_sync_sanitizes_state(self) -> None:
state = self._create_state()
expected_state = copy.deepcopy(state)
dead_node1 = _NodeDesc("dead1", 1, 1)
dead_node2 = _NodeDesc("dead2", 1, 1)
dead_node3 = _NodeDesc("dead3", 1, 1)
dead_node4 = _NodeDesc("dead4", 1, 1)
dead_node5 = _NodeDesc("dead5", 1, 1)
state.last_heartbeats[dead_node1] = self._now - timedelta(seconds=91)
state.last_heartbeats[dead_node2] = self._now - timedelta(seconds=100)
state.last_heartbeats[dead_node3] = self._now - timedelta(seconds=110)
state.last_heartbeats[dead_node4] = self._now - timedelta(seconds=120)
state.last_heartbeats[dead_node5] = self._now - timedelta(seconds=130)
state.participants[dead_node1] = 0
state.participants[dead_node2] = 0
state.participants[dead_node3] = 0
state.wait_list.add(dead_node4)
state.wait_list.add(dead_node5)
self._backend.set_state_internal(state)
state_holder = self._create_state_holder()
state_holder.sync()
self.assert_state_equal(state_holder.state, expected_state)
def test_sync_sanitizes_state_if_no_participants_is_left(self) -> None:
state = self._create_state()
expected_state = copy.deepcopy(state)
for node in state.last_heartbeats:
state.last_heartbeats[node] = self._now - timedelta(seconds=100)
expected_state.complete = False
expected_state.round = 1000
expected_state.participants = {}
expected_state.wait_list = set()
expected_state.last_heartbeats = {}
self._backend.set_state_internal(state)
state_holder = self._create_state_holder()
state_holder.sync()
self.assert_state_equal(state_holder.state, expected_state)
def test_sync_raises_error_if_backend_state_is_corrupt(self) -> None:
self._backend.corrupt_state()
state_holder = self._create_state_holder()
with self.assertRaisesRegex(
RendezvousStateError,
r"^The rendezvous state is corrupt. See inner exception for details.$",
):
state_holder.sync()
class FakeRendezvousStateHolder(_RendezvousStateHolder):
_state: _RendezvousState
_dirty: Optional[bool]
def __init__(self) -> None:
self._state = _RendezvousState()
self._dirty = None
@property
def state(self) -> _RendezvousState:
return self._state
@state.setter
def state(self, value) -> None:
self._state = value
def sync(self) -> Optional[bool]:
self._dirty, dirty = None, self._dirty
return dirty
def mark_dirty(self) -> None:
self._dirty = True
class DistributedRendezvousOpExecutorTest(TestCase, CustomAssertMixin):
def setUp(self) -> None:
self._node = _NodeDesc("this_node", 1, 1)
self._state_holder = FakeRendezvousStateHolder()
mock_sync = MagicMock(wraps=self._state_holder.sync)
mock_mark = MagicMock(wraps=self._state_holder.mark_dirty)
self._mock_state_holder = Mock()
self._mock_state_holder.sync = mock_sync
self._mock_state_holder.mark = mock_mark
setattr(self._state_holder, "sync", mock_sync) # noqa: B010
setattr(self._state_holder, "mark_dirty", mock_mark) # noqa: B010
self._state = self._state_holder.state
self._min_nodes = 1
self._max_nodes = 1
self._timeout = RendezvousTimeout()
self._now = datetime(2000, 1, 1, hour=0, minute=0)
self._datetime_patch = patch(
"torch.distributed.elastic.rendezvous.dynamic_rendezvous.datetime"
)
mock_datetime = self._datetime_patch.start()
mock_datetime.utcnow.return_value = self._now
def tearDown(self) -> None:
self._datetime_patch.stop()
def _create_settings(self) -> RendezvousSettings:
return RendezvousSettings(
run_id="dummy_run_id",
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
timeout=self._timeout,
keep_alive_interval=timedelta(seconds=30),
keep_alive_max_attempt=3,
)
def _create_op_executor(
self, settings: Optional[RendezvousSettings] = None
) -> _DistributedRendezvousOpExecutor:
self._state_holder.state = self._state
if settings is None:
settings = self._create_settings()
return _DistributedRendezvousOpExecutor(self._node, self._state_holder, settings)
def _run_action(self, action: _Action) -> None:
op_executor = self._create_op_executor()
op = MagicMock(side_effect=[action, _Action.FINISH])
op_executor.run(op, deadline=1)
def _assert_action(self, action: _Action, expected_state: _RendezvousState) -> None:
self._run_action(action)
self.assert_state_equal(self._state, expected_state)
self.assertListEqual(
self._mock_state_holder.mock_calls, [call.sync(), call.mark(), call.sync()]
)
def test_run_passes_expected_context_and_deadline_to_state_handler(self) -> None:
settings = self._create_settings()
op_executor = self._create_op_executor(settings)
op = MagicMock(return_value=_Action.FINISH)
op_executor.run(op, deadline=3)
ctx, deadline = op.call_args[0] # args
self.assertIs(ctx.node, self._node)
self.assertIs(ctx.state, self._state)
self.assertIs(ctx.settings, settings)
self.assertEqual(deadline, 3)
def test_run_keeps_alive(self) -> None:
expected_state = _RendezvousState()
expected_state.last_heartbeats[self._node] = self._now
self._assert_action(_Action.KEEP_ALIVE, expected_state)
def test_run_adds_to_participants(self) -> None:
expected_state = _RendezvousState()
expected_state.participants[self._node] = 0
expected_state.last_heartbeats[self._node] = self._now
self._min_nodes = 2
self._max_nodes = 2
self._assert_action(_Action.ADD_TO_PARTICIPANTS, expected_state)
def test_run_adds_to_participants_if_node_was_in_waitlist(self) -> None:
self._state.wait_list.add(self._node)
expected_state = _RendezvousState()
expected_state.participants[self._node] = 0
expected_state.last_heartbeats[self._node] = self._now
self._min_nodes = 2
self._max_nodes = 2
self._assert_action(_Action.ADD_TO_PARTICIPANTS, expected_state)
def _add_participants(
self, num_participants: int, state: _RendezvousState, ranked: bool = False
) -> None:
for i in range(num_participants):
if ranked:
node = _NodeDesc(f"dummy{i}", 1, 1)
rank = i
else:
node = _NodeDesc(f"dummy{num_participants - i - 1}", 1, 1) # Add in reverse.
rank = 0
state.participants[node] = rank
state.last_heartbeats[node] = self._now
def test_run_adds_to_participants_and_starts_last_call_if_min_nodes_is_reached(self) -> None:
for num_participants in range(3):
self._state = _RendezvousState()
self._add_participants(num_participants, self._state)
self._state.wait_list.add(self._node)
expected_state = _RendezvousState()
self._add_participants(num_participants, expected_state)
expected_state.participants[self._node] = 0
expected_state.last_heartbeats[self._node] = self._now
expected_state.deadline = self._now + self._timeout.last_call
with self.subTest(num_participants=num_participants):
self._min_nodes = num_participants + 1
self._max_nodes = num_participants + 2
self._assert_action(_Action.ADD_TO_PARTICIPANTS, expected_state)
self._mock_state_holder.reset_mock()
def test_run_adds_to_participants_and_completes_rendezvous_if_max_nodes_is_reached(
self,
) -> None:
for min_max_nodes_equal in [False, True]:
for num_participants in range(3):
rank = num_participants
self._state = _RendezvousState()
self._add_participants(num_participants, self._state)
self._state.wait_list.add(self._node)
self._state.deadline = self._now + self._timeout.last_call
expected_state = _RendezvousState()
self._add_participants(num_participants, expected_state, ranked=True)
expected_state.participants[self._node] = rank
expected_state.last_heartbeats[self._node] = self._now
expected_state.complete = True
expected_state.deadline = None
with self.subTest(num_participants=num_participants):
self._min_nodes = num_participants + 1 if min_max_nodes_equal else 0
self._max_nodes = num_participants + 1
self._assert_action(_Action.ADD_TO_PARTICIPANTS, expected_state)
self._mock_state_holder.reset_mock()
def test_run_adds_to_waitlist(self) -> None:
expected_state = _RendezvousState()
expected_state.wait_list.add(self._node)
expected_state.last_heartbeats[self._node] = self._now
self._assert_action(_Action.ADD_TO_WAIT_LIST, expected_state)
def test_run_removes_from_participants(self) -> None:
for complete, last_call_deadline in [(False, self._now), (True, None)]:
self._state = _RendezvousState()
self._add_participants(2, self._state)
self._state.participants[self._node] = 0
self._state.last_heartbeats[self._node] = self._now
self._state.complete = complete
self._state.deadline = last_call_deadline
self._state.round = 1
expected_state = _RendezvousState()
self._add_participants(2, expected_state)
expected_state.complete = complete
expected_state.deadline = last_call_deadline
expected_state.round = 1
with self.subTest(complete=complete):
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS, expected_state)
self._mock_state_holder.reset_mock()
def test_run_removes_from_participants_and_moves_to_next_round_if_node_is_last_participant(
self,
) -> None:
self._state.participants[self._node] = 0
self._state.last_heartbeats[self._node] = self._now
self._state.complete = True
self._state.round = 1
expected_state = _RendezvousState()
expected_state.complete = False
expected_state.round = 2
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS, expected_state)
def test_run_removes_from_participants_and_clears_last_call_if_rendezvous_has_less_than_min_nodes(
self,
) -> None:
self._add_participants(2, self._state)
self._state.participants[self._node] = 0
self._state.last_heartbeats[self._node] = self._now
self._state.deadline = self._now
expected_state = _RendezvousState()
self._add_participants(2, expected_state)
self._min_nodes = 3
self._max_nodes = 4
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS, expected_state)
def test_run_removes_from_waitlist(self) -> None:
self._state.wait_list.add(self._node)
self._state.last_heartbeats[self._node] = self._now
expected_state = _RendezvousState()
self._assert_action(_Action.REMOVE_FROM_WAIT_LIST, expected_state)
def test_run_marks_rendezvous_closed(self) -> None:
expected_state = _RendezvousState()
expected_state.closed = True
self._assert_action(_Action.MARK_RENDEZVOUS_CLOSED, expected_state)
def test_run_raises_error_if_rendezvous_is_closed(self) -> None:
with self.assertRaises(RendezvousClosedError):
self._run_action(_Action.ERROR_CLOSED)
self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync()])
def test_run_raises_error_if_operation_timed_out(self) -> None:
with self.assertRaises(RendezvousTimeoutError):
self._run_action(_Action.ERROR_TIMEOUT)
self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync()])
def test_run_delays_execution_if_sync_requested(self) -> None:
with patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay") as mock_delay:
self._run_action(_Action.SYNC)
mock_delay.assert_called_once_with(seconds=1)
self.assertListEqual(self._mock_state_holder.mock_calls, [call.sync(), call.sync()])
class AbstractTestRendezvousOp(ABC):
assertEqual: Callable
def setUp(self) -> None:
self._node = _NodeDesc("this_node", 1, 1)
self._min_nodes = 1
self._max_nodes = 2
self._keep_alive_interval = timedelta(seconds=30)
self._state = _RendezvousState()
self._state.participants[_NodeDesc("dummy1", 1, 1)] = 1
self._now = datetime(2000, 1, 1, hour=0, minute=0)
self._deadline = 10
self._datetime_patch = patch(
"torch.distributed.elastic.rendezvous.dynamic_rendezvous.datetime"
)
mock_datetime = self._datetime_patch.start()
mock_datetime.utcnow.return_value = self._now
self._time_patch = patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.time")
mock_time = self._time_patch.start()
mock_time.monotonic.return_value = self._deadline
def tearDown(self) -> None:
self._time_patch.stop()
self._datetime_patch.stop()
def _get_next_action(self) -> _Action:
op = self._create_op()
settings = RendezvousSettings(
run_id="dummy_run_id",
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
timeout=RendezvousTimeout(),
keep_alive_interval=self._keep_alive_interval,
keep_alive_max_attempt=3,
)
ctx = _RendezvousContext(self._node, self._state, settings)
return op(ctx, self._deadline)
@abstractmethod
def _create_op(self) -> Callable:
pass
def _assert_action(self, expected_action) -> None:
action = self._get_next_action()
self.assertEqual(action, expected_action)
class TestRendezvousExitOp(AbstractTestRendezvousOp, TestCase):
def _create_op(self) -> Callable:
return _RendezvousExitOp()
def test_removes_from_participants_if_node_is_participant(self) -> None:
self._state.participants[self._node] = 1
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS)
def test_raises_timeout_if_deadline_exceeded(self) -> None:
self._deadline = 0
self._state.participants[self._node] = 1
self._assert_action(_Action.ERROR_TIMEOUT)
def test_finishes_if_node_is_not_participant(self) -> None:
self._assert_action(_Action.FINISH)
class TestRendezvousJoinOp(AbstractTestRendezvousOp, TestCase):
def _create_op(self) -> Callable:
return _RendezvousJoinOp()
def test_raises_closed_if_rendezvous_is_closed(self) -> None:
self._state.closed = True
self._assert_action(_Action.ERROR_CLOSED)
def test_finishes_if_rendezvous_is_complete_and_node_is_participant(self) -> None:
self._state.participants[self._node] = 0
self._state.complete = True
self._assert_action(_Action.FINISH)
def _assert_waits_rendezvous_completion(self) -> None:
keep_alive_time = self._now - self._keep_alive_interval
for delta, expected_action in [
(timedelta(seconds=0), _Action.KEEP_ALIVE),
(timedelta(seconds=1), _Action.SYNC),
]:
self._state.last_heartbeats[self._node] = keep_alive_time + delta
self._assert_action(expected_action)
def test_waits_next_round_if_rendezvous_is_complete(self) -> None:
self._max_nodes = 1
self._state.complete = True
self._assert_waits_rendezvous_completion()
def test_waits_next_round_if_rendezvous_is_complete_and_node_is_in_wait_list(self) -> None:
self._state.wait_list.add(self._node)
self._state.complete = True
self._assert_waits_rendezvous_completion()
def test_adds_to_wait_list_if_rendezvous_is_complete_and_num_nodes_is_less_than_max_nodes(
self,
) -> None:
self._state.complete = True
self._assert_action(_Action.ADD_TO_WAIT_LIST)
def test_waits_rendezvous_to_complete_if_node_is_participant(self) -> None:
self._max_nodes = 3
self._state.participants[self._node] = 0
self._state.deadline = self._now
self._assert_waits_rendezvous_completion()
def test_marks_rendezvous_complete_if_node_is_participant_and_last_call_deadline_exceeded(
self,
) -> None:
self._max_nodes = 3
self._state.participants[self._node] = 0
self._state.deadline = self._now - timedelta(seconds=1)
self._assert_action(_Action.MARK_RENDEZVOUS_COMPLETE)
def test_adds_to_participants(self) -> None:
self._assert_action(_Action.ADD_TO_PARTICIPANTS)
def test_raises_timeout_if_deadline_exceeded(self) -> None:
self._deadline = 0
self._assert_action(_Action.ERROR_TIMEOUT)
def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_participant(self) -> None:
self._deadline = 0
self._state.participants[self._node] = 0
self._assert_action(_Action.ERROR_TIMEOUT)
def test_raises_timeout_if_rollback_deadline_exceeded_and_node_is_in_wait_list(self) -> None:
self._deadline = 0
self._state.wait_list.add(self._node)
self._assert_action(_Action.ERROR_TIMEOUT)
def test_removes_from_participants_if_timed_out_but_rollback_deadline_is_not_reached(
self,
) -> None:
self._deadline = 5
self._state.participants[self._node] = 0
self._assert_action(_Action.REMOVE_FROM_PARTICIPANTS)
def test_removes_from_wait_list_if_timed_out_but_rollback_deadline_is_not_reached(self) -> None:
self._deadline = 5
self._state.wait_list.add(self._node)
self._assert_action(_Action.REMOVE_FROM_WAIT_LIST)
class TestRendezvousCloseOp(AbstractTestRendezvousOp, TestCase):
def _create_op(self) -> Callable:
return _RendezvousCloseOp()
def test_finishes_if_rendezvous_is_closed(self) -> None:
self._state.closed = True
self._assert_action(_Action.FINISH)
def test_raises_timeout_if_deadline_exceeded(self) -> None:
self._deadline = 0
self._assert_action(_Action.ERROR_TIMEOUT)
def test_marks_rendezvous_closed(self) -> None:
self._assert_action(_Action.MARK_RENDEZVOUS_CLOSED)
class TestRendezvousKeepAliveOp(AbstractTestRendezvousOp, TestCase):
def _create_op(self) -> Callable:
return _RendezvousKeepAliveOp()
def test_updates_keep_alive_if_needed(self) -> None:
keep_alive_time = self._now - self._keep_alive_interval
for delta in [timedelta(seconds=0), timedelta(seconds=-1)]:
with self.subTest(delta=delta):
self._state.last_heartbeats[self._node] = keep_alive_time + delta
self._assert_action(_Action.KEEP_ALIVE)
    def test_raises_timeout_if_deadline_exceeded(self) -> None:
self._deadline = 0
self._state.last_heartbeats[self._node] = self._now - self._keep_alive_interval
self._assert_action(_Action.ERROR_TIMEOUT)
def test_finishes_if_no_keep_alive_update_is_needed(self) -> None:
delta = timedelta(seconds=1)
self._state.last_heartbeats[self._node] = self._now - self._keep_alive_interval + delta
self._assert_action(_Action.FINISH)
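# Illustrative sketch: the timing rule that the keep-alive tests above encode,
# restated as a tiny helper under the same assumptions as the tests
# (heartbeats are datetimes, the interval is a timedelta). This is a reader
# aid, not the implementation used by dynamic_rendezvous.
def _example_keep_alive_due(
    last_heartbeat: datetime, keep_alive_interval: timedelta, now: datetime
) -> bool:
    # A heartbeat is due once the keep-alive interval has fully elapsed.
    return last_heartbeat + keep_alive_interval <= now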
class DummyStore(Store):
pass
class DynamicRendezvousHandlerTest(TestCase):
def setUp(self) -> None:
self._node = _NodeDesc("this_node", 1, 1)
self._min_nodes = 1
self._max_nodes = 1
self._join_timeout: Optional[timedelta] = None
self._close_timeout: Optional[timedelta] = None
self._heartbeat_timeout: Optional[timedelta] = None
self._keep_alive_interval = timedelta(seconds=30)
self._store = DummyStore()
self._mock_store_get = MagicMock(return_value=b"dummy_value")
setattr(self._store, "get", self._mock_store_get) # noqa: B010
self._state_holder = FakeRendezvousStateHolder()
self._mock_sync = MagicMock(wraps=self._state_holder.sync)
setattr(self._state_holder, "sync", self._mock_sync) # noqa: B010
self._state = self._state_holder.state
def _create_handler(self) -> DynamicRendezvousHandler:
settings = RendezvousSettings(
run_id="dummy_run_id",
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
timeout=RendezvousTimeout(
join=self._join_timeout,
close=self._close_timeout,
heartbeat=self._heartbeat_timeout,
),
keep_alive_interval=self._keep_alive_interval,
keep_alive_max_attempt=3,
)
self._state_holder.state = self._state
return DynamicRendezvousHandler(
self._node, settings, "dummy_backend", self._store, self._state_holder
)
@patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay")
def test_next_rendezvous_skews_the_first_join_attempt(self, mock_delay) -> None:
for round, expected_call_count in [(0, True), (1, False)]:
with self.subTest(round=round):
self._state.round = round
handler = self._create_handler()
handler.next_rendezvous()
self.assertEqual(mock_delay.call_count, expected_call_count)
mock_delay.reset_mock()
def test_next_rendezvous_returns_expected_value(self) -> None:
self._state.participants[_NodeDesc("dummy1", 1, 1)] = 0
self._state.participants[_NodeDesc("dummy2", 1, 1)] = 0
self._max_nodes = 3
handler = self._create_handler()
store, rank, world_size = handler.next_rendezvous()
self.assertEqual(rank, 2)
self.assertEqual(world_size, 3)
_ = store.get("dummy_key")
self._mock_store_get.assert_called_once_with("torch.rendezvous.dummy_run_id.0/dummy_key")
def test_next_rendezvous_respects_the_requested_timeout(self) -> None:
self._mock_sync.side_effect = lambda: time.sleep(0.3)
self._join_timeout = timedelta(seconds=0.2)
handler = self._create_handler()
with self.assertRaises(RendezvousTimeoutError):
handler.next_rendezvous()
def test_next_rendezvous_moves_to_next_round_if_called_repeatedly(self) -> None:
handler = self._create_handler()
for i in range(4):
handler.next_rendezvous()
self.assertEqual(self._state.round, i)
def test_is_closed_returns_expected_value(self) -> None:
for closed in [False, True]:
with self.subTest(closed=closed):
self._state.closed = closed
handler = self._create_handler()
self.assertEqual(handler.is_closed(), closed)
self._mock_sync.assert_called_once()
self._mock_sync.reset_mock()
@patch("torch.distributed.elastic.events.record_rdzv_event")
def test_is_closed_records_and_raises_exceptions(self, record_mock) -> None:
self._mock_sync.side_effect = RendezvousError("test error")
handler = self._create_handler()
with self.assertRaises(RendezvousError):
handler.is_closed()
record_mock.assert_called_once()
def test_set_closed_closes_rendezvous(self) -> None:
handler = self._create_handler()
handler.set_closed()
self.assertTrue(self._state.closed)
def test_set_closed_respects_the_requested_timeout(self) -> None:
self._mock_sync.side_effect = lambda: time.sleep(0.3)
self._close_timeout = timedelta(seconds=0.2)
handler = self._create_handler()
with self.assertRaises(RendezvousTimeoutError):
handler.set_closed()
def test_set_closed_can_be_called_multiple_times(self) -> None:
handler = self._create_handler()
handler.set_closed()
handler.set_closed()
self.assertTrue(self._state.closed)
@patch("torch.distributed.elastic.events.record_rdzv_event")
def test_set_closed_records_and_raises_exceptions(self, record_mock) -> None:
with patch.object(DynamicRendezvousHandler, "_close") as close_mock:
close_mock.side_effect = RendezvousError("test error")
handler = self._create_handler()
with self.assertRaises(RendezvousError):
handler.set_closed()
record_mock.assert_called_once()
def test_num_nodes_waiting_returns_expected_value(self) -> None:
self._state.wait_list.add(_NodeDesc("dummy1", 1, 1))
self._state.wait_list.add(_NodeDesc("dummy2", 1, 1))
handler = self._create_handler()
self.assertEqual(handler.num_nodes_waiting(), 2)
self._mock_sync.assert_called_once()
@patch("torch.distributed.elastic.events.record_rdzv_event")
def test_num_nodes_waiting_records_and_raises_exceptions(self, record_mock) -> None:
self._mock_sync.side_effect = RendezvousError("test error")
handler = self._create_handler()
with self.assertRaises(RendezvousError):
handler.num_nodes_waiting()
record_mock.assert_called_once()
def test_shutdown_closes_rendezvous_and_returns_true(self) -> None:
handler = self._create_handler()
result = handler.shutdown()
self.assertTrue(result)
self.assertTrue(self._state.closed)
def test_shutdown_returns_false_if_rendezvous_cannot_be_closed(self) -> None:
self._mock_sync.side_effect = [RendezvousError]
handler = self._create_handler()
result = handler.shutdown()
self.assertFalse(result)
def test_shutdown_can_be_called_multiple_times(self) -> None:
handler = self._create_handler()
handler.shutdown()
handler.shutdown()
self.assertTrue(self._state.closed)
@patch("torch.distributed.elastic.events.record_rdzv_event")
def test_shutdown_records_and_raises_exceptions(self, record_mock) -> None:
with patch.object(DynamicRendezvousHandler, "_close") as close_mock:
close_mock.side_effect = RuntimeError("test error")
handler = self._create_handler()
with self.assertRaises(RuntimeError):
handler.shutdown()
record_mock.assert_called_once()
@patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous.datetime")
def test_keep_alive_updates_last_heartbeat(self, mock_datetime) -> None:
now = datetime(2000, 1, 1, hour=0, minute=0)
mock_datetime.utcnow.return_value = now
self._state.last_heartbeats[self._node] = now - (self._keep_alive_interval * 2)
handler = self._create_handler()
handler._keep_alive()
self.assertEqual(self._state.last_heartbeats[self._node], now)
def _assert_keep_alive_swallows_rendezvous_errors(self) -> None:
last_heartbeat_time = datetime.utcnow() - (self._keep_alive_interval * 2)
self._state.last_heartbeats[self._node] = last_heartbeat_time
handler = self._create_handler()
handler._keep_alive()
self.assertEqual(self._state.last_heartbeats[self._node], last_heartbeat_time)
def test_keep_alive_swallows_rendezvous_errors(self) -> None:
self._mock_sync.side_effect = [RendezvousError]
self._assert_keep_alive_swallows_rendezvous_errors()
def test_keep_alive_respects_the_requested_timeout(self) -> None:
self._mock_sync.side_effect = lambda: time.sleep(0.3)
self._heartbeat_timeout = timedelta(seconds=0.2)
self._assert_keep_alive_swallows_rendezvous_errors()
def test_keep_alive_thread_is_started_with_next_rendezvous_and_stopped_with_shutdown(
self,
) -> None:
self._node = _NodeDesc("this_node", 1, 2)
name = "RendezvousKeepAliveTimer_2"
handler = self._create_handler()
self.assertTrue(all(t.name != name for t in threading.enumerate()))
handler.next_rendezvous()
self.assertTrue(any(t.name == name for t in threading.enumerate()))
handler.shutdown()
self.assertTrue(all(t.name != name for t in threading.enumerate()))
def test_keep_alive_thread_is_started_with_next_rendezvous_and_stopped_with_finalizer(
self,
) -> None:
self._node = _NodeDesc("this_node", 1, 3)
name = "RendezvousKeepAliveTimer_3"
handler = self._create_handler()
self.assertTrue(all(t.name != name for t in threading.enumerate()))
handler.next_rendezvous()
self.assertTrue(any(t.name == name for t in threading.enumerate()))
del handler
self.assertTrue(all(t.name != name for t in threading.enumerate()))
class DummyRendezvousBackend(RendezvousBackend):
@property
def name(self):
return "dummy_backend"
def get_state(self):
return None
def set_state(self, state, token):
return None
class DynamicRendezvousHandlerFromBackendTest(TestCase):
def setUp(self) -> None:
self._run_id = "dummy_run_id"
self._store = DummyStore()
self._backend = DummyRendezvousBackend()
self._min_nodes = 3
self._max_nodes = 6
self._timeout: Optional[RendezvousTimeout] = RendezvousTimeout()
def _create_handler(self) -> DynamicRendezvousHandler:
return DynamicRendezvousHandler.from_backend(
run_id=self._run_id,
store=self._store,
backend=self._backend,
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
timeout=self._timeout,
)
def test_init_initializes_handler(self) -> None:
handler = self._create_handler()
self.assertEqual(handler.get_backend(), self._backend.name)
self.assertEqual(handler.get_run_id(), self._run_id)
self.assertEqual(handler.settings.run_id, self._run_id)
self.assertEqual(handler.settings.min_nodes, self._min_nodes)
self.assertEqual(handler.settings.max_nodes, self._max_nodes)
if self._timeout is None:
self.assertIsNotNone(handler.settings.timeout)
else:
self.assertIs(handler.settings.timeout, self._timeout)
def test_init_initializes_handler_if_timeout_is_not_specified(self) -> None:
self._timeout = None
self.test_init_initializes_handler()
def test_init_initializes_handler_if_min_and_max_nodes_are_equal(self) -> None:
self._min_nodes = 3
self._max_nodes = 3
self.test_init_initializes_handler()
def test_init_raises_error_if_min_nodes_is_not_positive(self) -> None:
for num in [0, -10]:
with self.subTest(min_nodes=num):
self._min_nodes = num
with self.assertRaisesRegex(
ValueError,
rf"^The minimum number of nodes \({num}\) must be greater than zero.$",
):
self._create_handler()
def test_init_raises_error_if_max_nodes_is_less_than_min(self) -> None:
self._min_nodes = 3
self._max_nodes = 2
with self.assertRaisesRegex(
ValueError,
rf"^The maximum number of nodes \({self._max_nodes}\) must be greater than or equal to "
"the minimum number of nodes "
rf"\({self._min_nodes}\).$",
):
self._create_handler()
class CreateHandlerTest(TestCase):
def setUp(self) -> None:
self._store = DummyStore()
self._backend = DummyRendezvousBackend()
self._params = RendezvousParameters(
backend=self._backend.name,
endpoint="dummy_endpoint",
run_id="dummy_run_id",
min_nodes=3,
max_nodes=6,
join_timeout="50",
last_call_timeout="60",
close_timeout="70",
)
self._expected_timeout = RendezvousTimeout(
timedelta(seconds=50), timedelta(seconds=60), timedelta(seconds=70)
)
def test_create_handler_returns_handler(self) -> None:
handler = create_handler(self._store, self._backend, self._params)
self.assertEqual(handler.get_backend(), self._backend.name)
self.assertEqual(handler.get_run_id(), self._params.run_id)
self.assertEqual(handler.settings.min_nodes, self._params.min_nodes)
self.assertEqual(handler.settings.max_nodes, self._params.max_nodes)
self.assertEqual(handler.settings.timeout.join, self._expected_timeout.join)
self.assertEqual(handler.settings.timeout.last_call, self._expected_timeout.last_call)
self.assertEqual(handler.settings.timeout.close, self._expected_timeout.close)
def test_create_handler_returns_handler_if_timeout_is_not_specified(self) -> None:
del self._params.config["join_timeout"]
del self._params.config["last_call_timeout"]
del self._params.config["close_timeout"]
self._expected_timeout = RendezvousTimeout()
self.test_create_handler_returns_handler()
@patch("torch.distributed.elastic.events.record_rdzv_event")
def test_create_handler_records_and_raises_exceptions(self, record_mock) -> None:
with patch.object(DynamicRendezvousHandler, "from_backend") as from_mock:
from_mock.side_effect = RendezvousError("test error")
with self.assertRaises(RendezvousError):
create_handler(self._store, self._backend, self._params)
record_mock.assert_called_once()
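# Illustrative sketch: how the pieces exercised above fit together outside of
# a test. RendezvousParameters carries the user-facing configuration and
# create_handler() turns it into a DynamicRendezvousHandler bound to a store
# and a backend; the dummy classes defined in this module stand in for a real
# C10d or etcd backend. The run_id and timeout values are placeholders.
def _example_create_handler_sketch():
    params = RendezvousParameters(
        backend="dummy_backend",
        endpoint="dummy_endpoint",
        run_id="example_run",
        min_nodes=1,
        max_nodes=2,
        join_timeout="30",  # seconds; parsed into RendezvousTimeout.join
    )
    handler = create_handler(DummyStore(), DummyRendezvousBackend(), params)
    return handler.get_run_id(), handler.settings.timeout.join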
|
pytorch-master
|
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional, Tuple, cast
from torch.distributed.elastic.rendezvous import RendezvousStateError
from torch.distributed.elastic.rendezvous.dynamic_rendezvous import RendezvousBackend, Token
class RendezvousBackendTestMixin(ABC):
_backend: RendezvousBackend
# Type hints
assertEqual: Callable
assertNotEqual: Callable
assertIsNone: Callable
assertIsNotNone: Callable
assertRaises: Callable
@abstractmethod
def _corrupt_state(self) -> None:
"""Corrupts the state stored in the backend."""
pass
def _set_state(self, state: bytes, token: Optional[Any] = None) -> Tuple[bytes, Token, bool]:
result = self._backend.set_state(state, token)
self.assertIsNotNone(result)
return cast(Tuple[bytes, Token, bool], result)
def test_get_state_returns_backend_state(self) -> None:
self._backend.set_state(b"x")
result = self._backend.get_state()
self.assertIsNotNone(result)
state, token = cast(Tuple[bytes, Token], result)
self.assertEqual(b"x", state)
self.assertIsNotNone(token)
def test_get_state_returns_none_if_backend_state_does_not_exist(self) -> None:
result = self._backend.get_state()
self.assertIsNone(result)
def test_get_state_raises_error_if_backend_state_is_corrupt(self) -> None:
self._corrupt_state()
with self.assertRaises(RendezvousStateError):
self._backend.get_state()
def test_set_state_sets_backend_state_if_it_does_not_exist(self) -> None:
state, token, has_set = self._set_state(b"x")
self.assertEqual(b"x", state)
self.assertIsNotNone(token)
self.assertTrue(has_set)
def test_set_state_sets_backend_state_if_token_is_current(self) -> None:
state1, token1, has_set1 = self._set_state(b"x")
state2, token2, has_set2 = self._set_state(b"y", token1)
self.assertEqual(b"y", state2)
self.assertNotEqual(token1, token2)
self.assertTrue(has_set1)
self.assertTrue(has_set2)
def test_set_state_returns_current_backend_state_if_token_is_old(self) -> None:
state1, token1, _ = self._set_state(b"x")
state2, token2, _ = self._set_state(b"y", token1)
state3, token3, has_set = self._set_state(b"z", token1)
self.assertEqual(state2, state3)
self.assertEqual(token2, token3)
self.assertFalse(has_set)
def test_set_state_returns_current_backend_state_if_token_is_none(self) -> None:
state1, token1, _ = self._set_state(b"x")
state2, token2, has_set = self._set_state(b"y")
self.assertEqual(state1, state2)
self.assertEqual(token1, token2)
self.assertFalse(has_set)
def test_set_state_returns_current_backend_state_if_token_is_invalid(self) -> None:
state1, token1, _ = self._set_state(b"x")
state2, token2, has_set = self._set_state(b"y", token="invalid")
self.assertEqual(state1, state2)
self.assertEqual(token1, token2)
self.assertFalse(has_set)
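# Illustrative sketch: a minimal in-memory backend that follows the
# compare-and-set contract the mixin above verifies. set_state() only
# overwrites the stored state when the caller's token matches the current
# token and always reports the authoritative (state, token, has_set) triple.
# This is a hypothetical reader aid, not one of the shipped backends.
class _InMemoryBackendSketch(RendezvousBackend):
    def __init__(self) -> None:
        self._state: Optional[bytes] = None
        self._token = 0
    @property
    def name(self) -> str:
        return "in_memory_sketch"
    def get_state(self) -> Optional[Tuple[bytes, Token]]:
        if self._state is None:
            return None
        return self._state, self._token
    def set_state(
        self, state: bytes, token: Optional[Token] = None
    ) -> Optional[Tuple[bytes, Token, bool]]:
        if self._state is None or token == self._token:
            # Fresh state or a current token: accept the write.
            self._state = state
            self._token += 1
            return self._state, self._token, True
        # Stale, missing, or invalid token: report the current state instead.
        return self._state, self._token, False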
|
pytorch-master
|
test/distributed/elastic/rendezvous/rendezvous_backend_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
from base64 import b64encode
from typing import ClassVar, cast
from unittest import TestCase
from etcd import EtcdKeyNotFound # type: ignore[import]
from torch.distributed.elastic.rendezvous import RendezvousConnectionError, RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous_backend import (
EtcdRendezvousBackend,
create_backend,
)
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.rendezvous.etcd_store import EtcdStore
from rendezvous_backend_test import RendezvousBackendTestMixin
class EtcdRendezvousBackendTest(TestCase, RendezvousBackendTestMixin):
_server: ClassVar[EtcdServer]
@classmethod
def setUpClass(cls) -> None:
cls._server = EtcdServer()
cls._server.start(stderr=subprocess.DEVNULL)
@classmethod
def tearDownClass(cls) -> None:
cls._server.stop()
def setUp(self) -> None:
self._client = self._server.get_client()
# Make sure we have a clean slate.
try:
self._client.delete("/dummy_prefix", recursive=True, dir=True)
except EtcdKeyNotFound:
pass
self._backend = EtcdRendezvousBackend(self._client, "dummy_run_id", "/dummy_prefix")
def _corrupt_state(self) -> None:
self._client.write("/dummy_prefix/dummy_run_id", "non_base64")
class CreateBackendTest(TestCase):
_server: ClassVar[EtcdServer]
@classmethod
def setUpClass(cls) -> None:
cls._server = EtcdServer()
cls._server.start(stderr=subprocess.DEVNULL)
@classmethod
def tearDownClass(cls) -> None:
cls._server.stop()
def setUp(self) -> None:
self._params = RendezvousParameters(
backend="dummy_backend",
endpoint=self._server.get_endpoint(),
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
protocol="hTTp",
read_timeout="10",
)
self._expected_read_timeout = 10
def test_create_backend_returns_backend(self) -> None:
backend, store = create_backend(self._params)
self.assertEqual(backend.name, "etcd-v2")
self.assertIsInstance(store, EtcdStore)
etcd_store = cast(EtcdStore, store)
self.assertEqual(etcd_store.client.read_timeout, self._expected_read_timeout) # type: ignore[attr-defined]
client = self._server.get_client()
backend.set_state(b"dummy_state")
result = client.get("/torch/elastic/rendezvous/" + self._params.run_id)
self.assertEqual(result.value, b64encode(b"dummy_state").decode())
self.assertLessEqual(result.ttl, 7200)
store.set("dummy_key", "dummy_value")
result = client.get("/torch/elastic/store/" + b64encode(b"dummy_key").decode())
self.assertEqual(result.value, b64encode(b"dummy_value").decode())
def test_create_backend_returns_backend_if_protocol_is_not_specified(self) -> None:
del self._params.config["protocol"]
self.test_create_backend_returns_backend()
def test_create_backend_returns_backend_if_read_timeout_is_not_specified(self) -> None:
del self._params.config["read_timeout"]
self._expected_read_timeout = 60
self.test_create_backend_returns_backend()
def test_create_backend_raises_error_if_etcd_is_unreachable(self) -> None:
self._params.endpoint = "dummy:1234"
with self.assertRaisesRegex(
RendezvousConnectionError,
r"^The connection to etcd has failed. See inner exception for details.$",
):
create_backend(self._params)
def test_create_backend_raises_error_if_protocol_is_invalid(self) -> None:
self._params.config["protocol"] = "dummy"
with self.assertRaisesRegex(ValueError, r"^The protocol must be HTTP or HTTPS.$"):
create_backend(self._params)
def test_create_backend_raises_error_if_read_timeout_is_invalid(self) -> None:
for read_timeout in ["0", "-10"]:
with self.subTest(read_timeout=read_timeout):
self._params.config["read_timeout"] = read_timeout
with self.assertRaisesRegex(
ValueError, r"^The read timeout must be a positive integer.$"
):
create_backend(self._params)
|
pytorch-master
|
test/distributed/elastic/rendezvous/etcd_rendezvous_backend_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerRegistry,
RendezvousParameters,
)
class RendezvousParametersTest(TestCase):
def setUp(self) -> None:
self._backend = "dummy_backend"
self._endpoint = "dummy_endpoint"
self._run_id = "dummy_run_id"
self._min_nodes = 3
self._max_nodes = 6
self._kwargs: Dict[str, Any] = {}
def _create_params(self) -> RendezvousParameters:
return RendezvousParameters(
backend=self._backend,
endpoint=self._endpoint,
run_id=self._run_id,
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
**self._kwargs,
)
def test_init_initializes_params(self) -> None:
self._kwargs["dummy_param"] = "x"
params = self._create_params()
self.assertEqual(params.backend, self._backend)
self.assertEqual(params.endpoint, self._endpoint)
self.assertEqual(params.run_id, self._run_id)
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
self.assertEqual(params.get("dummy_param"), "x")
def test_init_initializes_params_if_min_nodes_equals_to_1(self) -> None:
self._min_nodes = 1
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_initializes_params_if_min_and_max_nodes_are_equal(self) -> None:
self._max_nodes = 3
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_raises_error_if_backend_is_none_or_empty(self) -> None:
for backend in [None, ""]:
with self.subTest(backend=backend):
self._backend = backend # type: ignore[assignment]
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous backend name must be a non-empty string.$",
):
self._create_params()
def test_init_raises_error_if_min_nodes_is_less_than_1(self) -> None:
for min_nodes in [0, -1, -5]:
with self.subTest(min_nodes=min_nodes):
self._min_nodes = min_nodes
with self.assertRaisesRegex(
ValueError,
rf"^The minimum number of rendezvous nodes \({min_nodes}\) must be greater "
rf"than zero.$",
):
self._create_params()
def test_init_raises_error_if_max_nodes_is_less_than_min_nodes(self) -> None:
for max_nodes in [2, 1, -2]:
with self.subTest(max_nodes=max_nodes):
self._max_nodes = max_nodes
with self.assertRaisesRegex(
ValueError,
rf"^The maximum number of rendezvous nodes \({max_nodes}\) must be greater "
"than or equal to the minimum number of rendezvous nodes "
rf"\({self._min_nodes}\).$",
):
self._create_params()
def test_get_returns_none_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertIsNone(params.get("dummy_param"))
def test_get_returns_default_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertEqual(params.get("dummy_param", default="x"), "x")
def test_get_as_bool_returns_none_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertIsNone(params.get_as_bool("dummy_param"))
def test_get_as_bool_returns_default_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertTrue(params.get_as_bool("dummy_param", default=True))
def test_get_as_bool_returns_true_if_value_represents_true(self) -> None:
for value in ["1", "True", "tRue", "T", "t", "yEs", "Y", 1, True]:
with self.subTest(value=value):
self._kwargs["dummy_param"] = value
params = self._create_params()
self.assertTrue(params.get_as_bool("dummy_param"))
def test_get_as_bool_returns_false_if_value_represents_false(self) -> None:
for value in ["0", "False", "faLse", "F", "f", "nO", "N", 0, False]:
with self.subTest(value=value):
self._kwargs["dummy_param"] = value
params = self._create_params()
self.assertFalse(params.get_as_bool("dummy_param"))
def test_get_as_bool_raises_error_if_value_is_invalid(self) -> None:
for value in ["01", "Flse", "Ture", "g", "4", "_", "truefalse", 2, -1]:
with self.subTest(value=value):
self._kwargs["dummy_param"] = value
params = self._create_params()
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration option 'dummy_param' does not represent a "
r"valid boolean value.$",
):
params.get_as_bool("dummy_param")
def test_get_as_int_returns_none_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertIsNone(params.get_as_int("dummy_param"))
def test_get_as_int_returns_default_if_key_does_not_exist(self) -> None:
params = self._create_params()
self.assertEqual(params.get_as_int("dummy_param", default=5), 5)
def test_get_as_int_returns_integer_if_value_represents_integer(self) -> None:
for value in ["0", "-10", "5", " 4", "4 ", " 4 ", 0, -4, 3]:
with self.subTest(value=value):
self._kwargs["dummy_param"] = value
params = self._create_params()
self.assertEqual(params.get_as_int("dummy_param"), int(cast(SupportsInt, value)))
def test_get_as_int_raises_error_if_value_is_invalid(self) -> None:
for value in ["a", "0a", "3b", "abc"]:
with self.subTest(value=value):
self._kwargs["dummy_param"] = value
params = self._create_params()
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration option 'dummy_param' does not represent a "
r"valid integer value.$",
):
params.get_as_int("dummy_param")
class _DummyRendezvousHandler(RendezvousHandler):
def __init__(self, params: RendezvousParameters) -> None:
self.params = params
def get_backend(self) -> str:
return "dummy_backend"
def next_rendezvous(self) -> Tuple[Store, int, int]:
raise NotImplementedError()
def is_closed(self) -> bool:
return False
def set_closed(self) -> None:
pass
def num_nodes_waiting(self) -> int:
return 0
def get_run_id(self) -> str:
return ""
def shutdown(self) -> bool:
return False
class RendezvousHandlerRegistryTest(TestCase):
def setUp(self) -> None:
self._params = RendezvousParameters(
backend="dummy_backend",
endpoint="dummy_endpoint",
run_id="dummy_run_id",
min_nodes=1,
max_nodes=1,
)
self._registry = RendezvousHandlerRegistry()
@staticmethod
def _create_handler(params: RendezvousParameters) -> RendezvousHandler:
return _DummyRendezvousHandler(params)
def test_register_registers_once_if_called_twice_with_same_creator(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
self._registry.register("dummy_backend", self._create_handler)
def test_register_raises_error_if_called_twice_with_different_creators(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
other_create_handler = lambda p: _DummyRendezvousHandler(p) # noqa: E731
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous backend 'dummy_backend' cannot be registered with "
rf"'{other_create_handler}' as it is already registered with '{self._create_handler}'.$",
):
self._registry.register("dummy_backend", other_create_handler)
def test_create_handler_returns_handler(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
handler = self._registry.create_handler(self._params)
self.assertIsInstance(handler, _DummyRendezvousHandler)
self.assertIs(handler.params, self._params)
def test_create_handler_raises_error_if_backend_is_not_registered(self) -> None:
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous backend 'dummy_backend' is not registered. Did you forget to call "
r"`register`\?$",
):
self._registry.create_handler(self._params)
def test_create_handler_raises_error_if_backend_names_do_not_match(self) -> None:
self._registry.register("dummy_backend_2", self._create_handler)
with self.assertRaisesRegex(
RuntimeError,
r"^The rendezvous backend 'dummy_backend' does not match the requested backend "
r"'dummy_backend_2'.$",
):
self._params.backend = "dummy_backend_2"
self._registry.create_handler(self._params)
|
pytorch-master
|
test/distributed/elastic/rendezvous/api_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from contextlib import closing
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.static_tcp_rendezvous import (
create_rdzv_handler,
)
from torch.distributed.elastic.utils import get_socket_with_port
class StaticTCPRendezvousTest(unittest.TestCase):
def test_missing_port(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="localhost",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_empty_endpoint(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_ipv6_addr(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:90",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_ipv6_addr_localhost(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="[::1]:90",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_get_backend(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="localhost:123",
run_id="test",
min_nodes=1,
max_nodes=1,
timeout=60,
rank=0,
)
static_rdzv = create_rdzv_handler(rdzv_params)
self.assertEqual("static", static_rdzv.get_backend())
def test_static_rdzv_multiple_calls(self):
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
master_addr = "localhost"
rdzv_params = RendezvousParameters(
backend="static",
endpoint=f"{master_addr}:{master_port}",
run_id="test_id",
min_nodes=1,
max_nodes=1,
rank=0,
)
rdzv_handler = create_rdzv_handler(rdzv_params)
# Call rendezvous two times
store, rank, world_size = rdzv_handler.next_rendezvous()
self.assertIsNotNone(store)
self.assertEqual(0, rank)
self.assertEqual(1, world_size)
store, rank, world_size = rdzv_handler.next_rendezvous()
self.assertIsNotNone(store)
self.assertEqual(0, rank)
self.assertEqual(1, world_size)
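# Illustrative sketch: a minimal, hedged example of configuring the "static"
# backend outside the tests above. The endpoint, run_id, and rank values are
# placeholders; next_rendezvous() returns the (store, rank, world_size) triple
# asserted in test_static_rdzv_multiple_calls.
def _example_static_rendezvous_sketch(master_addr: str, master_port: int):
    params = RendezvousParameters(
        backend="static",
        endpoint=f"{master_addr}:{master_port}",
        run_id="example_run",
        min_nodes=1,
        max_nodes=1,
        rank=0,
    )
    handler = create_rdzv_handler(params)
    store, rank, world_size = handler.next_rendezvous()
    return store, rank, world_size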
|
pytorch-master
|
test/distributed/elastic/rendezvous/static_rendezvous_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import os
import shutil
import sys
import tempfile
import unittest
from torch.distributed.elastic.multiprocessing.redirects import (
redirect,
redirect_stderr,
redirect_stdout,
)
libc = ctypes.CDLL("libc.so.6")
c_stderr = ctypes.c_void_p.in_dll(libc, "stderr")
class RedirectsTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_redirect_invalid_std(self):
with self.assertRaises(ValueError):
with redirect("stdfoo", os.path.join(self.test_dir, "stdfoo.log")):
pass
def test_redirect_stdout(self):
stdout_log = os.path.join(self.test_dir, "stdout.log")
# printing to stdout before redirect should go to console not stdout.log
print("foo first from python")
libc.printf(b"foo first from c\n")
os.system("echo foo first from cmd")
with redirect_stdout(stdout_log):
print("foo from python")
libc.printf(b"foo from c\n")
os.system("echo foo from cmd")
# make sure stdout is restored
print("foo again from python")
libc.printf(b"foo again from c\n")
os.system("echo foo again from cmd")
with open(stdout_log, "r") as f:
# since we print from python, c, cmd -> the stream is not ordered
# do a set comparison
lines = set(f.readlines())
self.assertEqual(
{"foo from python\n", "foo from c\n", "foo from cmd\n"}, lines
)
def test_redirect_stderr(self):
stderr_log = os.path.join(self.test_dir, "stderr.log")
print("bar first from python")
libc.fprintf(c_stderr, b"bar first from c\n")
os.system("echo bar first from cmd 1>&2")
with redirect_stderr(stderr_log):
print("bar from python", file=sys.stderr)
libc.fprintf(c_stderr, b"bar from c\n")
os.system("echo bar from cmd 1>&2")
print("bar again from python")
libc.fprintf(c_stderr, b"bar again from c\n")
os.system("echo bar again from cmd 1>&2")
with open(stderr_log, "r") as f:
lines = set(f.readlines())
self.assertEqual(
{"bar from python\n", "bar from c\n", "bar from cmd\n"}, lines
)
def test_redirect_both(self):
stdout_log = os.path.join(self.test_dir, "stdout.log")
stderr_log = os.path.join(self.test_dir, "stderr.log")
print("first stdout from python")
libc.printf(b"first stdout from c\n")
print("first stderr from python", file=sys.stderr)
libc.fprintf(c_stderr, b"first stderr from c\n")
with redirect_stdout(stdout_log), redirect_stderr(stderr_log):
print("redir stdout from python")
print("redir stderr from python", file=sys.stderr)
libc.printf(b"redir stdout from c\n")
libc.fprintf(c_stderr, b"redir stderr from c\n")
print("again stdout from python")
libc.fprintf(c_stderr, b"again stderr from c\n")
with open(stdout_log, "r") as f:
lines = set(f.readlines())
self.assertEqual(
{"redir stdout from python\n", "redir stdout from c\n"}, lines
)
with open(stderr_log, "r") as f:
lines = set(f.readlines())
self.assertEqual(
{"redir stderr from python\n", "redir stderr from c\n"}, lines
)
def _redirect_large_buffer(self, print_fn, num_lines=500_000):
stdout_log = os.path.join(self.test_dir, "stdout.log")
with redirect_stdout(stdout_log):
for i in range(num_lines):
print_fn(i)
with open(stdout_log) as fp:
actual = {int(line.split(":")[1]) for line in fp.readlines()}
expected = set(range(num_lines))
self.assertSetEqual(expected, actual)
def test_redirect_large_buffer_py(self):
def py_print(i):
print(f"py:{i}")
self._redirect_large_buffer(py_print)
def test_redirect_large_buffer_c(self):
def c_print(i):
libc.printf(bytes(f"c:{i}\n", "utf-8"))
self._redirect_large_buffer(c_print)
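# Illustrative sketch: basic usage of the redirect_stdout() context manager
# that the tests above exercise. Both Python-level and libc-level writes land
# in the log file while the context is active, and the original stream is
# restored on exit. The log_dir argument is a placeholder.
def _example_redirect_sketch(log_dir: str) -> str:
    stdout_log = os.path.join(log_dir, "stdout.log")
    with redirect_stdout(stdout_log):
        print("captured from python")
        libc.printf(b"captured from c\n")
    print("back on the console")  # no longer captured
    return stdout_log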
|
pytorch-master
|
test/distributed/elastic/multiprocessing/redirects_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import shutil
import sys
import tempfile
import time
import unittest
from concurrent.futures import wait
from concurrent.futures._base import ALL_COMPLETED
from concurrent.futures.thread import ThreadPoolExecutor
from typing import Dict, Set
from unittest import mock
from torch.distributed.elastic.multiprocessing.tail_log import TailLog
def write(max: int, sleep: float, file: str):
with open(file, "w") as fp:
for i in range(max):
print(i, file=fp, flush=True)
time.sleep(sleep)
class TailLogTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
self.threadpool = ThreadPoolExecutor()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_tail(self):
"""
        write() writes 0 - max (one number on each line) to a log file.
        Run nprocs such writers and tail the log files into a StringIO
        and validate that all lines are accounted for.
"""
nprocs = 32
max = 1000
interval_sec = 0.0001
log_files = {
local_rank: os.path.join(self.test_dir, f"{local_rank}_stdout.log")
for local_rank in range(nprocs)
}
dst = io.StringIO()
tail = TailLog("writer", log_files, dst, interval_sec).start()
# sleep here is intentional to ensure that the log tail
# can gracefully handle and wait for non-existent log files
time.sleep(interval_sec * 10)
futs = []
for local_rank, file in log_files.items():
f = self.threadpool.submit(
write, max=max, sleep=interval_sec * local_rank, file=file
)
futs.append(f)
wait(futs, return_when=ALL_COMPLETED)
self.assertFalse(tail.stopped())
tail.stop()
dst.seek(0)
        actual: Dict[str, Set[int]] = {}
for line in dst.readlines():
header, num = line.split(":")
nums = actual.setdefault(header, set())
nums.add(int(num))
self.assertEqual(nprocs, len(actual))
self.assertEqual(
{f"[writer{i}]": set(range(max)) for i in range(nprocs)}, actual
)
self.assertTrue(tail.stopped())
def test_tail_no_files(self):
"""
Ensures that the log tail can gracefully handle no log files
in which case it does nothing.
"""
tail = TailLog("writer", log_files={}, dst=sys.stdout).start()
self.assertFalse(tail.stopped())
tail.stop()
self.assertTrue(tail.stopped())
def test_tail_logfile_never_generates(self):
"""
        Ensures that we properly shut down the threadpool
        even when the logfile is never generated.
"""
tail = TailLog("writer", log_files={0: "foobar.log"}, dst=sys.stdout).start()
tail.stop()
self.assertTrue(tail.stopped())
self.assertTrue(tail._threadpool._shutdown)
@mock.patch("torch.distributed.elastic.multiprocessing.tail_log.log")
def test_tail_logfile_error_in_tail_fn(self, mock_logger):
"""
Ensures that when there is an error in the tail_fn (the one that runs in the
threadpool), it is dealt with and raised properly.
"""
        # try giving tail log a directory (should fail with an IsADirectoryError)
tail = TailLog("writer", log_files={0: self.test_dir}, dst=sys.stdout).start()
tail.stop()
mock_logger.error.assert_called_once()
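# Illustrative sketch: standalone TailLog usage mirroring the tests above.
# Each line read from a per-rank log file is re-emitted to dst with a
# "[writer<rank>]" prefix derived from the name passed as the first argument.
# The log_dir argument and the 0.1s sleep are placeholders.
def _example_tail_log_sketch(log_dir: str) -> None:
    log_files = {rank: os.path.join(log_dir, f"{rank}_stdout.log") for rank in range(2)}
    tail = TailLog("writer", log_files, sys.stdout).start()
    try:
        time.sleep(0.1)  # give hypothetical writers time to produce output
    finally:
        tail.stop()
    assert tail.stopped()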
|
pytorch-master
|
test/distributed/elastic/multiprocessing/tail_log_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import multiprocessing
import os
import shutil
import signal
import sys
import tempfile
import time
from itertools import product
from typing import Callable, Dict, List, Union
from unittest import mock
import torch
import torch.multiprocessing as mp
from torch.distributed.elastic.multiprocessing import ProcessFailure, start_processes
from torch.distributed.elastic.multiprocessing.api import (
MultiprocessContext,
RunProcsResult,
SignalException,
Std,
_validate_full_rank,
_wrap,
to_map,
)
from torch.distributed.elastic.multiprocessing.errors import ErrorHandler
from torch.testing._internal.common_utils import (
IS_CI,
IS_MACOS,
IS_WINDOWS,
NO_MULTIPROCESSING_SPAWN,
TEST_WITH_ASAN,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
TestCase,
run_tests,
sandcastle_skip_if,
)
class RunProcResultsTest(TestCase):
def setUp(self):
super().setUp()
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
def tearDown(self):
super().tearDown()
shutil.rmtree(self.test_dir)
def test_is_failed(self):
pr_success = RunProcsResult(return_values={0: "a", 1: "b"})
self.assertFalse(pr_success.is_failed())
fail0 = ProcessFailure(
local_rank=0, pid=998, exitcode=1, error_file="ignored.json"
)
pr_fail = RunProcsResult(failures={0: fail0})
self.assertTrue(pr_fail.is_failed())
def test_get_failures(self):
error_file0 = os.path.join(self.test_dir, "error0.json")
error_file1 = os.path.join(self.test_dir, "error1.json")
eh = ErrorHandler()
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": error_file0}):
eh.record_exception(RuntimeError("error 0"))
        with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": error_file1}):
eh.record_exception(RuntimeError("error 1"))
fail0 = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=error_file0
)
fail1 = ProcessFailure(
local_rank=1, pid=998, exitcode=3, error_file=error_file1
)
fail2 = ProcessFailure(
local_rank=2, pid=999, exitcode=15, error_file="no_exist.json"
)
self.assertLessEqual(fail0.timestamp, fail1.timestamp)
self.assertLessEqual(fail1.timestamp, fail2.timestamp)
class StdTest(TestCase):
def test_from_value(self):
self.assertEqual(Std.NONE, Std.from_str("0"))
self.assertEqual(Std.OUT, Std.from_str("1"))
self.assertEqual(Std.ERR, Std.from_str("2"))
self.assertEqual(Std.ALL, Std.from_str("3"))
def test_from_value_map(self):
self.assertEqual({0: Std.OUT}, Std.from_str("0:1"))
self.assertEqual({0: Std.OUT, 1: Std.OUT}, Std.from_str("0:1,1:1"))
def test_from_str_bad_input(self):
bad_inputs = ["0:1,", "11", "0:1,1", "1,0:1"]
for bad in bad_inputs:
with self.subTest(bad=bad):
with self.assertRaises(ValueError):
Std.from_str(bad)
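# Illustrative sketch: how a redirects specification string is turned into a
# complete per-rank mapping. Std.from_str() parses the user-facing string and
# to_map() fills every unspecified local rank with Std.NONE, which is the
# behaviour test_to_map() below asserts. The spec string is a placeholder.
def _example_std_mapping_sketch(local_world_size: int):
    # "0:1,1:2" reads as "local rank 0 -> stdout only, local rank 1 -> stderr only".
    per_rank = Std.from_str("0:1,1:2")
    return to_map(per_rank, local_world_size)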
def echo0(msg: str) -> None:
"""
void function
"""
print(msg)
def echo1(msg: str, exitcode: int = 0) -> str:
"""
returns ``msg`` or exits with the given exitcode (if nonzero)
"""
rank = int(os.environ["RANK"])
if exitcode != 0:
print(f"exit {exitcode} from {rank}", file=sys.stderr)
sys.exit(exitcode)
else:
print(f"{msg} stdout from {rank}")
print(f"{msg} stderr from {rank}", file=sys.stderr)
return f"{msg}_{rank}"
def echo2(msg: str, fail: bool = False) -> str:
"""
returns ``msg`` or raises a RuntimeError if ``fail`` is set
"""
if fail:
raise RuntimeError(msg)
return msg
def echo_large(size: int) -> Dict[int, str]:
"""
    returns a large output ({0: "test0", 1: "test1", ..., (size-1): f"test{size-1}"})
"""
out = {}
for idx in range(0, size):
out[idx] = f"test{idx}"
return out
def echo3(msg: str, fail: bool = False) -> str:
"""
returns ``msg`` or induces a SIGSEGV if ``fail`` is set
"""
if fail:
ctypes.string_at(0)
return msg
def dummy_compute() -> torch.Tensor:
"""
returns a predefined size random Tensor
"""
return torch.rand(100, 100)
def redirects_oss_test() -> List[Std]:
return [
Std.NONE,
]
def redirects_all() -> List[Std]:
return [
Std.NONE,
Std.OUT,
Std.ERR,
Std.ALL,
]
def bin(name: str):
dir = os.path.dirname(__file__)
return os.path.join(dir, "bin", name)
def wait_fn(wait_time: int = 300) -> None:
time.sleep(wait_time)
print("Finished waiting")
def start_processes_zombie_test(
idx: int,
entrypoint: Union[str, Callable],
mp_queue: mp.Queue,
log_dir: str,
nproc: int = 2,
) -> None:
"""
Starts processes
"""
args = {}
envs = {}
for idx in range(nproc):
args[idx] = ()
envs[idx] = {}
pc = start_processes(
name="zombie_test",
entrypoint=entrypoint,
args=args,
envs=envs,
log_dir=log_dir,
redirects=Std.NONE,
)
my_pid = os.getpid()
mp_queue.put(my_pid)
for child_pid in pc.pids().values():
mp_queue.put(child_pid)
try:
pc.wait(period=1, timeout=300)
except SignalException as e:
pc.close(e.sigval)
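# Illustrative sketch: the smallest start_processes() invocation of the kind
# exercised by the tests below. Two copies of echo1 run under the "spawn"
# start method, wait() blocks until both finish, and return_values maps each
# local rank to "hello_<rank>". The log_dir argument is a placeholder.
def _example_start_processes_sketch(log_dir: str):
    pc = start_processes(
        name="example",
        entrypoint=echo1,
        args={0: ("hello",), 1: ("hello",)},
        envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
        log_dir=log_dir,
        start_method="spawn",
    )
    return pc.wait(period=0.1).return_values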
# tests incompatible with tsan or asan
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
class StartProcessesTest(TestCase):
def setUp(self):
super().setUp()
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
self._start_methods = ["spawn"]
def tearDown(self):
super().tearDown()
shutil.rmtree(self.test_dir)
def log_dir(self):
return tempfile.mkdtemp(dir=self.test_dir)
def assert_in_file(self, expected: List[str], filename: str) -> None:
expected = [f"{line.rstrip()}\n" for line in expected]
with open(filename, "r") as fp:
actual = fp.readlines()
for line in expected:
self.assertIn(line, actual)
def assert_pids_noexist(self, pids: Dict[int, int]):
for local_rank, pid in pids.items():
with self.assertRaises(
OSError, msg=f"local_rank: {local_rank} pid: {pid} should not exist"
):
os.kill(pid, 0)
def test_to_map(self):
local_world_size = 2
self.assertEqual(
{0: Std.OUT, 1: Std.OUT}, to_map(Std.OUT, local_world_size)
)
self.assertEqual(
{0: Std.NONE, 1: Std.OUT}, to_map({1: Std.OUT}, local_world_size)
)
self.assertEqual(
{0: Std.ERR, 1: Std.OUT},
to_map({0: Std.ERR, 1: Std.OUT}, local_world_size),
)
def test_invalid_log_dir(self):
with tempfile.NamedTemporaryFile(dir=self.test_dir) as not_a_dir:
cases = {
"does_not_exist": FileNotFoundError,
not_a_dir.name: NotADirectoryError,
# test_dir is not empty since we touched not_a_dir file
self.test_dir: RuntimeError,
}
for (log_dir, expected_error) in cases.items():
with self.subTest(log_dir=log_dir, expected_error=expected_error):
with self.assertRaises(expected_error):
start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",)},
envs={0: {"RANK": "0"}},
log_dir=log_dir,
)
def test_args_env_len_mismatch(self):
cases = [
# 1 x args; 2 x envs
{
"args": {0: ("hello",)},
"envs": {0: {"RANK": "0"}, 1: {"RANK": "1"}},
},
# 2 x args; 1 x envs
{
"args": {0: ("hello",), 1: ("world",)},
"envs": {0: {"RANK": "0"}},
},
]
for kwds in cases:
args = kwds["args"]
envs = kwds["envs"]
with self.subTest(args=args, envs=envs):
with self.assertRaises(RuntimeError):
start_processes(
name="echo",
entrypoint=echo1,
args=args,
envs=envs,
log_dir=self.log_dir(),
)
def test_pcontext_wait(self):
pc = start_processes(
name="sleep",
entrypoint=time.sleep,
args={0: (1,)},
envs={0: {}},
log_dir=self.log_dir(),
start_method="spawn",
)
self.assertIsNone(pc.wait(timeout=0.1, period=0.01))
self.assertIsNotNone(pc.wait(period=0.1))
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
def test_multiprocess_context_close(self):
pc = start_processes(
name="sleep",
entrypoint=time.sleep,
args={0: (1,)},
envs={0: {}},
log_dir=self.log_dir(),
start_method="spawn",
)
pids = pc.pids()
pc.close()
self.assert_pids_noexist(pids)
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
def test_subprocess_context_close(self):
pc = start_processes(
name="sleep",
entrypoint=bin("zombie_test.py"),
args={0: (1,)},
envs={0: {}},
log_dir=self.log_dir(),
)
pids = pc.pids()
pc.close()
self.assert_pids_noexist(pids)
def test_function_with_tensor(self):
for start_method in self._start_methods:
pc = start_processes(
name="dummy_compute",
entrypoint=dummy_compute,
args={},
envs={},
log_dir=self.log_dir(),
start_method=start_method,
)
results = pc.wait()
self.assert_pids_noexist(pc.pids())
for return_value in results.return_values.values():
self.assertIsInstance(return_value, torch.Tensor)
self.assertEqual((100, 100), return_value.shape)
def test_void_function(self):
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
name="echo",
entrypoint=echo0,
args={0: ("hello",), 1: ("world",)},
envs={0: {}, 1: {}},
log_dir=self.log_dir(),
start_method=start_method,
)
results = pc.wait(period=0.1)
self.assertEqual({0: None, 1: None}, results.return_values)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan")
def test_function_large_ret_val(self):
            # The python multiprocessing.Queue module is built on top of pipes.
            # This means that if a single object is larger than the pipe buffer,
            # the writer process will block until the reader process starts
            # reading from the pipe.
            # This test makes the worker fn return a huge output, around ~10 MB.
size = 200000
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
name="echo",
entrypoint=echo_large,
args={0: (size,), 1: (size,), 2: (size,), 3: (size,)},
envs={0: {}, 1: {}, 2: {}, 3: {}},
log_dir=self.log_dir(),
start_method=start_method,
)
results = pc.wait(period=0.1)
for i in range(pc.nprocs):
self.assertEqual(size, len(results.return_values[i]))
def test_function_raise(self):
"""
run 2x copies of echo2, raise an exception on the first
"""
RAISE = True
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
log_dir = self.log_dir()
pc = start_processes(
name="echo",
entrypoint=echo2,
args={0: ("hello", RAISE), 1: ("world",)},
envs={0: {}, 1: {}},
log_dir=log_dir,
start_method=start_method,
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertEqual(1, len(results.failures))
self.assertFalse(results.return_values)
failure = results.failures[0]
error_file = failure.error_file
error_file_data = failure.error_file_data
self.assertEqual(1, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
self.assertEqual(
os.path.join(log_dir, "0", "error.json"), error_file
)
self.assertEqual(
int(error_file_data["message"]["extraInfo"]["timestamp"]),
int(failure.timestamp),
)
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
########################################
# start_processes as binary tests
########################################
def test_binary_exit(self):
FAIL = 138
pc = start_processes(
name="echo",
entrypoint=bin("echo1.py"),
args={0: ("--exitcode", FAIL, "foo"), 1: ("--exitcode", 0, "bar")},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
redirects={0: Std.ALL},
)
results = pc.wait(period=0.1)
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
failure = results.failures[0]
self.assertEqual(138, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual("<NONE>", failure.error_file_data["message"])
self.assert_in_file([f"exit {FAIL} from 0"], results.stderrs[0])
self.assert_in_file([], results.stdouts[0])
self.assertFalse(results.stderrs[1])
self.assertFalse(results.stdouts[1])
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
def test_binary_raises(self):
pc = start_processes(
name="echo",
entrypoint=bin("echo2.py"),
args={0: ("--raises", "true", "foo"), 1: ("bar",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
failure = results.failures[0]
self.assertEqual(1, failure.exitcode)
self.assertEqual("<NONE>", failure.error_file_data["message"])
self.assertEqual("<N/A>", failure.signal_name())
def test_binary_incorrect_entrypoint(self):
with self.assertRaises(FileNotFoundError):
start_processes(
name="echo",
entrypoint="does_not_exist.py",
args={0: ("foo"), 1: ("bar",)},
envs={0: {}, 1: {}},
log_dir=self.log_dir(),
)
def test_validate_full_rank(self):
with self.assertRaises(RuntimeError):
_validate_full_rank({}, 10, "")
@sandcastle_skip_if(
NO_MULTIPROCESSING_SPAWN,
"Disabled for environments that \
don't support multiprocessing with spawn start method",
)
def test_multiprocessing_context_poll_raises_exception(self):
mp_context = MultiprocessContext(
name="test_mp",
entrypoint=echo0,
args={0: (0, 1)},
envs={},
stdouts={0: {}},
stderrs={0: {}},
tee_stdouts={0: "tee_stdout"},
tee_stderrs={0: "tee_stderr"},
error_files={0: "test_file"},
start_method="spawn",
)
mp_context._pc = mock.Mock()
# Using mock since we cannot just set exitcode on process
mock_process = mock.Mock()
mock_process.exitcode = -1
mp_context._pc.processes = [mock_process]
e = mp.ProcessRaisedException(msg="test msg", error_index=0, error_pid=123)
mp_context._pc.join.side_effect = e
with mock.patch.object(mp_context, "close"):
run_result = mp_context._poll()
self.assertEqual(1, len(run_result.failures))
failure = run_result.failures[0]
self.assertEqual(
"Signal 1 (SIGHUP) received by PID 123", failure.message
)
# tests incompatible with tsan or asan, the redirect functionality does not work on macos or windows
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
class StartProcessesListTest(StartProcessesTest):
########################################
# start_processes as binary tests
########################################
def test_function(self):
for start_method, redirs in product(
self._start_methods, redirects_oss_test()
):
with self.subTest(start_method=start_method, redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
start_method=start_method,
redirects=redirs,
)
results = pc.wait(period=0.1)
nprocs = pc.nprocs
self.assert_pids_noexist(pc.pids())
self.assertEqual(
{i: f"hello_{i}" for i in range(nprocs)}, results.return_values
)
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
def test_binary(self):
for redirs in redirects_oss_test():
with self.subTest(redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=bin("echo1.py"),
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
redirects=redirs,
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
# currently binaries return {rank: None}
self.assertEqual(2, len(results.return_values))
self.assertFalse(results.is_failed())
nprocs = pc.nprocs
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
def test_binary_redirect_and_tee(self):
pc = start_processes(
name="trainer",
entrypoint=bin("echo1.py"),
args={0: ("hello",), 1: ("world",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
start_method="spawn",
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(["hello stdout from 0"], pc.stdouts[0])
self.assert_in_file(["hello stderr from 0"], pc.stderrs[0])
self.assert_in_file(["world stderr from 1"], pc.stderrs[1])
self.assertFalse(pc.stdouts[1])
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
# tests incompatible with tsan or asan, the redirect functionality does not work on macos or windows
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS or IS_CI):
class StartProcessesNotCITest(StartProcessesTest):
def test_wrap_bad(self):
none = ""
stdout_log = os.path.join(self.test_dir, "stdout.log")
stderr_log = os.path.join(self.test_dir, "stderr.log")
redirs = [
(none, none),
(none, stderr_log),
(stdout_log, none),
(stdout_log, stderr_log),
]
for stdout_redir, stderr_redir in redirs:
queue = multiprocessing.SimpleQueue()
worker_finished_event_mock = mock.Mock()
_wrap(
local_rank=0,
fn=echo1,
args={0: ("hello",)},
envs={0: {"RANK": "0"}},
stdout_redirects={0: stdout_redir},
stderr_redirects={0: stderr_redir},
ret_vals={0: queue},
queue_finished_reading_event=worker_finished_event_mock,
)
self.assertEqual("hello_0", queue.get())
if stdout_redir:
self.assert_in_file(["hello stdout from 0"], stdout_log)
if stderr_redir:
self.assert_in_file(["hello stderr from 0"], stderr_log)
worker_finished_event_mock.wait.assert_called_once()
def test_binary_signal(self):
pc = start_processes(
name="echo",
entrypoint=bin("echo3.py"),
args={0: ("--segfault", "true", "foo"), 1: ("bar",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
failure = results.failures[0]
self.assertNotEqual(signal.SIGSEGV, failure.exitcode)
if TEST_WITH_ASAN or TEST_WITH_TSAN:
# ASAN/TSAN exit code is 1.
self.assertEqual("<N/A>", failure.signal_name())
else:
self.assertEqual("SIGSEGV", failure.signal_name())
self.assertEqual("<NONE>", failure.error_file_data["message"])
def test_function_redirect_and_tee(self):
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
log_dir = self.log_dir()
pc = start_processes(
name="trainer",
entrypoint=echo1,
args={0: ("hello",), 1: ("world",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=log_dir,
start_method="spawn",
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(["hello stdout from 0"], pc.stdouts[0])
self.assert_in_file(["hello stderr from 0"], pc.stderrs[0])
self.assert_in_file(["world stderr from 1"], pc.stderrs[1])
self.assertFalse(pc.stdouts[1])
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
def test_function(self):
for start_method, redirs in product(self._start_methods, redirects_all()):
with self.subTest(start_method=start_method, redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=self.log_dir(),
start_method=start_method,
redirects=redirs,
)
results = pc.wait(period=0.1)
nprocs = pc.nprocs
self.assert_pids_noexist(pc.pids())
self.assertEqual(
{i: f"hello_{i}" for i in range(nprocs)}, results.return_values
)
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
def test_function_exit(self):
"""
            Runs 2 copies of ``echo1`` and fails (exits) the first one.
            Functions that exit from Python do not generate an error file
            (even if they are decorated with @record)
"""
FAIL = 138
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
log_dir = self.log_dir()
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello", FAIL), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
log_dir=log_dir,
start_method=start_method,
redirects={0: Std.ERR},
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
self.assertFalse(results.return_values)
failure = results.failures[0]
error_file = failure.error_file
self.assertEqual(FAIL, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
self.assertEqual("<N/A>", error_file)
self.assertEqual(
"To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html",
failure.message,
)
self.assertLessEqual(failure.timestamp, int(time.time()))
self.assert_in_file([f"exit {FAIL} from 0"], results.stderrs[0])
self.assertFalse(results.stdouts[0])
self.assertFalse(results.stderrs[1])
self.assertFalse(results.stdouts[1])
self.assertTrue(pc._stderr_tail.stopped())
self.assertTrue(pc._stdout_tail.stopped())
def test_no_zombie_process_binary(self):
signals = [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]
for s in signals:
self._test_zombie_workflow(bin("zombie_test.py"), s)
def test_no_zombie_process_function(self):
signals = [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]
for s in signals:
self._test_zombie_workflow(wait_fn, s)
def _test_zombie_workflow(
self, entrypoint: Union[str, Callable], signal_to_send: signal.Signals
) -> None:
mp_queue = mp.get_context("spawn").Queue()
child_nproc = 2
ctx = mp.spawn(
start_processes_zombie_test,
nprocs=1,
args=(entrypoint, mp_queue, self.log_dir(), child_nproc),
join=False,
)
total_processes = child_nproc + 1
pids = []
for _ in range(total_processes):
pids.append(mp_queue.get(timeout=120))
parent_pid = pids[0]
child_pids = pids[1:]
os.kill(parent_pid, signal.SIGTERM)
# Wait to give time for signal handlers to finish work
time.sleep(5)
for child_pid in child_pids:
# Killing parent should kill all children, we expect that each call to
# os.kill would raise OSError
with self.assertRaises(OSError):
os.kill(child_pid, 0)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/multiprocessing/api_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ctypes
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="test binary, triggers a segfault (SIGSEGV)"
)
parser.add_argument("--segfault", type=bool, default=False)
parser.add_argument("msg", type=str)
args = parser.parse_args()
rank = int(os.environ["RANK"])
if args.segfault:
ctypes.string_at(0)
else:
print(f"{args.msg} from {rank}")
|
pytorch-master
|
test/distributed/elastic/multiprocessing/bin/echo3.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# TODO <DELETE_ME>
|
pytorch-master
|
test/distributed/elastic/multiprocessing/bin/test_script.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="test binary, raises a RuntimeError")
parser.add_argument("--raises", type=bool, default=False)
parser.add_argument("msg", type=str)
args = parser.parse_args()
rank = int(os.environ["RANK"])
if args.raises:
raise RuntimeError(f"raised from {rank}")
else:
print(f"{args.msg} from {rank}")
|
pytorch-master
|
test/distributed/elastic/multiprocessing/bin/echo2.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="test binary, exits with exitcode")
parser.add_argument("--exitcode", type=int, default=0)
parser.add_argument("msg", type=str)
args = parser.parse_args()
rank = int(os.environ["RANK"])
exitcode = args.exitcode
if exitcode != 0:
print(f"exit {exitcode} from {rank}", file=sys.stderr)
sys.exit(exitcode)
else:
print(f"{args.msg} stdout from {rank}")
print(f"{args.msg} stderr from {rank}", file=sys.stderr)
|
pytorch-master
|
test/distributed/elastic/multiprocessing/bin/echo1.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time
if __name__ == "__main__":
time.sleep(600)
print("finished work")
|
pytorch-master
|
test/distributed/elastic/multiprocessing/bin/zombie_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
import json
import os
import shutil
import signal
import tempfile
import unittest
from unittest import mock
from torch.distributed.elastic.multiprocessing.errors import (
ChildFailedError,
ProcessFailure,
record,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler
class SentinelError(Exception):
# exists so that we can validate that
# the correct error is raised and propagated
pass
@record
def raise_exception_fn():
raise SentinelError("foobar")
@record
def good_fn():
print("hello world")
@record
def raise_child_failure_error_fn(name, child_error_file=""):
if child_error_file:
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": child_error_file}):
ErrorHandler().record_exception(SentinelError("foobar"))
pf = ProcessFailure(local_rank=0, pid=997, exitcode=1, error_file=child_error_file)
raise ChildFailedError(name, {0: pf})
def read_resource_file(resource_file: str) -> str:
with open(os.path.join(os.path.dirname(__file__), resource_file), "r") as fp:
return "".join(fp.readlines())
class ApiTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
self.test_error_file = os.path.join(self.test_dir, "error.json")
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_failure_incorrect_reply_file(self):
content = {"unknown_key": "unknown_value"}
with open(self.test_error_file, "w") as fp:
json.dump(content, fp)
with self.assertRaises(Exception):
ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
def failure_with_error_file(self, exception):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
ErrorHandler().record_exception(exception)
return ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
def failure_without_error_file(self, exitcode):
return ProcessFailure(
local_rank=0, pid=997, exitcode=exitcode, error_file="ignored.json"
)
def test_process_failure_new_format(self):
error_data = {"message": "test error message", "timestamp": 10}
with open(self.test_error_file, "w") as fp:
json.dump(error_data, fp)
pf = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
self.assertEqual("test error message", pf.message)
self.assertEqual(10, pf.timestamp)
def test_process_mast_error_format(self):
error_data = {"message": "test error message", "timestamp": "10"}
with open(self.test_error_file, "w") as fp:
json.dump(error_data, fp)
pf = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=self.test_error_file
)
self.assertEqual("test error message", pf.message)
self.assertEqual(10, pf.timestamp)
def test_process_failure(self):
pf = self.failure_with_error_file(exception=SentinelError("foobar"))
self.assertEqual(0, pf.local_rank)
self.assertEqual(997, pf.pid)
self.assertEqual(1, pf.exitcode)
self.assertEqual(self.test_error_file, pf.error_file)
self.assertEqual(
pf.error_file_data["message"]["extraInfo"]["timestamp"], str(pf.timestamp)
)
self.assertTrue(pf.message) # check not None and not "" (empty string)
self.assertEqual("<N/A>", pf.signal_name())
def test_process_failure_signal(self):
pf = self.failure_without_error_file(exitcode=-signal.SIGSEGV)
self.assertEqual("SIGSEGV", pf.signal_name())
self.assertEqual(
f"Signal {signal.SIGSEGV} (SIGSEGV) received by PID {pf.pid}", pf.message
)
def test_process_failure_no_error_file(self):
pf = self.failure_without_error_file(exitcode=138)
self.assertEqual("<N/A>", pf.signal_name())
self.assertEqual("<N/A>", pf.error_file)
self.assertEqual(
"To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html",
pf.message,
)
def test_child_failed_error(self):
pf0 = self.failure_with_error_file(exception=SentinelError("rank 0"))
pf1 = self.failure_with_error_file(exception=SentinelError("rank 1"))
pf2 = self.failure_without_error_file(exitcode=138)
ex = ChildFailedError("trainer.par", {0: pf0, 1: pf1, 2: pf2})
self.assertEqual(pf0, ex.get_first_failure()[1])
        # print is intentional and should print something like this:
"""
*********************************************
trainer.par FAILED
=============================================
Root Cause:
[0]:
time: 2020-11-25_21:22:31
rank: 0 (local_rank: 0)
exitcode: 1 (pid: 997)
error_file: /tmp/ApiTesttbb37ier/error.json
traceback: "SentinelError: rank 0"
=============================================
Other Failures:
[1]:
time: 2020-11-25_21:22:31
rank: 1 (local_rank: 0)
exitcode: 1 (pid: 997)
error_file: /tmp/ApiTesttbb37ier/error.json
msg: "SentinelError: rank 1"
[2]:
time: 2020-11-25_21:22:31
rank: 2 (local_rank: 0)
exitcode: 138 (pid: 997)
error_file: <N/A>
traceback: To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
*********************************************
"""
print(ex)
def test_record(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(SentinelError):
raise_exception_fn()
with open(self.test_error_file, "r") as fp:
err = json.load(fp)
self.assertIsNotNone(err["message"]["message"])
self.assertIsNotNone(err["message"]["extraInfo"]["py_callstack"])
self.assertIsNotNone(err["message"]["extraInfo"]["timestamp"])
def test_record_no_error_file(self):
with mock.patch.dict(os.environ, {}):
with self.assertRaises(SentinelError):
raise_exception_fn()
# no error file should have been generated
self.assertFalse(os.path.isfile(self.test_error_file))
def test_record_good_fn(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
good_fn()
# function did not error; no error file should be produced
self.assertFalse(os.path.isfile(self.test_error_file))
def test_record_child_failure(self):
trainer_log_dir = os.path.join(self.test_dir, "trainer", "0")
os.makedirs(trainer_log_dir)
trainer_error_file = os.path.join(trainer_log_dir, "error.json")
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(ChildFailedError) as cm:
raise_child_failure_error_fn("trainer", trainer_error_file)
pf = cm.exception.get_first_failure()[1]
            # compare the worker error file with the reply file; the error code
            # should have been overridden with the child's exit code
            with open(pf.error_file, "r") as fp:
                expect = json.load(fp)
            expect["message"]["errorCode"] = pf.exitcode
            with open(self.test_error_file, "r") as fp:
                actual = json.load(fp)
            # note: assertTrue(a, b) treats ``b`` as a failure message and passes
            # for any non-empty ``a``; assertEqual performs the intended comparison
            self.assertEqual(
                json.dumps(expect, sort_keys=True),
                json.dumps(actual, sort_keys=True),
            )
def test_record_child_failure_no_child_error_file(self):
with mock.patch.dict(
os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}
):
with self.assertRaises(ChildFailedError):
raise_child_failure_error_fn("trainer")
# @record should only copy child error file when ChildFailedError
# is raised - it should NOT record ChildFailedError itself
# it SHOULD re-raise ChildFailedError for any upstream system
# to handle it.
self.assertFalse(os.path.isfile(self.test_error_file))
|
pytorch-master
|
test/distributed/elastic/multiprocessing/errors/api_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
import filecmp
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler
from torch.distributed.elastic.multiprocessing.errors.handlers import get_error_handler
def raise_exception_fn():
raise RuntimeError("foobar")
class GetErrorHandlerTest(unittest.TestCase):
def test_get_error_handler(self):
self.assertTrue(isinstance(get_error_handler(), ErrorHandler))
class ErrorHandlerTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
self.test_error_file = os.path.join(self.test_dir, "error.json")
def tearDown(self):
shutil.rmtree(self.test_dir)
@patch("faulthandler.enable")
def test_initialize(self, fh_enable_mock):
ErrorHandler().initialize()
fh_enable_mock.assert_called_once()
@patch("faulthandler.enable", side_effect=RuntimeError)
def test_initialize_error(self, fh_enable_mock):
# makes sure that initialize handles errors gracefully
ErrorHandler().initialize()
fh_enable_mock.assert_called_once()
def test_record_exception(self):
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}):
eh = ErrorHandler()
eh.initialize()
try:
raise_exception_fn()
except Exception as e:
eh.record_exception(e)
with open(self.test_error_file, "r") as fp:
err = json.load(fp)
# error file content example:
                # {
                #   "message": {
                #     "message": "RuntimeError: foobar",
                #     "extraInfo": {
                #       "py_callstack": "Traceback (most recent call last):\n <... OMITTED ...>",
                #       "timestamp": "1605774851"
                #     }
                #   }
                # }
self.assertIsNotNone(err["message"]["message"])
self.assertIsNotNone(err["message"]["extraInfo"]["py_callstack"])
self.assertIsNotNone(err["message"]["extraInfo"]["timestamp"])
def test_record_exception_no_error_file(self):
# make sure record does not fail when no error file is specified in env vars
with patch.dict(os.environ, {}):
eh = ErrorHandler()
eh.initialize()
try:
raise_exception_fn()
except Exception as e:
eh.record_exception(e)
def test_dump_error_file(self):
src_error_file = os.path.join(self.test_dir, "src_error.json")
eh = ErrorHandler()
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": src_error_file}):
eh.record_exception(RuntimeError("foobar"))
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": self.test_error_file}):
eh.dump_error_file(src_error_file)
self.assertTrue(filecmp.cmp(src_error_file, self.test_error_file))
with patch.dict(os.environ, {}):
eh.dump_error_file(src_error_file)
# just validate that dump_error_file works when
# my error file is not set
# should just log an error with src_error_file pretty printed
def test_dump_error_file_overwrite_existing(self):
dst_error_file = os.path.join(self.test_dir, "dst_error.json")
src_error_file = os.path.join(self.test_dir, "src_error.json")
eh = ErrorHandler()
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": dst_error_file}):
eh.record_exception(RuntimeError("foo"))
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": src_error_file}):
eh.record_exception(RuntimeError("bar"))
with patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": dst_error_file}):
eh.dump_error_file(src_error_file)
self.assertTrue(filecmp.cmp(src_error_file, dst_error_file))
|
pytorch-master
|
test/distributed/elastic/multiprocessing/errors/error_handler_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import signal
import time
import unittest
import unittest.mock as mock
import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
run_tests,
IS_WINDOWS,
IS_MACOS,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
TestCase
)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
# func2 should time out
def func2(n, mp_queue):
if mp_queue is not None:
timer.configure(timer.LocalTimerClient(mp_queue))
if n > 0:
with timer.expires(after=0.1):
func2(n - 1, None)
time.sleep(0.2)
class LocalTimerTest(TestCase):
def setUp(self):
super().setUp()
self.ctx = mp.get_context("spawn")
self.mp_queue = self.ctx.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
self.server.start()
def tearDown(self):
super().tearDown()
self.server.stop()
def test_exception_propagation(self):
with self.assertRaises(Exception, msg="foobar"):
with timer.expires(after=1):
raise Exception("foobar")
def test_no_client(self):
# no timer client configured; exception expected
timer.configure(None)
with self.assertRaises(RuntimeError):
with timer.expires(after=1):
pass
def test_client_interaction(self):
# no timer client configured but one passed in explicitly
# no exception expected
timer_client = timer.LocalTimerClient(self.mp_queue)
timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire)
timer_client.release = mock.MagicMock(wraps=timer_client.release)
with timer.expires(after=1, scope="test", client=timer_client):
pass
timer_client.acquire.assert_called_once_with("test", mock.ANY)
timer_client.release.assert_called_once_with("test")
def test_happy_path(self):
timer.configure(timer.LocalTimerClient(self.mp_queue))
with timer.expires(after=0.5):
time.sleep(0.1)
def test_get_timer_recursive(self):
"""
If a function acquires a countdown timer with default scope,
then recursive calls to the function should re-acquire the
            timer rather than creating a new one. That is, only the last
recursive call's timer will take effect.
"""
self.server.start()
timer.configure(timer.LocalTimerClient(self.mp_queue))
# func should not time out
def func(n):
if n > 0:
with timer.expires(after=0.1):
func(n - 1)
time.sleep(0.05)
func(4)
p = self.ctx.Process(target=func2, args=(2, self.mp_queue))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
@staticmethod
def _run(mp_queue, timeout, duration):
client = timer.LocalTimerClient(mp_queue)
timer.configure(client)
with timer.expires(after=timeout):
time.sleep(duration)
@unittest.skipIf(TEST_WITH_TSAN, "test is tsan incompatible")
def test_timer(self):
timeout = 0.1
duration = 1
p = mp.Process(target=self._run, args=(self.mp_queue, timeout, duration))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
def _enqueue_on_interval(mp_queue, n, interval, sem):
"""
    Enqueues ``n`` timer requests into ``mp_queue``, one element every
    ``interval`` seconds. Releases the given semaphore once before going to work.
"""
sem.release()
for i in range(0, n):
mp_queue.put(TimerRequest(i, "test_scope", 0))
time.sleep(interval)
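
# A minimal sketch (hypothetical helper, not used by the tests below) of the
# consumer side of MultiprocessingRequestQueue, mirroring what the watchdog in
# LocalTimerServer does: drain up to ``batch`` TimerRequest objects per
# iteration, waiting at most ``timeout`` seconds for them to arrive.
def _example_drain_requests(mp_queue, batch=10, timeout=0.1):
    request_queue = MultiprocessingRequestQueue(mp_queue)
    # ``get`` returns anywhere between 0 and ``batch`` requests, depending on
    # how many the producer managed to enqueue within ``timeout``
    return request_queue.get(batch, timeout=timeout)
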
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class MultiprocessingRequestQueueTest(TestCase):
def test_get(self):
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
requests = request_queue.get(1, timeout=0.01)
self.assertEqual(0, len(requests))
request = TimerRequest(1, "test_scope", 0)
mp_queue.put(request)
requests = request_queue.get(2, timeout=0.01)
self.assertEqual(1, len(requests))
self.assertIn(request, requests)
@unittest.skipIf(
TEST_WITH_TSAN,
"test incompatible with tsan",
)
def test_get_size(self):
"""
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
yields all ``n`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
timeout = interval * (n + 1)
start = time.time()
requests = request_queue.get(n, timeout=timeout)
self.assertLessEqual(time.time() - start, timeout + interval)
self.assertEqual(n, len(requests))
def test_get_less_than_size(self):
"""
Tests slow producer.
Creates a "producer" process that enqueues ``n`` elements
            every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
            yields at least ``n/2`` elements, since the slow producer cannot fill
            all ``n`` within the timeout.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
requests = request_queue.get(n, timeout=(interval * (n / 2)))
self.assertLessEqual(n / 2, len(requests))
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class LocalTimerServerTest(TestCase):
def setUp(self):
super().setUp()
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
def tearDown(self):
super().tearDown()
self.server.stop()
def test_watchdog_call_count(self):
"""
            checks that the watchdog function ran approximately wait/max_interval (+/- 1) times
"""
self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
wait = 0.1
self.server.start()
time.sleep(wait)
self.server.stop()
watchdog_call_count = self.server._run_watchdog.call_count
self.assertGreaterEqual(
watchdog_call_count, int(wait / self.max_interval) - 1
)
self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)
def test_watchdog_empty_queue(self):
"""
checks that the watchdog can run on an empty queue
"""
self.server._run_watchdog()
def _expired_timer(self, pid, scope):
expired = time.time() - 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=expired)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=valid)
def _release_timer(self, pid, scope):
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=-1)
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
"""
            tests that a single expired timer on a process terminates
            the process and cleans up all pending timers that were owned by the process
"""
test_pid = -3
self.mp_queue.put(self._expired_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
@mock.patch("os.kill")
def test_acquire_release(self, mock_os_kill):
"""
tests that:
1. a timer can be acquired then released (should not terminate process)
2. a timer can be vacuously released (e.g. no-op)
"""
test_pid = -3
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill):
"""
tests that valid timers are processed correctly and the process is left alone
"""
self.mp_queue.put(self._valid_timer(pid=-3, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-3, scope="test2"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test2"))
self.server._run_watchdog()
self.assertEqual(4, len(self.server._timers))
self.assertTrue((-3, "test1") in self.server._timers)
self.assertTrue((-3, "test2") in self.server._timers)
self.assertTrue((-2, "test1") in self.server._timers)
self.assertTrue((-2, "test2") in self.server._timers)
mock_os_kill.assert_not_called()
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/timer/local_timer_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
TestCase
)
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(0.5)
def _stuck_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(5)
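
# A minimal single-process sketch (not one of the example tests below): wires a
# LocalTimerServer and a LocalTimerClient over the same multiprocessing queue
# and guards a block with ``timer.expires``, the same pattern the worker
# functions above rely on. ``_example_timer_wiring`` and the chosen intervals
# are illustrative assumptions, not part of the original example.
def _example_timer_wiring():
    mp_queue = mp.get_context("spawn").Queue()
    server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
    server.start()
    timer.configure(timer.LocalTimerClient(mp_queue))
    with timer.expires(after=1):
        time.sleep(0.1)  # finishes well before the 1 second deadline
    server.stop()
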
# timer is not supported on macos or windows
if not (IS_WINDOWS or IS_MACOS):
class LocalTimerExample(TestCase):
"""
        Demonstrates how to use LocalTimerServer and LocalTimerClient
        to enforce expiration of code blocks.

        Since torch multiprocessing's ``start_process`` method currently
        does not take the multiprocessing context as a parameter, there is
        no way to create the mp.Queue in the correct context BEFORE spawning
        child processes. Once the ``start_process`` API is changed in torch,
        re-enable the ``test_torch_mp_example`` unittest. As of now it will SIGSEGV.
"""
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
# all processes should complete successfully
            # since start_process does NOT yet take a multiprocessing context as a parameter
# this method WILL FAIL (hence the test is disabled)
torch_mp.spawn(
fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
)
with self.assertRaises(Exception):
# torch.multiprocessing.spawn kills all sub-procs
# if one of them gets killed
torch_mp.spawn(
fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
)
server.stop()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
# @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
# def test_example_start_method_forkserver(self):
# self._run_example_with(start_method="forkserver")
def _run_example_with(self, start_method):
spawn_ctx = mp.get_context(start_method)
mp_queue = spawn_ctx.Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
processes = []
for i in range(0, world_size):
if i % 2 == 0:
p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
else:
p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
p.start()
processes.append(p)
for i in range(0, world_size):
p = processes[i]
p.join()
if i % 2 == 0:
self.assertEqual(-signal.SIGKILL, p.exitcode)
else:
self.assertEqual(0, p.exitcode)
server.stop()
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/timer/local_timer_example.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/timer/__init__.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import unittest.mock as mock
from torch.distributed.elastic.timer import TimerServer
from torch.distributed.elastic.timer.api import RequestQueue, TimerRequest
class MockRequestQueue(RequestQueue):
def size(self):
return 2
def get(self, size, timeout):
return [TimerRequest(1, "test_1", 0), TimerRequest(2, "test_2", 0)]
class MockTimerServer(TimerServer):
"""
    Mock implementation of TimerServer for testing purposes.
    This mock has the following behavior:

    1. reaping worker 1 throws an exception
    2. reaping worker 2 succeeds (returns True)
    3. reaping worker 3 fails (returns False)

    For each of workers 1 - 3, returns 2 expired timers.
"""
def __init__(self, request_queue, max_interval):
super().__init__(request_queue, max_interval)
def register_timers(self, timer_requests):
pass
def clear_timers(self, worker_ids):
pass
def get_expired_timers(self, deadline):
return {
i: [TimerRequest(i, f"test_{i}_0", 0), TimerRequest(i, f"test_{i}_1", 0)]
for i in range(1, 4)
}
def _reap_worker(self, worker_id):
if worker_id == 1:
raise RuntimeError("test error")
elif worker_id == 2:
return True
elif worker_id == 3:
return False
class TimerApiTest(unittest.TestCase):
@mock.patch.object(MockTimerServer, "register_timers")
@mock.patch.object(MockTimerServer, "clear_timers")
def test_run_watchdog(self, mock_clear_timers, mock_register_timers):
"""
tests that when a ``_reap_worker()`` method throws an exception
for a particular worker_id, the timers for successfully reaped workers
are cleared properly
"""
max_interval = 1
request_queue = mock.Mock(wraps=MockRequestQueue())
timer_server = MockTimerServer(request_queue, max_interval)
timer_server._run_watchdog()
request_queue.size.assert_called_once()
request_queue.get.assert_called_with(request_queue.size(), max_interval)
mock_register_timers.assert_called_with(request_queue.get(2, 1))
mock_clear_timers.assert_called_with({1, 2})
|
pytorch-master
|
test/distributed/elastic/timer/api_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch.distributed.elastic.utils.logging as logging
from torch.testing._internal.common_utils import run_tests, TestCase
log = logging.get_logger()
class LoggingTest(TestCase):
def setUp(self):
super().setUp()
self.clazz_log = logging.get_logger()
def test_logger_name(self):
local_log = logging.get_logger()
name_override_log = logging.get_logger("foobar")
self.assertEqual(__name__, log.name)
self.assertEqual(__name__, self.clazz_log.name)
self.assertEqual(__name__, local_log.name)
self.assertEqual("foobar", name_override_log.name)
def test_derive_module_name(self):
module_name = logging._derive_module_name(depth=1)
self.assertEqual(__name__, module_name)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/utils/logging_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/utils/__init__.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import torch.distributed.elastic.utils.store as store_util
from torch.distributed.elastic.utils.logging import get_logger
from torch.testing._internal.common_utils import run_tests, TestCase
class StoreUtilTest(TestCase):
def test_get_all_rank_0(self):
store = mock.MagicMock()
world_size = 3
store_util.get_all(store, 0, "test/store", world_size)
# omit empty kwargs, get only key
actual_set_call_args = [
call_args[0][0] for call_args in store.set.call_args_list
]
self.assertListEqual(["test/store0.FIN"], actual_set_call_args)
actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
expected_get_call_args = [
("test/store0",),
("test/store1",),
("test/store2",),
("test/store0.FIN",),
("test/store1.FIN",),
("test/store2.FIN",),
]
self.assertListEqual(expected_get_call_args, actual_get_call_args)
def test_get_all_rank_n(self):
store = mock.MagicMock()
world_size = 3
store_util.get_all(store, 1, "test/store", world_size)
# omit empty kwargs, get only key
actual_set_call_args = [
call_args[0][0] for call_args in store.set.call_args_list
]
self.assertListEqual(["test/store1.FIN"], actual_set_call_args)
actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
expected_get_call_args = [
("test/store0",),
("test/store1",),
("test/store2",),
]
self.assertListEqual(expected_get_call_args, actual_get_call_args)
def test_synchronize(self):
store_mock = mock.MagicMock()
data = "data0".encode(encoding="UTF-8")
store_util.synchronize(store_mock, data, 0, 3, key_prefix="torchelastic/test")
actual_set_call_args = store_mock.set.call_args_list
# omit empty kwargs
actual_set_call_args = [call_args[0] for call_args in actual_set_call_args]
expected_set_call_args = [
("torchelastic/test0", b"data0"),
("torchelastic/test0.FIN", b"FIN"),
]
self.assertListEqual(expected_set_call_args, actual_set_call_args)
expected_get_call_args = [
("torchelastic/test0",),
("torchelastic/test1",),
("torchelastic/test2",),
("torchelastic/test0.FIN",),
("torchelastic/test1.FIN",),
("torchelastic/test2.FIN",),
]
actual_get_call_args = store_mock.get.call_args_list
actual_get_call_args = [call_args[0] for call_args in actual_get_call_args]
self.assertListEqual(expected_get_call_args, actual_get_call_args)
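
# A simplified sketch of the key naming convention that the mocked call-argument
# lists above are checking (this is NOT the library implementation -- see
# torch.distributed.elastic.utils.store for the real one): each rank writes
# "<prefix><rank>" with its payload and "<prefix><rank>.FIN" as a completion
# marker, then reads every rank's payload followed by the FIN markers. In the
# helpers exercised above, only rank 0 is shown waiting on all .FIN markers.
def _example_store_barrier(store, rank, world_size, prefix, payload):
    store.set(f"{prefix}{rank}", payload)
    store.set(f"{prefix}{rank}.FIN", b"FIN")
    data = [store.get(f"{prefix}{i}") for i in range(world_size)]
    for i in range(world_size):
        store.get(f"{prefix}{i}.FIN")
    return data
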
class UtilTest(TestCase):
def test_get_logger_different(self):
logger1 = get_logger("name1")
logger2 = get_logger("name2")
self.assertNotEqual(logger1.name, logger2.name)
def test_get_logger(self):
logger1 = get_logger()
self.assertEqual(__name__, logger1.name)
def test_get_logger_none(self):
logger1 = get_logger(None)
self.assertEqual(__name__, logger1.name)
def test_get_logger_custom_name(self):
logger1 = get_logger("test.module")
self.assertEqual("test.module", logger1.name)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/utils/util_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import socket
import sys
import unittest
from contextlib import closing
from torch.distributed.elastic.utils.distributed import (
create_c10d_store,
get_socket_with_port,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
IS_WINDOWS,
run_tests,
TEST_WITH_TSAN,
TestCase
)
def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_workers):
store = create_c10d_store(is_server, server_addr, port, world_size, wait_for_workers=wait_for_workers, timeout=2)
if store is None:
raise AssertionError()
store.set(f"test_key/{os.getpid()}", "test_value".encode("UTF-8"))
if IS_WINDOWS or IS_MACOS:
print("tests incompatible with tsan or asan", file=sys.stderr)
sys.exit(0)
class DistributedUtilTest(TestCase):
def test_create_store_single_server(self):
store = create_c10d_store(is_server=True, server_addr=socket.gethostname())
self.assertIsNotNone(store)
def test_create_store_no_port_multi(self):
with self.assertRaises(ValueError):
create_c10d_store(
is_server=True, server_addr=socket.gethostname(), world_size=2
)
@unittest.skipIf(TEST_WITH_TSAN, "test incompatible with tsan")
def test_create_store_multi(self):
world_size = 3
wait_for_workers = False
localhost = socket.gethostname()
# start the server on the main process using an available port
store = create_c10d_store(
is_server=True,
server_addr=localhost,
server_port=0,
timeout=2,
world_size=world_size,
wait_for_workers=wait_for_workers,
)
# worker processes will use the port that was assigned to the server
server_port = store.port
worker0 = mp.Process(
target=_create_c10d_store_mp,
args=(False, localhost, server_port, world_size, wait_for_workers),
)
worker1 = mp.Process(
target=_create_c10d_store_mp,
args=(False, localhost, server_port, world_size, wait_for_workers),
)
worker0.start()
worker1.start()
worker0.join()
worker1.join()
# check test_key/pid == "test_value"
self.assertEqual(
"test_value", store.get(f"test_key/{worker0.pid}").decode("UTF-8")
)
self.assertEqual(
"test_value", store.get(f"test_key/{worker1.pid}").decode("UTF-8")
)
self.assertEqual(0, worker0.exitcode)
self.assertEqual(0, worker1.exitcode)
def test_create_store_timeout_on_server(self):
with self.assertRaises(TimeoutError):
# use any available port (port 0) since timeout is expected
create_c10d_store(
is_server=True,
server_addr=socket.gethostname(),
server_port=0,
world_size=2,
timeout=1,
)
def test_create_store_timeout_on_worker(self):
with self.assertRaises(TimeoutError):
# use any available port (port 0) since timeout is expected
create_c10d_store(
is_server=False,
server_addr=socket.gethostname(),
server_port=0,
world_size=2,
timeout=1,
)
def test_port_already_in_use_on_server(self):
        # try to create the TCPStore server twice on the same port
        # the second attempt should fail due to a port conflict:
        # the first store binds onto a free port, then we try creating a
        # second store on the port that the first store bound to
server_addr = socket.gethostname()
pick_free_port = 0
store1 = create_c10d_store(
is_server=True,
server_addr=server_addr,
server_port=pick_free_port,
timeout=1,
)
with self.assertRaises(RuntimeError):
create_c10d_store(
is_server=True, server_addr=server_addr, server_port=store1.port
)
def test_port_already_in_use_on_worker(self):
sock = get_socket_with_port()
with closing(sock):
port = sock.getsockname()[1]
# on the worker port conflict shouldn't matter, it should just timeout
# since we never created a server
with self.assertRaises(TimeoutError):
create_c10d_store(
is_server=False,
server_addr=socket.gethostname(),
server_port=port,
timeout=1,
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/utils/distributed_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from torch.distributed.elastic.utils.data import CyclingIterator
class CyclingIteratorTest(unittest.TestCase):
def generator(self, epoch, stride, max_epochs):
        # generate a continuously incrementing list each epoch
# e.g. [0,1,2] [3,4,5] [6,7,8] ...
return iter([stride * epoch + i for i in range(0, stride)])
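
    # Illustrative sketch (hypothetical helper, not exercised by the tests below):
    # shows how CyclingIterator chains the per-epoch generators produced above
    # into one flat stream, e.g. epoch 0 -> [0,1,2], epoch 1 -> [3,4,5], ...
    def example_flattened_stream(self, stride, max_epochs):
        def generator_fn(epoch):
            return self.generator(epoch, stride, max_epochs)

        it = CyclingIterator(n=max_epochs, generator_fn=generator_fn)
        return list(it)  # [0, 1, ..., stride * max_epochs - 1]
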
def test_cycling_iterator(self):
stride = 3
max_epochs = 90
def generator_fn(epoch):
return self.generator(epoch, stride, max_epochs)
it = CyclingIterator(n=max_epochs, generator_fn=generator_fn)
for i in range(0, stride * max_epochs):
self.assertEqual(i, next(it))
with self.assertRaises(StopIteration):
next(it)
def test_cycling_iterator_start_epoch(self):
stride = 3
max_epochs = 2
start_epoch = 1
def generator_fn(epoch):
return self.generator(epoch, stride, max_epochs)
it = CyclingIterator(max_epochs, generator_fn, start_epoch)
for i in range(stride * start_epoch, stride * max_epochs):
self.assertEqual(i, next(it))
with self.assertRaises(StopIteration):
next(it)
|
pytorch-master
|
test/distributed/elastic/utils/data/cycling_iterator_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/utils/data/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
pytorch-master
|
test/distributed/elastic/agent/server/test/__init__.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import multiprocessing as mp
import os
import shutil
import signal
import socket
import tempfile
import time
import unittest
import uuid
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple
from unittest import mock
from unittest.mock import Mock, patch
import torch
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
sandcastle_skip_if,
)
def init_rpc(name, backend):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
rpc.init_rpc(
name=name,
backend=backend,
rank=rank,
world_size=world_size,
)
def rpc_master(msg):
init_rpc("master", BackendType.TENSORPIPE)
ret = rpc.rpc_sync(to="worker", func=_echo, args=(msg,))
rpc.shutdown()
return f"{ret} from worker"
def rpc_worker():
init_rpc("worker", BackendType.TENSORPIPE)
rpc.shutdown()
def _happy_function():
return
def _sad_function():
raise RuntimeError("sad because i throw")
def dummy_compute() -> torch.Tensor:
"""
    returns a random Tensor of a predefined size (100 x 100)
"""
return torch.rand(100, 100)
def _fatal_signal_function(expected_error_index: int, sig: int):
rank = int(os.environ["RANK"])
if rank == expected_error_index:
os.kill(os.getpid(), sig)
def _check_master_port_addr_override(
expected_master_addr: str, expected_master_port: int
):
actual_master_addr = os.environ["MASTER_ADDR"]
actual_master_port = int(os.environ["MASTER_PORT"])
    # raise if either the address or the port differs from the expected override
    if (
        expected_master_addr != actual_master_addr
        or expected_master_port != actual_master_port
    ):
raise RuntimeError(
f"Expected addr: {expected_master_addr}:{expected_master_port}, got addr: {actual_master_addr}:{actual_master_port}"
)
def _bipolar_function():
rank = int(os.environ["RANK"])
if rank % 2 == 0:
_happy_function()
else:
_sad_function()
def _dist_sum(wait=0):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
dist.init_process_group(backend="gloo")
t = torch.tensor(rank)
time.sleep(wait)
dist.all_reduce(t, op=dist.reduce_op.SUM)
expected_sum = sum(range(world_size))
actual = t.item()
if expected_sum != actual:
raise RuntimeError(f"Expected rank sum {expected_sum}, got {actual}")
def _sleep(sleep_sec) -> int:
time.sleep(sleep_sec)
return int(os.environ["RANK"])
@dataclass
class RankInfo:
rank: int
role_rank: int
group_rank: int
role_world_size: int
world_size: int
def _get_role_info() -> RankInfo:
rank = int(os.environ["RANK"])
role_rank = int(os.environ["ROLE_RANK"])
group_rank = int(os.environ["GROUP_RANK"])
role_world_size = int(os.environ["ROLE_WORLD_SIZE"])
world_size = int(os.environ["WORLD_SIZE"])
return RankInfo(rank, role_rank, group_rank, role_world_size, world_size)
def _echo(msg):
return msg
def _check_env_function():
# just check these env vars exist, os.environ[...] will naturally throw
# if the variable does not exist
env_vars = [
"RANK",
"LOCAL_RANK",
"ROLE_RANK",
"ROLE_NAME",
"GROUP_RANK",
"LOCAL_WORLD_SIZE",
"ROLE_WORLD_SIZE",
"WORLD_SIZE",
"GROUP_WORLD_SIZE",
"MASTER_ADDR",
"MASTER_PORT",
"TORCHELASTIC_RESTART_COUNT",
"TORCHELASTIC_MAX_RESTARTS",
"TORCHELASTIC_RUN_ID",
"TORCHELASTIC_USE_AGENT_STORE",
"NCCL_ASYNC_ERROR_HANDLING",
]
for var in env_vars:
_ = os.environ[var]
def _check_env_value(key: str, expected: str):
# checks if the env var ``key`` matches ``value``
# this function is intended to be used as the entrypoint to the elastic run
if key not in os.environ:
raise RuntimeError(f"Environment variable {key} not found in os.environ")
else:
actual = os.getenv(key)
if expected != actual:
raise RuntimeError(
f"os.environ['{key}']={actual}"
f" does not equal the expected value: {expected}"
)
def acquire_available_port():
"""
Uses sockets to acquire an available port from the os for use.
Note: To reduce the race condition where another process grabs the port
after this function returns an available port, we should aim to use
the port as quickly as possible.
"""
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
try:
s = socket.socket(family, type, proto)
s.bind(("localhost", 0))
s.listen(0)
port = s.getsockname()[1]
s.close()
return port
except OSError as e:
s.close()
print(f"Socket creation attempt failed: {e}")
raise RuntimeError("Failed to create a socket")
@dataclass
class Conf:
"""
Holds arguments to launch an agent (e.g. simulates an agent run on a node).
"""
entrypoint: Callable
local_world_size: int
args: Tuple = ()
role: str = "default"
redirects: Std = Std.NONE
tee: Std = Std.NONE
class LocalElasticAgentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self._test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
self._run_id = str(uuid.uuid4()).split("-")[0]
def tearDown(self):
shutil.rmtree(self._test_dir)
def log_dir(self) -> str:
return tempfile.mkdtemp(prefix="torchelastic_", dir=self._test_dir)
def get_worker_spec(
self,
node_config: Conf,
min_nodes=1,
max_nodes=1,
max_restarts=0,
monitor_interval=0.01,
master_addr_override: Optional[str] = None,
master_port_override: Optional[int] = None,
is_host=True,
):
rdzv_params = RendezvousParameters(
backend=self._backend,
endpoint=self._endpoint,
run_id=self._run_id,
min_nodes=min_nodes,
max_nodes=max_nodes,
is_host=is_host,
)
rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_params)
return WorkerSpec(
role=node_config.role,
local_world_size=node_config.local_world_size,
entrypoint=node_config.entrypoint,
args=node_config.args,
rdzv_handler=rdzv_handler,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
redirects=node_config.redirects,
tee=node_config.tee,
master_addr=master_addr_override,
master_port=master_port_override,
)
def get_agent(
self, spec: WorkerSpec, start_method: str = "spawn", exit_barrier_timeout=5
) -> LocalElasticAgent:
return LocalElasticAgent(
spec,
start_method=start_method,
exit_barrier_timeout=exit_barrier_timeout,
log_dir=self.log_dir(),
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.multiprocessing.errors.record`.
@record
def run_agent(
self,
conf: Conf,
agent_results: Optional[mp.Queue] = None, # (role, agent_result)
min_nodes=1,
max_nodes=1,
start_method: str = "spawn",
max_restarts: int = 0,
exit_barrier_timeout=5,
master_addr_override: Optional[str] = None,
master_port_override: Optional[int] = None,
is_host=True,
) -> Optional[RunResult]:
"""
Runs a single agent. This method can be called either on a separate process
        or the main test process. When calling this method on a separate process make
sure to pass the ``agent_results`` multiprocessing Queue so that the agent's
run results can be returned. If ``agent_results`` is omitted, then the
run result is returned from the method.
"""
spec = self.get_worker_spec(
node_config=conf,
min_nodes=min_nodes,
max_nodes=max_nodes,
max_restarts=max_restarts,
master_addr_override=master_addr_override,
master_port_override=master_port_override,
is_host=is_host,
)
agent = self.get_agent(
spec=spec,
start_method=start_method,
exit_barrier_timeout=exit_barrier_timeout,
)
result = agent.run()
spec.rdzv_handler.shutdown()
if agent_results:
agent_results.put((conf.role, result))
if result.is_failed():
raise ChildFailedError(spec.get_entrypoint_name(), result.failures)
else:
if not agent_results:
return result
def run_job(
self, node_configs: List[Conf], exit_barrier_timeout: int = 5
) -> Dict[str, List[RunResult]]:
"""
        Simulates running a distributed job by running multiple agents,
        one on each process. Every agent (including the one for the first
        node config) is launched on its own child process below.
"""
nnodes = len(node_configs)
# each element in this queue holds a tuple (role, RunResult) for each agent
agent_results = mp.Queue()
        # launch one agent process per node config; the loop is reversed so that
        # node 0 (the rendezvous host, ``is_host=node_idx == 0``) is started last
procs = []
for node_idx in reversed(range(len(node_configs))):
conf = node_configs[node_idx]
run_agent_args = {
"conf": conf,
"agent_results": agent_results,
"min_nodes": nnodes,
"max_nodes": nnodes,
"start_method": "spawn",
"max_restarts": 0,
"exit_barrier_timeout": exit_barrier_timeout,
"is_host": node_idx == 0,
}
p = mp.Process(target=self.run_agent, kwargs=run_agent_args)
procs.append(p)
p.start()
for p in procs:
p.join()
results: Dict[str, List[RunResult]] = {}
while not agent_results.empty():
role, run_result = agent_results.get()
results.setdefault(role, []).append(run_result)
return results
def run_test_with_backend(self, backend: str, test_to_run: Callable):
"""
Sets the backend and determines the endpoint before running the
given test.
Note: This method must be invoked to run any test functions that spawn
an agent. This is because this function sets the backend and
endpoint parameters.
"""
self._backend = backend
if self._backend == "etcd-v2" or self._backend == "etcd":
self._endpoint = self._etcd_server.get_endpoint()
else:
# the default is c10d backend
self._endpoint = f"localhost:{acquire_available_port()}"
test_to_run()
def dummy_compute(self):
res = self.run_agent(Conf(entrypoint=dummy_compute, local_world_size=2))
self.assertFalse(res.is_failed())
for return_value in res.return_values.values():
self.assertIsInstance(return_value, torch.Tensor)
self.assertEqual((100, 100), return_value.shape)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.dummy_compute)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.dummy_compute)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_dummy_compute_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.dummy_compute)
def run_happy_function(self):
res = self.run_agent(Conf(entrypoint=_happy_function, local_world_size=2))
self.assertFalse(res.is_failed())
self.assertIsNone(res.return_values[0])
self.assertIsNone(res.return_values[1])
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_happy_function)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_happy_function)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_happy_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_happy_function
)
def check_master_addr_port_override(self):
master_addr = "test_host"
master_port = 42
res = self.run_agent(
Conf(
entrypoint=_check_master_port_addr_override,
args=(master_addr, master_port),
local_world_size=1,
),
master_addr_override=master_addr,
master_port_override=master_port,
)
self.assertFalse(res.is_failed())
self.assertIsNone(res.return_values[0])
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_check_master_addr_port_override_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.check_master_addr_port_override
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_check_master_addr_port_override_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.check_master_addr_port_override
)
def run_check_env_function(self):
        # just checks that all the env vars that we need to set on the user script
        # are actually set
res = self.run_agent(Conf(entrypoint=_check_env_function, local_world_size=1))
self.assertFalse(res.is_failed())
def run_check_nccl_async_error_handling_env(self):
# make sure NCCL_ASYNC_ERROR_HANDLING set in os.environ is honored
with patch.dict(os.environ, {"NCCL_ASYNC_ERROR_HANDLING": "0"}):
res = self.run_agent(
Conf(
entrypoint=_check_env_value,
local_world_size=1,
args=("NCCL_ASYNC_ERROR_HANDLING", "0"),
)
)
self.assertFalse(res.is_failed())
def run_check_nccl_async_error_handling_env_default(self):
# if not present in env var it should default to 1
res = self.run_agent(
Conf(
entrypoint=_check_env_value,
local_world_size=1,
args=("NCCL_ASYNC_ERROR_HANDLING", "1"),
)
)
self.assertFalse(res.is_failed())
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_env_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_check_env_function
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_nccl_async_error_handling_env_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_check_nccl_async_error_handling_env
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_nccl_async_error_handling_env_default_c10d(self):
self.run_test_with_backend(
backend="c10d",
test_to_run=self.run_check_nccl_async_error_handling_env_default,
)
def run_function_with_return_value(self):
res = self.run_agent(Conf(entrypoint=_echo, args=("foo",), local_world_size=2))
self.assertFalse(res.is_failed())
self.assertEqual("foo", res.return_values[0])
self.assertEqual("foo", res.return_values[1])
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_function_with_return_value
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_function_with_return_value
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_function_with_return_value_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_function_with_return_value
)
def simple_dist_sum(self):
res = self.run_agent(Conf(entrypoint=_dist_sum, local_world_size=2))
self.assertFalse(res.is_failed())
# _dist_sum internally checks that the sum computed is valid
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.simple_dist_sum)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.simple_dist_sum)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_simple_dist_sum_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.simple_dist_sum)
def run_distributed_sum_homogeneous(self):
node_configs = [
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4),
Conf(role="sum", entrypoint=_dist_sum, local_world_size=4),
]
        # When the process method is spawn, the coverage collector hangs,
        # getting stuck on _dist_sum while waiting for TCPStore workers
        # to join the cluster
# TODO(aivanou): t83447589 come up with the proper fix
res = self.run_job(node_configs)
self.assertEqual(2, len(res["sum"]))
ranks = set()
for run_results in res["sum"]:
self.assertFalse(run_results.is_failed())
ranks.update(run_results.return_values.keys())
self.assertSetEqual(set(range(4 + 4)), ranks)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_run_distributed_sum_homogeneous_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_distributed_sum_homogeneous
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_run_distributed_sum_homogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_distributed_sum_homogeneous
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_run_distributed_sum_homogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_distributed_sum_homogeneous
)
def run_distributed_sum_heterogeneous(self):
# sums all ranks on 3 agents; each running 1, 2, 3 workers respectively
# sum should be equal to 0 + (1 + 2) + (3 + 4 + 5) = 15
# sum asserted inside _dist_sum()
node_configs = [
Conf(role="sum", entrypoint=_dist_sum, local_world_size=1),
Conf(role="sum", entrypoint=_dist_sum, local_world_size=2),
Conf(role="sum", entrypoint=_dist_sum, local_world_size=3),
]
        # When the process method is spawn, the coverage collector hangs,
        # getting stuck on _dist_sum while waiting for TCPStore workers
        # to join the cluster
# TODO(aivanou): t83447589 come up with the proper fix
res = self.run_job(node_configs)
self.assertEqual(3, len(res["sum"]))
ranks = set()
for run_results in res["sum"]:
self.assertFalse(run_results.is_failed())
ranks.update(run_results.return_values.keys())
self.assertSetEqual(set(range(1 + 2 + 3)), ranks)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_distributed_sum_heterogeneous
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_distributed_sum_heterogeneous
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_distributed_sum_heterogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_distributed_sum_heterogeneous
)
def run_sad_function(self):
"""
checks error propagation logic
"""
replyfile = os.path.join(self._test_dir, "error.json")
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": replyfile}):
with self.assertRaises(ChildFailedError) as cm:
self.run_agent(Conf(entrypoint=_sad_function, local_world_size=2))
rank, failure = cm.exception.get_first_failure()
failure_data = failure.error_file_data["message"]
with open(replyfile, "r") as fp:
data = json.load(fp)["message"]
# ran two; both failed; first failure is either rank 0 or 1
self.assertTrue(rank in {0, 1})
self.assertTrue(failure.local_rank in {0, 1})
self.assertEqual(1, failure.exitcode)
self.assertEqual(data["message"], failure_data["message"])
self.assertEqual(int(data["extraInfo"]["timestamp"]), failure.timestamp)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.run_sad_function)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.run_sad_function)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_sad_function_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.run_sad_function)
def run_bipolar_function(self):
"""
checks agent failure handling logic
"""
node_conf = Conf(entrypoint=_bipolar_function, local_world_size=4)
spec = self.get_worker_spec(node_conf, max_restarts=2)
agent = self.get_agent(spec)
run_result = agent.run()
self.assertTrue(run_result.is_failed())
self.assertEqual(0, agent._remaining_restarts)
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertTrue(agent._total_execution_time > 0)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.run_bipolar_function
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.run_bipolar_function
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_bipolar_function_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.run_bipolar_function
)
def correct_rank_assignment_heterogeneous(self):
node_configs = [
Conf(role="master", entrypoint=_get_role_info, local_world_size=8),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=1),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=2),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=3),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=4),
Conf(role="ps", entrypoint=_get_role_info, local_world_size=5),
Conf(role="ps", entrypoint=_get_role_info, local_world_size=2),
]
results = self.run_job(node_configs)
print(f"heterogeneous job result: {results}")
self.assertEqual(1, len(results["master"]))
self.assertEqual(4, len(results["trainer"]))
self.assertEqual(2, len(results["ps"]))
self.assert_rank_consistency(
results,
expected_role_world_sizes={
"master": 8,
"trainer": 1 + 2 + 3 + 4,
"ps": 5 + 2,
},
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_correct_rank_assignment_heterogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.correct_rank_assignment_heterogeneous
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_correct_rank_assignment_heterogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.correct_rank_assignment_heterogeneous
)
def correct_rank_assignment_homogeneous(self):
node_configs = [
Conf(role="master", entrypoint=_get_role_info, local_world_size=1),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=4),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=4),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=4),
Conf(role="trainer", entrypoint=_get_role_info, local_world_size=4),
Conf(role="ps", entrypoint=_get_role_info, local_world_size=3),
Conf(role="ps", entrypoint=_get_role_info, local_world_size=3),
]
results = self.run_job(node_configs)
print(f"homogeneous job result: {results}")
self.assertEqual(1, len(results["master"]))
self.assertEqual(4, len(results["trainer"]))
self.assertEqual(2, len(results["ps"]))
self.assert_rank_consistency(
results,
expected_role_world_sizes={"master": 1, "trainer": 4 * 4, "ps": 3 * 2},
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_correct_rank_assignment_homogeneous_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.correct_rank_assignment_homogeneous
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_correct_rank_assignment_homogeneous_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.correct_rank_assignment_homogeneous
)
def assert_rank_consistency(
self,
run_results: Dict[str, List[RunResult]],
expected_role_world_sizes: Dict[str, int],
):
"""
Asserts that ranks are consecutive w.r.t role_rank. If local world sizes are 4:
role_rank_0 -> ranks: 0,1,2,3
role_rank_1 -> ranks: 4,5,6,7
... etc ...
"""
global_ranks: List[int] = []
# role -> [role_rank,...]
role_ranks: Dict[str, List[int]] = {}
# group rank -> [(rank, role_rank),...]
grouped_ranks: Dict[int, List[Tuple[int, int]]] = {}
# global world size == sum of all the role world sizes
expected_world_size = sum(expected_role_world_sizes.values())
        for role, results in run_results.items():
            for result in results:
res = result.return_values
for role_info in res.values():
rank = role_info.rank
role_rank = role_info.role_rank
group_rank = role_info.group_rank
role_world_size = role_info.role_world_size
world_size = role_info.world_size
self.assertEqual(expected_world_size, world_size)
self.assertEqual(expected_role_world_sizes[role], role_world_size)
grouped_ranks.setdefault(group_rank, []).append((rank, role_rank))
role_ranks.setdefault(role, []).append(role_rank)
global_ranks.append(rank)
global_ranks = sorted(global_ranks)
self.assertEqual(list(range(expected_world_size)), global_ranks)
for role, expected_role_world_size in expected_role_world_sizes.items():
self.assertEqual(
list(range(expected_role_world_size)), sorted(role_ranks[role])
)
        # Make sure that each agent assigns consecutive ranks to its workers.
        # Each entry in grouped_ranks is a (global_rank, role_rank) pair;
        # index 0 checks global ranks and index 1 checks role ranks.
for ranks_lst in grouped_ranks.values():
self.assert_ranks_sequential(ranks_lst, 0)
self.assert_ranks_sequential(ranks_lst, 1)
def assert_ranks_sequential(self, ranks_pairs, rank_idx):
ranks = sorted(rank_pair[rank_idx] for rank_pair in ranks_pairs)
start_rank, end_rank = ranks[0], ranks[-1]
self.assertEqual(list(range(start_rank, end_rank + 1)), ranks)
def double_agent_fault_tolerance(self):
"""
start ``nnodes`` agents, kill and restart odd ones, validate fault-tolerance works
"""
nnodes = 2
wait = 2
node_conf = Conf(entrypoint=_dist_sum, args=(wait,), local_world_size=2)
agent_results = mp.Queue()
agent_args = {
"conf": node_conf,
"agent_results": agent_results,
"min_nodes": nnodes,
"max_nodes": nnodes,
"max_restarts": 2,
}
procs = []
for _ in range(nnodes):
p = mp.Process(
target=self.run_agent,
kwargs=agent_args,
)
procs.append(p)
p.start()
# restart odd agents
for i in range(nnodes):
if i % 2 != 0:
procs[i].kill()
p = mp.Process(
target=self.run_agent,
kwargs=agent_args,
)
procs[i] = p
p.start()
for i in range(nnodes):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_double_agent_fault_tolerance_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.double_agent_fault_tolerance
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_double_agent_fault_tolerance_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.double_agent_fault_tolerance
)
def double_agent_elastic(self):
"""
start ``nnodes`` agents, kill odd ones (do not restart), validate
elasticity (scale-down) works. (scale-up covered in fault_tolerance test)
"""
min_nodes = 1
max_nodes = 2
wait = 2
node_conf = Conf(entrypoint=_dist_sum, args=(wait,), local_world_size=2)
agent_results = mp.Queue()
agent_args = {
"conf": node_conf,
"agent_results": agent_results,
"min_nodes": min_nodes,
"max_nodes": max_nodes,
"max_restarts": 2,
}
procs = []
for _ in range(max_nodes):
p = mp.Process(
target=self.run_agent,
kwargs=agent_args,
)
procs.append(p)
p.start()
# kill odd agents
for i in range(max_nodes):
if i % 2 != 0:
procs[i].kill()
for i in range(max_nodes):
p = procs[i]
p.join()
if i % 2 == 0:
self.assertEqual(0, p.exitcode)
else:
self.assertEqual(-signal.SIGKILL, p.exitcode)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_double_agent_elastic_c10d(self):
self.run_test_with_backend(
backend="c10d", test_to_run=self.double_agent_elastic
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_double_agent_elastic_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.double_agent_elastic
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_double_agent_elastic_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.double_agent_elastic
)
def torch_rpc(self):
"""
        Simple torch rpc example with torchelastic.
        Creates two agents (to simulate a two-node job); each agent runs a
        single worker. worker0 calls an rpc_sync on worker1.
"""
msg = "hello world"
node_configs = [
Conf(
role="master",
entrypoint=rpc_master,
args=(msg,),
local_world_size=1,
tee=Std.ALL,
),
Conf(
role="worker",
entrypoint=rpc_worker,
args=(),
local_world_size=1,
tee=Std.ALL,
),
]
results = self.run_job(node_configs)
master_retvals = results["master"][0].return_values
# there is only one master but the global rank is not stable
# so compare the master return value as a collection
self.assertEqual([f"{msg} from worker"], list(master_retvals.values()))
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_torch_rpc_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.torch_rpc)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_torch_rpc_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.torch_rpc)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_torch_rpc_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.torch_rpc)
def workers_drift_success(self):
"""
        two agents (one worker each) finish within ``sec`` seconds of each other,
exit barrier timeout set to ``sec * 2 * 2``.
"""
sec = 1
node_configs = [
Conf(role="zzz", entrypoint=_sleep, args=(0 * sec,), local_world_size=1),
Conf(role="zzz", entrypoint=_sleep, args=(2 * sec,), local_world_size=1),
]
results = self.run_job(node_configs, exit_barrier_timeout=2 * 2 * sec)
for i in range(2):
run_results = results["zzz"][i]
self.assertFalse(run_results.is_failed())
for rank, output in run_results.return_values.items():
# _sleep() returns its own rank
self.assertEqual(rank, output)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_workers_drift_success_etcd(self):
self.run_test_with_backend(
backend="etcd", test_to_run=self.workers_drift_success
)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_workers_drift_success_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.workers_drift_success
)
def workers_drift_fail(self):
"""
        two agents (one worker each) finish within ``4 * sec`` seconds of each other,
        exit barrier timeout set to 0. The exit barrier should NOT fail the job.
"""
sec = 1
node_configs = [
Conf(role="zzz", entrypoint=_sleep, args=(0 * sec,), local_world_size=1),
Conf(role="zzz", entrypoint=_sleep, args=(4 * sec,), local_world_size=1),
]
results = self.run_job(node_configs, exit_barrier_timeout=0)
for i in range(2):
run_results = results["zzz"][i]
self.assertFalse(run_results.is_failed())
for rank, output in run_results.return_values.items():
# _sleep() returns its own rank
self.assertEqual(rank, output)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_workers_drift_fail_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.workers_drift_fail)
@unittest.skipIf(
TEST_WITH_DEV_DBG_ASAN or TEST_WITH_TSAN,
"test incompatible with dev/dbg asan or tsan",
)
def test_workers_drift_fail_etcd_v2(self):
self.run_test_with_backend(
backend="etcd-v2", test_to_run=self.workers_drift_fail
)
@patch("torch.distributed.elastic.utils.store.barrier")
def barrier_failed(self, barrier_mock):
"""
Failure during the barrier should NOT fail the job.
"""
barrier_mock.side_effect = RuntimeError("test error")
res = self.run_agent(Conf(entrypoint=_happy_function, local_world_size=1))
self.assertFalse(res.is_failed())
barrier_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.barrier_failed)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.barrier_failed)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_barrier_failed_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.barrier_failed)
@patch("torch.distributed.elastic.agent.server.local_elastic_agent.start_processes")
def shutdown_called(self, start_processes_mock):
pcontext_mock = Mock()
pcontext_mock.pids.return_value = {0: 0}
start_processes_mock.return_value = pcontext_mock
node_conf = Conf(entrypoint=_happy_function, local_world_size=1)
spec = self.get_worker_spec(node_conf, max_restarts=0)
agent = self.get_agent(spec)
with patch.object(agent, "_monitor_workers") as monitor_mock:
monitor_mock.return_value = RunResult(
state=WorkerState.SUCCEEDED, return_values={0: 0}
)
agent.run("worker")
pcontext_mock.close.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_c10d(self):
self.run_test_with_backend(backend="c10d", test_to_run=self.shutdown_called)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_etcd(self):
self.run_test_with_backend(backend="etcd", test_to_run=self.shutdown_called)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_shutdown_called_etcd_v2(self):
self.run_test_with_backend(backend="etcd-v2", test_to_run=self.shutdown_called)
|
pytorch-master
|
test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import signal
import unittest
import uuid
from typing import Any, Dict
from unittest.mock import call, patch, MagicMock
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
WorkerState,
_get_fq_hostname,
_RoleInstanceInfo,
)
from torch.distributed.elastic.multiprocessing import SignalException
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
def do_nothing():
pass
class WorkerStateTest(unittest.TestCase):
def test_is_running(self):
for state in WorkerState:
if state == WorkerState.HEALTHY or state == WorkerState.UNHEALTHY:
self.assertTrue(WorkerState.is_running(state))
else:
self.assertFalse(WorkerState.is_running(state))
class WorkerGroupTest(unittest.TestCase):
def test_worker_group_constructor(self):
spec = WorkerSpec(
role="test_trainer",
local_world_size=4,
fn=do_nothing,
args=(),
rdzv_handler=None,
max_restarts=50,
monitor_interval=1,
)
worker_group = WorkerGroup(spec)
self.assertEqual(WorkerState.INIT, worker_group.state)
workers = worker_group.workers
self.assertEqual(4, len(workers))
# validate full, consecutive local ranks
self.assertSetEqual(set(range(4)), {w.local_rank for w in workers})
# global_rank, world_size are assigned after rdzv
# id is assigned after starting worker (by the agent)
        # validate that they are still unassigned (-1 / None)
        for w in workers:
            self.assertEqual(-1, w.global_rank)
            self.assertEqual(-1, w.world_size)
            self.assertIsNone(w.id)
# rank and store are assigned after rdzv; validate that they are None
self.assertIsNone(worker_group.group_rank)
self.assertIsNone(worker_group.store)
class RoleInstanceInfoTest(unittest.TestCase):
def test_compare(self):
agent_role1 = _RoleInstanceInfo("role", 1, 10)
agent_role2 = _RoleInstanceInfo("role", 2, 10)
self.assertEqual(1, _RoleInstanceInfo.compare(agent_role2, agent_role1))
agent_role1 = _RoleInstanceInfo("role1", 1, 10)
agent_role2 = _RoleInstanceInfo("role2", 2, 10)
self.assertEqual(-1, _RoleInstanceInfo.compare(agent_role1, agent_role2))
agent_role1 = _RoleInstanceInfo("role1", 1, 10)
agent_role2 = _RoleInstanceInfo("role2", 1, 10)
self.assertEqual(-1, _RoleInstanceInfo.compare(agent_role1, agent_role2))
def test_serde(self):
agent_role = _RoleInstanceInfo("role", 1, 10)
str_data = agent_role.serialize()
actual_agent_role = _RoleInstanceInfo.deserialize(str_data)
self.assertEqual(agent_role.role, actual_agent_role.role)
self.assertEqual(agent_role.rank, actual_agent_role.rank)
self.assertEqual(
agent_role.local_world_size, actual_agent_role.local_world_size
)
def test_find_boundaries(self):
role_infos = [
_RoleInstanceInfo("trainer", 1, 1),
_RoleInstanceInfo("trainer", 2, 2),
_RoleInstanceInfo("trainer", 3, 3),
_RoleInstanceInfo("parameter_server", 4, 5),
_RoleInstanceInfo("parameter_server", 0, 4),
]
start_idx, end_idx = _RoleInstanceInfo.find_role_boundaries(
role_infos, "trainer"
)
self.assertEqual(start_idx, 0)
self.assertEqual(end_idx, 2)
class TestAgent(SimpleElasticAgent):
def __init__(self, spec):
super().__init__(spec)
self.stop_workers_call_count = 0
self.start_workers_call_count = 0
def _stop_workers(self, worker_group: WorkerGroup) -> None:
# workers are fake, nothing to stop; just clear the rdzv info
worker_group.group_rank = None
worker_group.group_world_size = None
self.stop_workers_call_count += 1
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
        # create fake workers; make worker id equal to global rank
ids = {}
for worker in worker_group.workers:
ids[worker.local_rank] = worker.global_rank
self.start_workers_call_count += 1
return ids
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
raise NotImplementedError("mock this method")
def _shutdown(self):
pass
def monres(state: WorkerState):
if state == WorkerState.SUCCEEDED:
return RunResult(state=state, return_values={0: 0}, failures={})
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}:
pf = ProcessFailure(local_rank=0, pid=999, exitcode=1, error_file="<none>")
return RunResult(state=state, return_values={}, failures={0: pf})
else:
return RunResult(state=state)
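# Illustrative usage sketch (not part of the original tests): the `monres` helper
# above is typically fed to `patch.object(TestAgent, "_monitor_workers", ...)` as a
# `side_effect` sequence, simulating a worker group that stays healthy for two
# monitoring intervals and then succeeds. The function name is illustrative only.
def _example_monitor_side_effects():
    return [
        monres(WorkerState.HEALTHY),
        monres(WorkerState.HEALTHY),
        monres(WorkerState.SUCCEEDED),
    ]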
class SimpleElasticAgentTest(unittest.TestCase):
def _get_worker_spec(
self,
max_restarts=1,
monitor_interval=1.0,
role="test_trainer",
local_world_size=8,
):
run_id = str(uuid.uuid4().int)
port = get_free_port()
endpoint = f"127.0.0.1:{port}"
rdzv_params = RendezvousParameters(
backend="static",
endpoint=endpoint,
run_id=run_id,
min_nodes=1,
max_nodes=1,
rank=0,
)
rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_params)
spec = WorkerSpec(
role=role,
local_world_size=local_world_size,
fn=do_nothing,
args=(),
rdzv_handler=rdzv_handler,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
)
return spec
def test_agent_constructor(self):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
        self.assertEqual(WorkerState.INIT, worker_group.state)
        self.assertEqual(spec.max_restarts, agent._remaining_restarts)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
agent._record_flakiness_metric()
put_metric_mock.assert_called_with("workers.test_trainer.flakiness", 0)
agent._worker_group.spec.max_restarts = 10
agent._remaining_restarts = 3
agent._record_flakiness_metric()
put_metric_mock.assert_called_with("workers.test_trainer.flakiness", 63)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric_zero_restarts(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
spec.max_restarts = 0
agent = TestAgent(spec)
agent._record_flakiness_metric()
put_metric_mock.assert_called_with("workers.test_trainer.flakiness", 0)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric_user_exception(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
agent._record_flakiness_metric(True)
put_metric_mock.assert_called_with("workers.test_trainer.flakiness", 100)
@patch.object(TestAgent, "_invoke_run")
@patch.object(TestAgent, "_record_metrics")
@patch.object(TestAgent, "_record_worker_events")
@patch.object(TestAgent, "_shutdown")
def test_invoke_run(
self, shutdown_mock, record_events_mock, record_metrics_mock, invoke_run_mock
):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
agent.run()
invoke_run_mock.assert_called_once()
record_metrics_mock.assert_called_once()
record_events_mock.assert_called_once()
shutdown_mock.assert_called_once()
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_metrics_success_no_retries(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
group_result = RunResult({}, {})
agent._record_metrics(group_result)
calls = self._get_record_metrics_test_calls(success_no_retries=1)
put_metric_mock.assert_has_calls(calls, any_order=True)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_metrics_success_with_retries(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=10)
agent = TestAgent(spec)
agent._remaining_restarts = 2
group_result = RunResult({}, {})
agent._record_metrics(group_result)
calls = self._get_record_metrics_test_calls(success_with_retries=1)
put_metric_mock.assert_has_calls(calls, any_order=True)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_metrics_failed_with_retries(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=10)
agent = TestAgent(spec)
agent._remaining_restarts = 2
group_result = RunResult(
state=WorkerState.FAILED, return_values={}, failures={0: 0}
)
agent._record_metrics(group_result)
calls = self._get_record_metrics_test_calls(failed_with_retries=1)
put_metric_mock.assert_has_calls(calls, any_order=True)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_metrics_failed_no_retries(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=10)
agent = TestAgent(spec)
group_result = RunResult(
state=WorkerState.FAILED, return_values={}, failures={0: 0}
)
agent._record_metrics(group_result)
calls = self._get_record_metrics_test_calls(failed_no_retries=1)
put_metric_mock.assert_has_calls(calls, any_order=True)
def _get_record_metrics_test_calls(
self,
success_with_retries=0,
success_no_retries=0,
failed_with_retries=0,
failed_no_retries=0,
):
calls = [
call("workers.test_trainer.run_success_with_retries", success_with_retries),
call("workers.test_trainer.run_success_no_retries", success_no_retries),
call("workers.test_trainer.run_failed_with_retries", failed_with_retries),
call("workers.test_trainer.run_failed_no_retries", failed_no_retries),
]
return calls
def test_rendezvous(self):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
agent._rendezvous(worker_group)
# single agent rdzv
self.assertEqual(1, worker_group.group_world_size)
self.assertEqual(0, worker_group.group_rank)
master_addr, master_port = agent._get_master_addr_port(worker_group.store)
self.assertEqual(_get_fq_hostname(), master_addr)
self.assertTrue(master_port > 0)
rank_set = {w.global_rank for w in worker_group.workers}
for w in worker_group.workers:
self.assertIsNone(w.id)
local_world_size = spec.local_world_size
group_world_size = worker_group.group_world_size
group_rank = worker_group.group_rank
self.assertEqual(local_world_size * group_world_size, w.world_size)
self.assertEqual(
local_world_size * group_rank + w.local_rank, w.global_rank
)
self.assertSetEqual(set(range(w.world_size)), rank_set)
def test_initialize_workers(self):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
agent._initialize_workers(worker_group)
self.assertEqual(WorkerState.HEALTHY, worker_group.state)
for i in range(spec.local_world_size):
worker = worker_group.workers[i]
self.assertEqual(worker.id, worker.global_rank)
def test_restart_workers(self):
spec = self._get_worker_spec()
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
num_restarts = 3
for _ in range(0, num_restarts):
agent._restart_workers(worker_group)
self.assertEqual(WorkerState.HEALTHY, worker_group.state)
# test_rendezvous and test_initialize_workers
# already validates the correctness of these fields
# simply validate that they are not None
# (e.g. that they get assigned)
self.assertIsNotNone(worker_group.group_rank)
self.assertIsNotNone(worker_group.group_world_size)
for w in worker_group.workers:
self.assertIsNotNone(w.id)
self.assertIsNotNone(w.global_rank)
self.assertIsNotNone(w.world_size)
self.assertEqual(num_restarts, agent.start_workers_call_count)
self.assertEqual(num_restarts, agent.stop_workers_call_count)
@patch.object(
TestAgent,
"_monitor_workers",
side_effect=[
monres(WorkerState.HEALTHY),
monres(WorkerState.HEALTHY),
monres(WorkerState.SUCCEEDED),
],
)
@patch.object(TestAgent, "_record_worker_events")
def test_run_happy_path(self, record_events_mock, mock_monitor_workers):
# worker starts
# is always healthy
# then succeeds
max_restarts = 10
spec = self._get_worker_spec(max_restarts)
agent = TestAgent(spec)
agent.run()
# no failure, no membership changes -> no retries
        self.assertEqual(max_restarts, agent._remaining_restarts)
record_events_mock.assert_called_once()
@patch.object(TestAgent, "_initialize_workers", side_effect=RuntimeError())
def test_run_initialization_failure(self, mock_initialize_workers):
spec = self._get_worker_spec()
agent = TestAgent(spec)
worker_group = agent._worker_group
with self.assertRaises(RuntimeError):
agent.run()
self.assertEqual(WorkerState.INIT, worker_group.state)
def test_run_max_retries_exceeded(self):
for restartable_state in [
monres(WorkerState.FAILED),
monres(WorkerState.UNHEALTHY),
]:
with patch.object(
TestAgent, "_monitor_workers", return_value=restartable_state
) as mock_monitor_workers:
spec = self._get_worker_spec(max_restarts=3, monitor_interval=0.1)
agent = TestAgent(spec)
worker_group = agent._worker_group
agent.run()
self.assertEqual(WorkerState.FAILED, worker_group.state)
self.assertEqual(0, agent._remaining_restarts)
                # one monitor call for the initial attempt + one per retry
self.assertEqual(spec.max_restarts + 1, mock_monitor_workers.call_count)
@patch.object(
TestAgent,
"_monitor_workers",
side_effect=[
monres(WorkerState.HEALTHY),
monres(WorkerState.HEALTHY),
monres(WorkerState.HEALTHY),
monres(WorkerState.SUCCEEDED),
],
)
@patch.object(RendezvousHandler, "num_nodes_waiting", side_effect=[1, 1, 0])
@patch.object(TestAgent, "_record_worker_events")
def test_run_membership_change(
self, record_events_mock, mock_num_nodes_waiting, mock_monitor_workers
):
spec = self._get_worker_spec(max_restarts=1, monitor_interval=0.1)
agent = TestAgent(spec)
worker_group = agent._worker_group
agent.run()
        self.assertEqual(WorkerState.SUCCEEDED, worker_group.state)
record_events_mock.assert_called_once()
@patch.object(
TestAgent, "_monitor_workers", return_value=monres(WorkerState.UNKNOWN)
)
def test_run_unknown_state(self, mock_monitor_workers):
# when the state is unknown we exit immediately; no retries
spec = self._get_worker_spec(max_restarts=100, monitor_interval=0.1)
agent = TestAgent(spec)
worker_group = agent._worker_group
with self.assertRaises(Exception):
agent.run()
self.assertEqual(WorkerState.UNKNOWN, worker_group.state)
self.assertEqual(1, mock_monitor_workers.call_count)
self.assertEqual(spec.max_restarts, agent._remaining_restarts)
def test_get_ranks(self):
role_infos = [
_RoleInstanceInfo("parameter_server", 0, 4),
_RoleInstanceInfo("trainer", 1, 1),
_RoleInstanceInfo("trainer", 2, 2),
_RoleInstanceInfo("trainer", 3, 3),
_RoleInstanceInfo("parameter_server", 4, 5),
]
spec = self._get_worker_spec(
max_restarts=3, monitor_interval=0.1, role="not_used", local_world_size=8
)
agent = TestAgent(spec)
total_sum, ranks = agent._get_ranks(role_infos, 0, 0, len(role_infos))
        self.assertEqual(15, total_sum)
        self.assertEqual([0, 1, 2, 3], list(ranks))
def test_assign_worker_ranks(self):
role_infos = [
_RoleInstanceInfo("parameter_server", 0, 4),
_RoleInstanceInfo("trainer", 1, 1),
_RoleInstanceInfo("trainer", 2, 2),
_RoleInstanceInfo("trainer", 3, 3),
_RoleInstanceInfo("parameter_server", 4, 5),
]
num_agents = len(role_infos)
with patch.object(TestAgent, "_share_and_gather", return_value=role_infos):
self.verify_worker_ranks(
role_infos[0], num_agents, [0, 1, 2, 3], [0, 1, 2, 3]
)
self.verify_worker_ranks(role_infos[1], num_agents, [4], [0])
self.verify_worker_ranks(role_infos[2], num_agents, [5, 6], [1, 2])
self.verify_worker_ranks(role_infos[3], num_agents, [7, 8, 9], [3, 4, 5])
def verify_worker_ranks(
self, agent_config, total_agents, expected_global_ranks, expected_role_ranks
):
role, agent_rank, local_world_size = (
agent_config.role,
agent_config.rank,
agent_config.local_world_size,
)
spec = self._get_worker_spec(
max_restarts=3,
monitor_interval=0.1,
role=role,
local_world_size=local_world_size,
)
agent = TestAgent(spec)
workers = agent._assign_worker_ranks(None, agent_rank, total_agents, spec)
self.assertEqual(
expected_global_ranks, [worker.global_rank for worker in workers]
)
self.assertEqual(expected_role_ranks, [worker.role_rank for worker in workers])
@patch("torch.distributed.elastic.utils.store.synchronize")
def test_share_and_gather(self, sync_mock):
        # _share_and_gather should deserialize whatever utils.store.synchronize returns
spec = self._get_worker_spec(max_restarts=100, monitor_interval=0.1)
agent = TestAgent(spec)
expected_agent_infos = [
_RoleInstanceInfo("trainer", 0, 10),
_RoleInstanceInfo("trainer", 1, 10),
_RoleInstanceInfo("validator", 2, 10),
]
sync_mock.return_value = [obj.serialize() for obj in expected_agent_infos]
result = agent._share_and_gather(MagicMock(), 1, 3, spec)
sync_mock.assert_called_once()
for expected_role_info, actual_role_info in zip(expected_agent_infos, result):
self.assertEqual(expected_role_info.role, actual_role_info.role)
self.assertEqual(expected_role_info.rank, actual_role_info.rank)
self.assertEqual(
expected_role_info.local_world_size, actual_role_info.local_world_size
)
def test_get_event(self):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
event = agent.get_event_succeeded()
self.assertEqual("AGENT", event.source)
self.assertEqual("static", event.metadata["rdzv_backend"])
self.assertEqual("SUCCEEDED", event.metadata["state"])
self.assertEqual(spec.role, event.metadata["role"])
def test_get_worker_status_event(self):
spec = self._get_worker_spec(max_restarts=4)
agent = TestAgent(spec)
agent._remaining_restarts = spec.max_restarts - 2
actual_event = agent._construct_event(
state="SUCCEEDED",
source="WORKER",
worker=agent._worker_group.workers[0],
)
self.assertEqual("WORKER", actual_event.source)
self.assertEqual("static", actual_event.metadata["rdzv_backend"])
self.assertEqual("SUCCEEDED", actual_event.metadata["state"])
self.assertEqual(spec.role, actual_event.metadata["role"])
self.assertEqual(2, actual_event.metadata["agent_restarts"])
@patch("torch.distributed.elastic.agent.server.api.put_metric")
@patch.object(TestAgent, "_invoke_run")
def test_agent_process_signal_exception(self, invoke_run, _):
spec = self._get_worker_spec(max_restarts=0)
agent = TestAgent(spec)
invoke_run.side_effect = SignalException(
"signal exception", sigval=signal.SIGTERM
)
with patch.object(agent, "_shutdown") as shutdown_mock:
with self.assertRaises(SignalException):
agent.run()
args, _ = shutdown_mock.call_args
self.assertEqual(signal.SIGTERM, args[0])
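# Illustrative sketch (not part of the original test file) of the arithmetic behind
# `test_assign_worker_ranks` above: assuming agents are ordered by their rendezvous
# rank, each agent's workers get a contiguous block of global ranks starting at the
# prefix sum of the preceding agents' `local_world_size`. Names are illustrative only.
def _example_expected_global_ranks(role_infos):
    ordered = sorted(role_infos, key=lambda info: info.rank)
    base, blocks = 0, {}
    for info in ordered:
        blocks[info.rank] = list(range(base, base + info.local_world_size))
        base += info.local_world_size
    return blocks
# With the role_infos used above, agent 0 gets [0, 1, 2, 3], agent 1 gets [4],
# agent 2 gets [5, 6], and agent 3 gets [7, 8, 9], matching `verify_worker_ranks`.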
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/agent/server/test/api_test.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import unittest
from dataclasses import asdict
from unittest.mock import patch
from torch.distributed.elastic.events import (
Event,
EventSource,
NodeState,
RdzvEvent,
_get_or_create_logger,
construct_and_record_rdzv_event,
)
from torch.testing._internal.common_utils import run_tests
class EventLibTest(unittest.TestCase):
def assert_event(self, actual_event, expected_event):
self.assertEqual(actual_event.name, expected_event.name)
self.assertEqual(actual_event.source, expected_event.source)
self.assertEqual(actual_event.timestamp, expected_event.timestamp)
self.assertDictEqual(actual_event.metadata, expected_event.metadata)
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_get_or_create_logger(self, logging_handler_mock):
logging_handler_mock.return_value = logging.NullHandler()
logger = _get_or_create_logger("test_destination")
self.assertIsNotNone(logger)
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def test_event_created(self):
event = Event(
name="test_event",
source=EventSource.AGENT,
metadata={"key1": "value1", "key2": 2},
)
self.assertEqual("test_event", event.name)
self.assertEqual(EventSource.AGENT, event.source)
self.assertDictEqual({"key1": "value1", "key2": 2}, event.metadata)
def test_event_deser(self):
event = Event(
name="test_event",
source=EventSource.AGENT,
metadata={"key1": "value1", "key2": 2, "key3": 1.0},
)
json_event = event.serialize()
deser_event = Event.deserialize(json_event)
self.assert_event(event, deser_event)
class RdzvEventLibTest(unittest.TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_construct_and_record_rdzv_event(self, get_mock, record_mock):
get_mock.return_value = logging.StreamHandler()
construct_and_record_rdzv_event(
run_id="test_run_id",
message="test_message",
node_state=NodeState.RUNNING,
)
record_mock.assert_called_once()
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_construct_and_record_rdzv_event_does_not_run_if_invalid_dest(self, get_mock, record_mock):
get_mock.return_value = logging.NullHandler()
construct_and_record_rdzv_event(
run_id="test_run_id",
message="test_message",
node_state=NodeState.RUNNING,
)
record_mock.assert_not_called()
def assert_rdzv_event(self, actual_event: RdzvEvent, expected_event: RdzvEvent):
self.assertEqual(actual_event.name, expected_event.name)
self.assertEqual(actual_event.run_id, expected_event.run_id)
self.assertEqual(actual_event.message, expected_event.message)
self.assertEqual(actual_event.hostname, expected_event.hostname)
self.assertEqual(actual_event.pid, expected_event.pid)
self.assertEqual(actual_event.node_state, expected_event.node_state)
self.assertEqual(actual_event.master_endpoint, expected_event.master_endpoint)
self.assertEqual(actual_event.rank, expected_event.rank)
self.assertEqual(actual_event.local_id, expected_event.local_id)
self.assertEqual(actual_event.error_trace, expected_event.error_trace)
def get_test_rdzv_event(self) -> RdzvEvent:
return RdzvEvent(
name="test_name",
run_id="test_run_id",
message="test_message",
hostname="test_hostname",
pid=1,
node_state=NodeState.RUNNING,
master_endpoint="test_master_endpoint",
rank=3,
local_id=4,
error_trace="test_error_trace",
)
def test_rdzv_event_created(self):
event = self.get_test_rdzv_event()
self.assertEqual(event.name, "test_name")
self.assertEqual(event.run_id, "test_run_id")
self.assertEqual(event.message, "test_message")
self.assertEqual(event.hostname, "test_hostname")
self.assertEqual(event.pid, 1)
self.assertEqual(event.node_state, NodeState.RUNNING)
self.assertEqual(event.master_endpoint, "test_master_endpoint")
self.assertEqual(event.rank, 3)
self.assertEqual(event.local_id, 4)
self.assertEqual(event.error_trace, "test_error_trace")
def test_rdzv_event_deserialize(self):
event = self.get_test_rdzv_event()
json_event = event.serialize()
deserialized_event = RdzvEvent.deserialize(json_event)
self.assert_rdzv_event(event, deserialized_event)
self.assert_rdzv_event(event, RdzvEvent.deserialize(event))
def test_rdzv_event_str(self):
event = self.get_test_rdzv_event()
self.assertEqual(str(event), json.dumps(asdict(event)))
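# Minimal round-trip sketch (not part of the original tests): serialize an Event and
# recover it, mirroring `test_event_deser` above. Values are illustrative only.
def _example_event_round_trip():
    event = Event(name="example", source=EventSource.AGENT, metadata={"key": 1})
    return Event.deserialize(event.serialize())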
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/elastic/events/lib_test.py
|
# Owner(s): ["oncall: distributed"]
import contextlib
import os
import sys
from typing import Any, Optional
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
require_n_gpus_for_nccl_backend,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
# Constants used for testing post-hooks
BEFORE_CONSTANT = 41
AFTER_CONSTANT = 42
class AllReducerJoinHook(JoinHook):
r"""
Join hook for :class:`AllReducer`.
Arguments:
allreducer (AllReducer): the :class:`AllReducer` object using this
hook.
num_allreduces (int): the number of all-reduces to shadow per
iteration.
run_post_hook (bool): a flag enabling the post-hook logic.
"""
def __init__(
self,
allreducer,
num_allreduces,
run_post_hook
):
self.allreducer = allreducer
self.num_allreduces = num_allreduces
self.run_post_hook = run_post_hook
def main_hook(self):
r"""
Shadows each all-reduce; the number of all-reduces is passed into the
constructor as ``num_allreduces``.
"""
device = self.allreducer.device
for _ in range(self.num_allreduces):
t = torch.zeros(1, device=device)
dist.all_reduce(t)
def post_hook(self, is_last_joiner: bool):
r"""
Broadcasts a tensor containing a magic constant ``AFTER_CONSTANT`` from
the last joiner to all other processes.
"""
if not self.run_post_hook:
return
rank = dist.get_rank(self.allreducer.process_group)
common_rank = self.allreducer.find_common_rank(rank, is_last_joiner)
device = self.allreducer.device
if rank == common_rank:
self.allreducer.post_hook_tensor = torch.tensor([AFTER_CONSTANT], device=device)
dist.broadcast(self.allreducer.post_hook_tensor, src=common_rank)
class AllReducer(Joinable):
r"""
Example :class:`Joinable` that performs some number of all-reduces as its
per-iteration collective communication.
"""
def __init__(self, device, process_group):
super(AllReducer, self).__init__()
self.device = device
self.process_group = process_group
self.post_hook_tensor = torch.tensor([BEFORE_CONSTANT], device=self.device)
def __call__(self, num_allreduces=1):
r"""
        All-reduces a one-element tensor of ones ``num_allreduces``-many times
        and returns the total result.
"""
Join.notify_join_context(self)
device = self.device
total = 0
for _ in range(num_allreduces):
t = torch.ones(1, device=device)
dist.all_reduce(t)
total += t.item()
return total
def join_hook(self, **kwargs) -> JoinHook:
r"""
Returns a join hook that shadows some number of all-reduces; by default,
this number is 1.
"""
num_allreduces = kwargs.get("num_allreduces", 1)
run_post_hook = kwargs.get("run_post_hooks", False)
return AllReducerJoinHook(
self,
num_allreduces,
run_post_hook
)
@property
def join_device(self) -> torch.device:
return self.device
@property
def join_process_group(self) -> Any:
return self.process_group
def find_common_rank(self, rank, to_consider):
r"""
Returns the max rank of the ones to consider over the process group.
"""
common_rank = torch.tensor(
[rank if to_consider else -1],
device=self.device
)
dist.all_reduce(common_rank, op=dist.ReduceOp.MAX, group=self.process_group)
common_rank = common_rank.item()
assert common_rank >= 0
return common_rank
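# Illustrative usage sketch (not part of the original test file): how `AllReducer`
# is typically driven under the generic join context. It assumes a process group is
# already initialized and that `device`/`inputs` are provided per rank; the function
# name is illustrative only.
def _example_join_usage(device, process_group, inputs):
    allreducer = AllReducer(device, process_group)
    total = 0
    with Join([allreducer]):
        for _ in inputs:
            total += allreducer()
    return total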
class TestJoin(MultiProcessTestCase):
r"""Test cases for the generic join context."""
def setUp(self):
super(TestJoin, self).setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
else torch.device("cpu")
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def tearDown(self):
try:
dist.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def dist_init(self, rank, world_size, backend=BACKEND):
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(
backend=backend,
store=store,
rank=rank,
world_size=world_size
)
def construct_uneven_inputs(self, base, offset, device=None):
r"""
Returns uneven inputs: rank i gets ``base`` + i * ``offset`` inputs.
"""
if device is None:
device = self.device
return [torch.zeros(1, device=device) for _ in range(base + self.rank * offset)]
def construct_even_inputs(self, base, device=None):
r"""Returns even inputs: each rank gets ``base`` inputs."""
if device is None:
device = self.device
return [torch.zeros(1, device=device) for _ in range(base)]
@property
def base_num_inputs(self):
r"""Base number of inputs to be used by all ranks."""
return 3
@property
def offset(self):
r"""Rank i gets i * ``offset`` additional inputs."""
return 1
def _test_join_base(
self,
uneven_inputs: bool,
num_joinables: int,
enable: bool,
throw_on_early_termination: bool,
num_allreduces: int,
run_post_hooks: bool,
expected_total: Optional[int] = None,
):
r"""
Skeleton for all :class:`Join` tests.
Arguments:
uneven_inputs (bool): ``True`` to use uneven inputs; ``False``
otherwise.
num_joinables (int): number of :class:`AllReducer` s to construct.
enable (bool): ``True`` to enable the join context manager;
``False`` otherwise.
throw_on_early_termination (bool): ``True`` to raise an exception
upon detecting uneven inputs; ``False`` otherwise.
num_allreduces (int): number of all-reduces to perform per input.
run_post_hooks (bool): ``True`` to run post-hooks; ``False``
otherwise.
expected_total (Optional[int]): ``None`` to not check the expected
all-reduce total; otherwise, the expected total; default is
``None``.
"""
self.dist_init(self.rank, self.world_size)
allreducers = [
AllReducer(self.device, self.process_group)
for _ in range(num_joinables)
]
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), BEFORE_CONSTANT)
inputs = self.construct_uneven_inputs(self.base_num_inputs, self.offset) \
if uneven_inputs \
else self.construct_even_inputs(self.base_num_inputs)
allreduce_total = 0
# Expect a `RuntimeError` if `throw_on_early_termination=True`
# Rank 0 exhausts its inputs first
expected_msg = "Rank 0 exhausted all inputs." if self.rank == 0 \
else "Detected at least one rank that exhausted inputs. " \
"Throwing across all ranks."
with self.assertRaisesRegex(
RuntimeError,
expected_msg
) if throw_on_early_termination else contextlib.suppress():
with Join(
allreducers,
enable=enable,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks
):
for _ in inputs:
for allreducer in allreducers:
allreduce_total += allreducer(num_allreduces)
if throw_on_early_termination:
return
        # Check `expected_total` if not `None`
        if expected_total is not None:
self.assertEqual(allreduce_total, expected_total)
        # All `AllReducer` instances should receive the updated
        # `post_hook_tensor` from the last-joined process
if run_post_hooks:
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), AFTER_CONSTANT)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_single_joinable_main_hooks(self):
r"""Tests the main hooks of a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 1
run_post_hooks = False
# Non-joined processes all-reduce a 1, so this rank's all-reduce total
# should be precisely equal to the total number of inputs processed
# before it joined
expected_total = self.world_size * self.base_num_inputs
# Rank i runs for i additional iterations
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_single_joinable_post_hooks(self):
r"""Tests the post-hooks of a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 0 # set to 0 to skip the main hooks
        run_post_hooks = True  # exercise the post-hook path this test is named for
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_single_joinable(self):
r"""
Tests the main hooks and post-hooks of a single :class:`Joinable`
together.
This combines ``test_single_joinable_main_hooks()`` and
``test_single_joinable_post_hooks()`` into a single test to ensure that
main hooks and post-hooks operate correctly together.
"""
num_joinables = 1
num_allreduces = 1
run_post_hooks = True
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_multiple_joinables(self):
r"""
Tests the main hooks and post-hooks of multiple :class:`Joinable` s
together.
This generalizes ``test_single_joinable()`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
run_post_hooks = True
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
        # The expected total is now multiplied by a factor of `num_joinables`
expected_total *= num_joinables
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_single_joinable_disable(self):
r"""Tests ``enable=False`` for a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 1
uneven_inputs = False
enable = False
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs
self._test_join_base(
uneven_inputs=uneven_inputs,
num_joinables=num_joinables,
enable=enable,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_multiple_joinable_disable(self):
r"""
Tests ``enable=False`` for multiple :class:`Joinable` s.
This generalizes ``test_single_joinable_disable`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
uneven_inputs = False
enable = False
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs * num_joinables
self._test_join_base(
uneven_inputs=uneven_inputs,
num_joinables=num_joinables,
enable=enable,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_single_joinable_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for a single
:class:`Joinable`.
"""
num_joinables = 1
num_allreduces = 1
throw_on_early_termination = True
run_post_hooks = False
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_multiple_joinables_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for multiple
:class:`Joinable` s together.
This generalizes ``test_single_joinable_throw`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
throw_on_early_termination = True
run_post_hooks = False
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None
)
@require_n_gpus_for_nccl_backend(
WORLD_SIZE, BACKEND
)
def test_join_kwargs(self):
r"""
Tests passing keyword arguments to the context manager.
"""
num_joinables = 1
num_allreduces = 2
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
        # The expected total is now multiplied by a factor of `num_allreduces`
expected_total *= num_allreduces
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total
)
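# Closed-form restatement (illustrative only, not part of the original tests) of the
# expected all-reduce total used in the uneven-input tests above, assuming a single
# joinable and a single all-reduce per input:
#   total = world_size * base + offset * (rank * world_size - rank * (rank + 1) / 2)
def _example_expected_total(rank, world_size, base, offset):
    return world_size * base + offset * (rank * world_size - rank * (rank + 1) // 2)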
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/algorithms/test_join.py
|
# Owner(s): ["oncall: distributed"]
import os
import sys
import torch
from torch import nn
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
def gpus_for_rank(world_size):
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
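# Example: with 4 visible GPUs and world_size=2, gpus_for_rank(2) returns
# [[0, 1], [2, 3]], i.e. each rank gets a contiguous, equal-sized slice of the
# visible devices.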
class Task(nn.Module):
def __init__(self):
super(Task, self).__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
def forward(self, x):
return self.p * x
class TestDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x ** (1 + rank))
class DistributedDataParallelCommHookTest(MultiProcessTestCase):
def setUp(self):
super(DistributedDataParallelCommHookTest, self).setUp()
self._spawn_processes()
def tearDown(self):
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _local_model(self):
local_model = TestDdpCommHook().cpu()
return local_model
def _get_grads(self, process_group, hook_type=None):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
TestDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
# Register DDP Communication Hook if defined
if hook_type is not None:
register_ddp_comm_hook(
comm_hook_type=hook_type, model=gpu_model, state=process_group
)
return self._run_and_get_grads(gpu_model)
def _run_and_get_grads(self, model):
torch.manual_seed(2020)
input = torch.randn(40, 20)
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
# The only layer
param = next(model.parameters())
return param.grad
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook(self):
"""
        This unit test verifies that registering the ``allreduce`` hook gives
        the same result as the no-hook case.
"""
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.ALLREDUCE)
torch.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_fp16compress_hook(self):
"""
        This unit test verifies that registering the ``fp16 compress`` hook
        gives a result close to the no-hook case.
"""
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.FP16_COMPRESS)
torch.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_tensor_hook(self):
"""
        This unit test verifies that registering the ``quantize per tensor``
        hook gives a result close to the no-hook case.
"""
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_TENSOR)
torch.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_channel_hook(self):
"""
        This unit test verifies that registering the ``quantize per channel``
        hook gives a result close to the no-hook case.
"""
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(
process_group, DDPCommHookType.QUANTIZE_PER_CHANNEL
)
torch.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_noop_hook(self):
"""
        This unit test verifies that registering the ``noop`` hook followed by
        a manual allreduce gives the same result as the no-hook case.
"""
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.NOOP)
# Apply a subsequent allreduce to average grads.
hook_grads.div_(self.world_size)
dist.all_reduce(hook_grads, group=process_group)
torch.testing.assert_allclose(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_is_last_hook(self):
store = dist.FileStore(self.file_name, self.world_size)
process_group = dist.ProcessGroupNCCL(store, self.rank, self.world_size)
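        # A DDP communication hook takes (state, bucket) and must return a
        # torch.futures.Future that resolves to the bucket's gradient tensor.
        # This hook skips the allreduce and only records whether each bucket is
        # the last one of the backward pass.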
def hook(flags, bucket):
flags.append(bucket.is_last())
fut = torch.futures.Future()
fut.set_result(bucket.buffer())
return fut
flags = []
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = nn.Sequential(
nn.Linear(2, 4000, bias=False),
*[nn.Linear(4000, 4000, bias=False) for _ in range(10)]
)
gpu_model = DistributedDataParallel(
model.to(device_id),
device_ids=[device_id],
process_group=process_group,
)
gpu_model.register_comm_hook(state=flags, hook=hook)
input = torch.randn(10, 2)
gpu_model(input).sum().backward()
self.assertTrue(flags[-1])
self.assertFalse(any(flags[:-1]))
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
pytorch-master
|
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
|
# Owner(s): ["oncall: distributed"]
import torch
import os
import torch.cuda
import sys
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_gloo,
skip_if_rocm,
skip_if_lt_x_gpu,
requires_nccl,
)
from torch.testing._internal.common_utils import sandcastle_skip_if, run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
torch.backends.cuda.matmul.allow_tf32 = False
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
if value is None:
value = size
if device_id is None:
return torch.empty(size, dtype=dtype).fill_(value)
else:
return torch.empty(size, dtype=dtype).fill_(value).cuda(device_id)
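# Example: _build_tensor([2, 2], 7) returns a 2x2 float tensor filled with 7.0;
# when `value` is omitted it defaults to `size` (only meaningful for scalar sizes).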
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print("Spawn not available, skipping tests.", file=sys.stderr)
sys.exit(0)
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class DistQuantizationTests(MultiProcessTestCase):
def setUp(self):
super(DistQuantizationTests, self).setUp()
self._spawn_processes()
torch.backends.cudnn.flags(allow_tf32=False).__enter__()
def tearDown(self):
super(DistQuantizationTests, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return int(os.environ["WORLD_SIZE"])
@requires_gloo()
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports all_gather_fp16")
def test_all_gather_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.FP16)
@requires_gloo()
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports all_gather_fp16")
def test_all_gather_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.BFP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_all_to_all_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_all_to_all_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_single_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16
)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_single_bfp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16
)
def _test_all_gather(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float, qtype=None):
for dest in group:
tensor = _build_tensor([dest + 1, dest + 1], rank, dtype=dtype)
tensors = [_build_tensor([dest + 1, dest + 1], -1, dtype=dtype) for i in group]
expected_tensors = [
_build_tensor([dest + 1, dest + 1], i, dtype=dtype) for i in group
]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if tensors[0].dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensors[0]).shape]
else:
tensor_shapes = [tensors[0].shape]
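                # `auto_quantize` wraps the collective so that tensors are
                # quantized to `qtype` (FP16/BFP16 here) before communication
                # and dequantized on receipt, trading precision for bandwidth.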
allgather = quant.auto_quantize(dist.all_gather, qtype, quant_loss=None)
allgather(tensors, tensor, group=group_id, async_op=False)
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
qtype=None
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size], dtype=dtype) * rank
for i, _ in enumerate(group)
]
out_tensors = [
torch.ones([(rank + 1), size], dtype=dtype) for _ in group
]
expected_tensors = [
torch.ones([rank + 1, size], dtype=dtype) * i for i in group
]
if cuda:
in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
expected_tensors = [
t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
]
out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
quantize_alltoall = quant.auto_quantize(dist.all_to_all, qtype, quant_loss=None)
quantize_alltoall(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all_single(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float, qtype=DQuantType.FP16
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank
out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
expected_tensor = torch.cat(
[torch.ones([rank + 1, size], dtype=dtype) * i for i in group]
)
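                # Example with world_size=2, rank 0: it sends splits [1, 2] of a
                # tensor filled with 0 and receives one row from each rank, so
                # the expected output is [[0, 0], [1, 1]].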
if cuda:
rank_to_GPU = rank_to_GPU[rank][0]
in_tensor = in_tensor.cuda(rank_to_GPU)
expected_tensor = expected_tensor.cuda(rank_to_GPU)
out_tensor = out_tensor.cuda(rank_to_GPU)
quantize_alltoall_single = quant.auto_quantize(dist.all_to_all_single, qtype, quant_loss=None)
quantize_alltoall_single(out_tensor, in_tensor, out_splits=out_splits, in_splits=in_splits, group=group_id)
self.assertEqual(out_tensor, expected_tensor)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/algorithms/quantization/test_quantization.py
|
# Owner(s): ["oncall: distributed"]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import sys
import unittest
from contextlib import suppress
from typing import Any, List, cast
import numpy as np
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
hook_with_zero_step,
hook_with_zero_step_interleaved,
)
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
allreduce_hook,
)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD, AdamW
from torch.testing._internal import common_distributed
from torch.testing._internal.common_utils import (
IS_WINDOWS,
TEST_WITH_ASAN,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
# Use GLOO on GPU when running CUDA + Windows
def _get_backend_for_tests():
return (
dist.Backend.NCCL if not IS_WINDOWS and torch.cuda.is_available()
# Windows only has GLOO, but GLOO GPU works. And use GLOO CPU when
# no GPUs are available.
else dist.Backend.GLOO
)
BACKEND = _get_backend_for_tests()
@unittest.skipIf(
TEST_WITH_ASAN or TEST_WITH_DEV_DBG_ASAN, "CUDA + ASAN does not work."
)
class TestZeroRedundancyOptimizer(common_distributed.MultiProcessTestCase):
def setUp(self):
super(TestZeroRedundancyOptimizer, self).setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
self._spawn_processes()
@property
def device(self):
return torch.device("cuda") if torch.cuda.is_available() \
else torch.device("cpu")
@property
def world_size(self):
return 1
def tearDown(self):
try:
torch.distributed.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def dist_init(self, rank, world_size=-1, backend=BACKEND):
if (world_size < 1):
world_size = self.world_size
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(
backend=backend, store=store, rank=rank, world_size=world_size,
)
# TODO: sandcastle_skip_if does not work here.
@unittest.skipIf(
TEST_WITH_ASAN or TEST_WITH_DEV_DBG_ASAN, "CUDA + ASAN does not work."
)
class TestZeroRedundancyOptimizerSingleRank(TestZeroRedundancyOptimizer):
def test_state_dict(self):
"""Check that ZeroRedundancyOptimizer exposes the expected state dict
interface, irrespective of the sharding."""
self.dist_init(self.rank)
LR1 = 0.1
LR2 = 0.01
MOMENTUM = 0.9
RECIPIENT_RANK = 0 # rank 0 is the only rank since the world size is 1
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x], optimizer_class=SGD, lr=LR1, momentum=MOMENTUM,
)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=self.device))
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.0], device=self.device),
)
o.zero_grad()
o.consolidate_state_dict(to=RECIPIENT_RANK)
state_dict = o.state_dict()
# Check that the state dict has keys compliant with PyTorch
self.assertIn("param_groups", state_dict.keys())
self.assertIn("state", state_dict.keys())
# Check that the state has the expected keys
self.assertEqual(state_dict["param_groups"][0]["lr"], 0.1)
self.assertEqual(state_dict["param_groups"][0]["momentum"], 0.9)
self.assertFalse(state_dict["param_groups"][0]["nesterov"])
self.assertEqual(state_dict["param_groups"][0]["weight_decay"], 0.0)
self.assertEqual(state_dict["param_groups"][0]["dampening"], 0.0)
# Check that the state and the `param_groups` attribute are in sync
for k in state_dict["param_groups"][0]:
if k != "params":
self.assertEqual(
state_dict["param_groups"][0][k],
o.param_groups[0][k],
)
# Check that the state is reloaded with the correct values and device
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=LR2)
o.load_state_dict(state_dict)
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.0], device=self.device),
)
        # We should be using `LR1` and not `LR2` after reloading, both within
# the optimizer and as exposed by the `param_groups` attribute
self.assertEqual(o.param_groups[0]["lr"], LR1)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.71], device=self.device))
self.assertEqual(
o.optim.state[x]["momentum_buffer"],
torch.tensor([1.9], device=self.device),
)
        # Check that the exposed `param_groups` are on the proper device
self.assertEqual(o.param_groups[0]["params"][0].device, x.device)
def test_lr_scheduler(self):
"""Check that a normal PyTorch ``lr_scheduler`` is usable with
ZeroRedundancyOptimizer."""
self.dist_init(self.rank)
NUM_ITERS = 5
LR = 0.01
x = torch.tensor([1.0], device=self.device, requires_grad=True)
x2 = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=LR)
o2 = torch.optim.SGD([x2], lr=LR)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(NUM_ITERS):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_step_with_kwargs(self):
"""Check that the ``step(**kwargs)`` interface is properly exposed."""
self.dist_init(self.rank)
LR = 0.1
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=None):
super().step()
kwarg.append(5)
kwarg: List[Any] = []
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x], optimizer_class=SGDWithStepKWArg, lr=LR,
)
x.backward()
o.step(0, kwarg=kwarg)
self.assertEqual(kwarg, [5])
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_step_with_extra_inner_key(self):
"""Check that ZeroRedundancyOptimizer wrapping an optimizer that adds
extra keys to ``param_groups`` exposes those keys through ZeRO's own
``param_groups``."""
self.dist_init(self.rank)
LR = 0.1
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithNewKey, lr=LR)
x.backward()
o.step()
self.assertEqual(o.param_groups[0]["new_key"], 0.1)
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_step_without_closure(self):
"""Check that the ``step()`` method (without closure) is handled as
expected."""
self.dist_init(self.rank)
LR = 0.1
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer(
[x], optimizer_class=SGDWithoutClosure, lr=LR,
)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=self.device))
def test_zero_grad(self):
"""Check that the ``zero_grad`` method is properly handled."""
self.dist_init(self.rank)
LR = 0.01
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = ZeroRedundancyOptimizer(m.parameters(), optimizer_class=SGD, lr=LR)
y = m(x)
y.backward(x)
        self.assertNotEqual(m.weight.grad, torch.zeros_like(m.weight))
        self.assertNotEqual(m.bias.grad, torch.zeros_like(m.bias))
o.zero_grad()
self.assertFalse(m.weight.grad)
self.assertFalse(m.bias.grad)
def test_constructor(self):
"""Check the robustness of the ZeroRedundancyOptimizer constructor by
passing different values for the ``params`` argument."""
self.dist_init(self.rank)
LR = 0.01
m = torch.nn.Sequential(
torch.nn.Linear(5, 10),
torch.nn.Linear(10, 10),
torch.nn.Linear(10, 10),
)
# Test various constructor inputs in the form: (input, expected error)
ctor_inputs = [
([], ValueError), # empty parameter list
(torch.randn(1), TypeError), # non-iterable: `torch.Tensor`
(1.2, TypeError), # non-iterable: `float`
([
{"params": [l.weight for l in m]},
{"params": [l.bias for l in m]},
], None), # iterable of dict
(list(m.parameters()) + [42], TypeError), # iterable containing invalid type
(m.parameters(), None), # `params` as a generator
(list(m.parameters()), None) # `params` as a list
]
for ctor_input, error in ctor_inputs:
context = self.assertRaises(error) if error else suppress()
with context:
ZeroRedundancyOptimizer(
ctor_input, optimizer_class=SGD, lr=LR,
)
# Test constructing with multiple parameter groups more thoroughly
WD = 0.01
BETAS = (0.9, 0.999)
EPS = 1e-8
params = [
{"params": [l.weight for l in m], "weight_decay": 0.},
{"params": [l.bias for l in m], "weight_decay": WD},
]
o = ZeroRedundancyOptimizer(
params, optimizer_class=AdamW,
lr=LR, betas=BETAS, eps=EPS,
)
assert len(o.param_groups) == 2, \
f"Expected 2 ZeRO param groups, but got {len(o.param_groups)}"
assert len(o.optim.param_groups) == 2, \
"Expected 2 local optimizer param groups, but got " \
f"{len(o.optim.param_groups)}"
def test_same_dense_param_type(self):
"""Check that ZeroRedundancyOptimizer raises an exception if the input
parameters include sparse tensors or different dense types.
NOTE: This test should be removed once support for sparse parameters
and varying parameter types is added.
"""
self.dist_init(self.rank)
LR = 0.01
inputs = [
[torch.sparse_coo_tensor(size=(2, 3))],
[torch.FloatTensor(1), torch.DoubleTensor(1)],
[torch.FloatTensor(1), torch.FloatTensor(1),
torch.sparse_coo_tensor(size=(2, 3))]
]
for input in inputs:
with self.assertRaises(ValueError):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=LR)
class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
@property
def device(self):
return torch.device(self.rank) if torch.cuda.is_available() \
else torch.device("cpu")
@property
def world_size(self):
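        # e.g. 2 on a CPU-only or 2-GPU machine, 4 when at least 4 GPUs are visible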
return min(4, max(2, torch.cuda.device_count()))
@property
def context(self):
return suppress() if not torch.cuda.is_available() \
else torch.cuda.device(self.rank)
def _check_same_model_params(
self,
model_a: torch.nn.Module,
model_b: torch.nn.Module,
message: str = "",
) -> None:
# Check that model parameters match
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
torch.testing.assert_close(
p_a, p_b, atol=1e-3, rtol=1e-5,
msg=f"Model parameters differ:\n{p_a} {p_b}\n" + message,
)
# Check that model buffers match
for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
torch.testing.assert_close(
b_a, b_b,
msg=f"Model buffers differ:\n{b_a} {b_b}\n" + message,
)
@common_distributed.skip_if_no_gpu
@common_distributed.skip_if_rocm
def test_step(self):
"""Check that ZeroRedundancyOptimizer properly exposes the ``step()``
interface."""
self.dist_init(self.rank, world_size=self.world_size)
LR = 0.01
with self.context:
x = torch.tensor([float(self.rank + 1)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m = m.to(self.device)
m_zero = copy.deepcopy(m).to(self.device)
o = SGD(m.parameters(), lr=LR)
o_zero = ZeroRedundancyOptimizer(
m_zero.parameters(), optimizer_class=SGD, lr=LR,
)
y = m(x)
y.backward(x)
y_zero = m_zero(x)
y_zero.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o.step()
for p in m_zero.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o_zero.step()
self.assertEqual(m.weight, m_zero.weight)
self.assertEqual(m.bias, m_zero.bias)
@common_distributed.skip_if_no_gpu
@common_distributed.skip_if_rocm
def test_step_with_closure(self):
"""Check that ZeroRedundancyOptimizer properly exposes the
``step(closure)`` interface."""
self.dist_init(self.rank, world_size=self.world_size)
with self.context:
for bucket_view in [False, True]:
x_val = self.rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor(
[x_val * weight + bias + error],
device=self.device,
)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(self.device)
o = ZeroRedundancyOptimizer(
m.parameters(),
optimizer_class=SGD,
parameters_as_bucket_view=bucket_view,
lr=0.1,
)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
self.assertEqual(loss, torch.tensor(error))
self.assertEqual(m.weight, torch.tensor([[1.1]]))
self.assertEqual(m.bias, torch.tensor([2.1]))
@common_distributed.skip_if_no_gpu
def test_lr_scheduler(self):
"""Check that a normal PyTorch ``lr_scheduler`` is usable with
ZeroRedundancyOptimizer."""
self.dist_init(self.rank)
x = torch.tensor([1.0], device=self.device, requires_grad=True)
x2 = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_sharding(self):
"""
Check ZeroRedundancyOptimizer's parameter sharding at construction
time.
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
``ZeroRedundancyOptimizer._partition_parameters()`` in
zero_redundancy_optimizer.py.
"""
self.dist_init(self.rank)
LR = 0.01
sizes = [9, 7, 5, 3]
params = []
for size in sizes * self.world_size:
params.append(torch.rand(size, 1))
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(
sum([x.numel() for x in o.optim.param_groups[0]["params"]]),
sum(sizes),
)
def test_add_param_group(self):
"""Check that ZeroRedundancyOptimizer properly handles adding a new
parameter group a posteriori and that all ranks get a shard of the
contained parameters.
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
``ZeroRedundancyOptimizer._partition_parameters()`` in
zero_redundancy_optimizer.py.
"""
self.dist_init(self.rank)
LR = 0.01
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * self.world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable so that they are factored
# into the size-based parameter partitioning
for p in params:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(len(o.param_groups), 1)
o.add_param_group({"params": [torch.rand(3, 1)]})
# Verify that new group is added to the correct partition, making
# all partitions have the same elements
self.assertEqual(len(o.param_groups), 2)
self.assertEqual(
sum([
x.numel()
for g in o.optim.param_groups
for x in g["params"]
]),
sum(sizes),
)
self.assertEqual(len(o.optim.param_groups), 2)
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that all but the first param are trainable so that they
# are factored into the size-based parameter partitioning
for p in params[1:]:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(len(o.param_groups), 1)
o.add_param_group({"params": [torch.rand(3, 1)]})
self.assertEqual(len(o.param_groups), 2)
self.assertEqual(len(o.optim.param_groups), 2)
all_trainable()
some_trainable()
@common_distributed.skip_if_no_gpu
def test_multiple_param_groups(self):
"""
Check parity between constructing ZeRO with multiple parameter groups
upfront versus adding parameter groups to ZeRO after construction
versus a non-sharded optimizer.
"""
self.dist_init(self.rank)
BATCH_SIZE, NUM_ITERS = 8, 3
INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 5, 10, 5
WD, LR = 0.01, 0.01
model1 = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
)
model2 = copy.deepcopy(model1)
model3 = copy.deepcopy(model1)
model1 = model1.to(self.device)
model2 = model2.to(self.device)
model3 = model3.to(self.device)
inputs = [
torch.randn(BATCH_SIZE, INPUT_DIM).to(self.device)
for _ in range(NUM_ITERS)
]
# Construct `optim1` with both parameter groups upfront
optim1 = ZeroRedundancyOptimizer(
[
{"params": [l.weight for l in model1], "weight_decay": 0.},
{"params": [l.bias for l in model1], "weight_decay": WD},
],
optimizer_class=AdamW, lr=LR,
)
# Construct `optim2` by adding the second parameter after
optim2 = ZeroRedundancyOptimizer(
[l.weight for l in model2],
optimizer_class=AdamW, lr=LR, weight_decay=0.,
)
optim2.add_param_group(
{"params": [l.bias for l in model2], "weight_decay": WD}
)
# Construct `optim3` as a non-sharded optimizer
optim3 = AdamW(
[
{"params": [l.weight for l in model3], "weight_decay": 0.},
{"params": [l.bias for l in model3], "weight_decay": WD},
], lr=LR,
)
# Check parity over a few iterations
for input in inputs:
for model, optim in (
(model1, optim1), (model2, optim2), (model3, optim3),
):
optim.zero_grad()
out = model(input)
loss = out.sum()
loss.backward()
optim.step()
for layer1, layer2, layer3 in zip(model1, model2, model3):
torch.testing.assert_close(layer1.weight, layer2.weight)
torch.testing.assert_close(layer1.weight, layer3.weight)
torch.testing.assert_close(layer1.bias, layer2.bias)
torch.testing.assert_close(layer1.bias, layer3.bias)
@common_distributed.skip_if_no_gpu
@common_distributed.skip_if_rocm
def test_collect_shards(self):
"""Check the state consolidation mechanism and the state dict exposed
by ZeroRedundancyOptimizer."""
self.dist_init(self.rank)
LR = 1e-3
MOMENTUM = 0.99
BATCH_SIZE, INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 3, 20, 10, 5
REFERENCE_RANK = 0
target = torch.rand((BATCH_SIZE, OUTPUT_DIM), device=self.device)
inputs = torch.rand((BATCH_SIZE, INPUT_DIM), device=self.device)
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(self.device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(self.device)
optimizer = ZeroRedundancyOptimizer(
model.parameters(),
optimizer_class=SGD,
lr=LR,
momentum=MOMENTUM, # ensure there exists state to shard
)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
# Run a dummy step so that the optimizer state dict exists
_ = optimizer.step(closure=closure)
# Get the optimizer state on the reference rank
optimizer.consolidate_state_dict(to=REFERENCE_RANK)
if self.rank == REFERENCE_RANK:
# Check that the state has the correct size
optimizer_state_dict = optimizer.state_dict()
self.assertEqual(
len(optimizer_state_dict["state"]),
len(list(model.parameters())),
)
else:
optimizer_state_dict = {}
# Load the optimizer state on all ranks without any exceptions
optimizer_state_dict = _broadcast_object(
optimizer_state_dict,
src_rank=REFERENCE_RANK,
group=dist.group.WORLD,
device=self.device,
)
optimizer.load_state_dict(optimizer_state_dict)
def test_nondefault_process_group(self):
"""Check that ZeroRedundancyOptimizer works with a non-default process
group consisting only of even ranks."""
# Skip the test if below the minimum world size since then the test is
# trivial
MIN_WORLD_SIZE = 4
if self.world_size < MIN_WORLD_SIZE:
common_distributed.logger.info(
"Skipping `test_nondefault_process_group()` since world size "
f"of {self.world_size} is less than {MIN_WORLD_SIZE}"
)
return
BACKEND = dist.Backend.GLOO
self.dist_init(self.rank, self.world_size, BACKEND)
# Use GPU if enough are available, or fall back to CPU otherwise, which
# is fine since Gloo backend supports both
if torch.cuda.is_available() and \
torch.cuda.device_count() >= self.world_size:
device = torch.device(self.rank)
else:
device = torch.device("cpu")
# Create a new process group consisting of the even ranks to exercise
# the case where the global and local ranks do not necessarily match
subgroup_ranks = [r for r in range(self.world_size) if r % 2 == 0]
process_group = dist.new_group(
ranks=subgroup_ranks, backend=BACKEND,
)
# Ranks not participating in the new process group are no longer needed
if self.rank not in subgroup_ranks:
return
# Set different seeds across ranks so that each rank gets different
# training data and hence the model sync check is meaningful
torch.manual_seed(self.rank)
np.random.seed(self.rank)
EPOCHS, BATCH_SIZE, INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 5, 3, 20, 10, 5
LR = 1e-3
MOMENTUM = 0.99
REFERENCE_RANK = 0
assert REFERENCE_RANK in subgroup_ranks, \
"Reference rank must be in the new process group"
loss_fn = torch.nn.L1Loss().to(device)
def check(optimizer):
for _ in range(EPOCHS):
target = torch.rand((BATCH_SIZE, OUTPUT_DIM), device=device)
inputs = torch.rand((BATCH_SIZE, INPUT_DIM), device=device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss /= self.world_size
loss.backward()
dist.all_reduce(loss, group=process_group)
return loss
_ = optimizer.step(closure=closure)
# Check that the parameters match across ranks after a step
for pg in optimizer.param_groups:
for p in pg["params"]:
receptacle = [
p.clone() for _ in subgroup_ranks
] if self.rank == REFERENCE_RANK else []
dist.gather(
p, receptacle, dst=REFERENCE_RANK,
group=process_group,
)
if self.rank == REFERENCE_RANK:
reference_param = receptacle[0]
for param in receptacle[1:]:
torch.testing.assert_close(
reference_param,
param,
msg="Models differ between ranks",
)
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(device)
optimizer = ZeroRedundancyOptimizer(
model.parameters(),
optimizer_class=SGD,
lr=LR,
momentum=MOMENTUM, # ensure there exists state to shard
process_group=process_group,
)
check(optimizer)
@common_distributed.skip_if_no_gpu
@parametrize(
"optimizer_class_str",
["Adam", "AdamW", "SGD"],
# Use string to appease the internal test name parser
)
@parametrize(
"maximize",
[False, True],
)
def test_local_optimizer_parity(
self,
optimizer_class_str: str,
maximize: bool,
):
"""When combined with DDP, check that a local optimizer gives the same
results as wrapping that optimizer with ZeroRedundancyOptimizer."""
self.dist_init(self.rank)
BATCHES = 20
BATCH_SIZE = 64
LR = 1e-3
INPUT_DIM = 2
HIDDEN_DIM = 3
OUTPUT_DIM = 3
torch.manual_seed(self.rank)
np.random.seed(self.rank)
if optimizer_class_str == "Adam":
optimizer_class = torch.optim.Adam
elif optimizer_class_str == "AdamW":
optimizer_class = torch.optim.AdamW
elif optimizer_class_str == "SGD":
optimizer_class = torch.optim.SGD
else:
assert 0, f"Unsupported optimizer class: {optimizer_class_str}"
with self.context:
# Define a base model with a different buffer for each rank
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(self.device)
model.register_buffer(
"test_buffer", torch.ones((1), device=self.device) * self.rank,
)
# Define models/optimizers for DDP with ZeRO and DDP with local
# optimizer
defaults = {"maximize": True} if maximize else {}
sharded_optimizer = ZeroRedundancyOptimizer(
params=model.parameters(), optimizer_class=optimizer_class,
lr=LR, **defaults,
)
sharded_ddp_model = DDP(
module=model, device_ids=[self.rank],
broadcast_buffers=True, find_unused_parameters=True,
)
local_model = copy.deepcopy(model).to(self.device)
ddp_optimizer = optimizer_class(
local_model.parameters(), lr=LR, **defaults,
)
ddp_model = DDP(
local_model, device_ids=[self.rank],
broadcast_buffers=True, find_unused_parameters=True,
)
# Check that the model is properly synchronized between ranks
# at construction time
self._check_same_model_params(
sharded_ddp_model, ddp_model,
"Models differ from the start",
)
def check_step():
input_tensor = torch.rand((BATCH_SIZE, INPUT_DIM))
def closure_ddp(input_tensor=input_tensor):
ddp_optimizer.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
return ddp_loss
def closure_sharded(input_tensor=input_tensor):
sharded_optimizer.zero_grad()
sharded_loss = sharded_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
return sharded_loss
loss_ddp = cast(
torch.Tensor, ddp_optimizer.step(closure=closure_ddp),
)
loss_sharded_optim = cast(
torch.Tensor,
sharded_optimizer.step(closure=closure_sharded),
)
torch.testing.assert_close(
loss_ddp, loss_sharded_optim,
msg="Losses differ between local optimizer and ZeRO",
)
self._check_same_model_params(
sharded_ddp_model, ddp_model,
"Models differ after a step",
)
# Check that parity is maintained
for i in range(BATCHES):
check_step()
# For the second half of batches, change the parameter
# trainability to further test parity
if i > BATCHES // 2:
next(ddp_model.parameters()).requires_grad = bool(i % 2)
next(sharded_ddp_model.parameters()).requires_grad = bool(i % 2)
# Check that the `state_dict` checkpoints are compatible between
# the local optimizer and ZeRO
REFERENCE_RANK = 0
# - Get states
ddp_state_dict = ddp_optimizer.state_dict()
sharded_optimizer.consolidate_state_dict(to=REFERENCE_RANK)
sharded_optim_state_dict = [
sharded_optimizer.state_dict()
if self.rank == REFERENCE_RANK else {}
]
dist.broadcast_object_list(
sharded_optim_state_dict, src=REFERENCE_RANK,
group=dist.group.WORLD,
)
sharded_optim_state_dict = sharded_optim_state_dict[0]
# - Cross-load the states
# Run one step and check that the models are still the same
ddp_state_dict_ref = copy.deepcopy(ddp_state_dict)
ddp_optimizer.load_state_dict(sharded_optim_state_dict)
sharded_optimizer.load_state_dict(ddp_state_dict)
check_step()
# - Reload their respective states
# Run one step and check that the models are still the same
ddp_optimizer.load_state_dict(ddp_state_dict_ref)
sharded_optimizer.load_state_dict(sharded_optim_state_dict)
check_step()
def _test_zero_join(self, device):
"""Check that the ZeRO join hook allows training with uneven inputs
when using the given device."""
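        # Ranks that exhaust their inputs block at the end of the `with Join(...)`
        # block and repeatedly run their join hooks' `main_hook`, shadowing the
        # collectives issued by ranks that are still training so that collective
        # calls stay matched across ranks.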
NUM_INPUTS = 3
NUM_EPOCHS = 2
LR = 0.01
torch.manual_seed(0)
torch.cuda.manual_seed(0)
rank = self.rank
world_size = self.world_size
is_gpu = device.type == "cuda"
backend = _get_backend_for_tests() if is_gpu else dist.Backend.GLOO
self.dist_init(rank, world_size, backend)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Linear(3, 3),
torch.nn.Linear(3, 3),
)
model.to(device)
# DDP ensures correct gradients in data parallel training, so DDP with
# local optimizers on uneven inputs should be equivalent to ZeRO on
# uneven inputs with gradients being manually set
ddp_model = DDP(model, device_ids=[rank]) if is_gpu else DDP(model)
local_optim = torch.optim.Adam(ddp_model.parameters(), lr=LR)
zero_model = copy.deepcopy(model)
zero_model.to(device)
zero_optim = ZeroRedundancyOptimizer(
zero_model.parameters(), torch.optim.Adam, lr=LR,
)
loss_fn = torch.nn.MSELoss()
# Use uneven inputs: rank i has i extra inputs
inputs = [
torch.randn(20, 2).to(device) for _ in range(NUM_INPUTS + rank)
]
labels = torch.randn(20, 3).to(device)
# Save the gradients and parameters from DDP as the ground truth; do
# so on the last-joining rank (in this case, the largest rank)
grads_at_each_iter = []
params_at_each_iter = []
with ddp_model.join():
for _ in range(NUM_EPOCHS):
for input in inputs:
output = ddp_model(input)
loss_fn(output, labels).backward()
if rank == world_size - 1:
grads = []
for p in ddp_model.parameters():
grads.append(p.grad.detach().clone().to(device))
local_optim.step()
if rank == world_size - 1:
params = []
for p in ddp_model.parameters():
params.append(p.detach().clone().to(device))
grads_at_each_iter.append(grads)
params_at_each_iter.append(params)
# Broadcast the saved gradients and parameters to all of the other
# ranks (which joined early)
grads_and_params = [grads_at_each_iter, params_at_each_iter]
grads_and_params = _broadcast_object(
grads_and_params, src_rank=world_size - 1, group=dist.group.WORLD,
device=device,
)
grads_at_each_iter = grads_and_params[0]
params_at_each_iter = grads_and_params[1]
# TODO: Replace this `_broadcast_object` with `broadcast_object_list`
# once the latter supports loading to the destination device instead
# of the source device
# A process must still set the remaining gradients after joining, so we
# define a join hook to do this before the ZeRO join hook
class _JoinGradInfo():
def __init__(self, grads):
self.grads = grads # remaining gradients to set (in order)
self.index = 0
class _SetGradsJoinHook(JoinHook):
def __init__(self, zero_optim, grads):
zero_optim._join_grad_info = _JoinGradInfo(grads)
self.zero = zero_optim
super().__init__()
def main_hook(self):
join_grad_info = self.zero._join_grad_info
grads = self.zero._join_grad_info.grads[join_grad_info.index]
join_grad_info.index += 1
for p, grad in zip(self.zero._all_params, grads):
p.grad = grad.detach().clone().to(device)
class _GradientSetter(Joinable):
def __init__(self):
super().__init__()
def join_hook(self, **kwargs):
assert "zero_optim" in kwargs
assert "grads" in kwargs
zero_optim = kwargs["zero_optim"]
grads = kwargs["grads"]
return _SetGradsJoinHook(zero_optim, grads)
@property
def join_device(self):
return device
@property
def join_process_group(self):
return dist.group.WORLD
num_grads_after_joining = NUM_EPOCHS * (world_size - rank - 1)
grads = grads_at_each_iter[-num_grads_after_joining:]
gradient_setter = _GradientSetter()
iter = 0
with Join(
[gradient_setter, zero_optim], zero_optim=zero_optim, grads=grads,
):
for _ in range(NUM_EPOCHS):
for input in inputs:
# Notify join context that this process has not joined
Join.notify_join_context(gradient_setter)
# Set gradients manually
for p, grad in zip(
zero_model.parameters(), grads_at_each_iter[iter],
):
p.grad = grad.detach().clone().to(device)
# Perform optimizer step and check parity
zero_optim.step()
for p, ddp_p in zip(
zero_model.parameters(), params_at_each_iter[iter],
):
torch.testing.assert_close(
p, ddp_p,
msg="Parameters differ between using ZeRO and "
"local optimizer",
)
iter += 1
@common_distributed.requires_nccl()
@common_distributed.skip_if_no_gpu
def test_zero_join_gpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs
on GPU."""
self._test_zero_join(self.device)
@common_distributed.requires_gloo()
def test_zero_join_cpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs
on CPU."""
self._test_zero_join(torch.device("cpu"))
def _test_zero_model_parallel(self, parameters_as_bucket_view: bool):
# Use two processes each with two GPUs
assert self.rank < 2
NUM_EPOCHS = 2
NUM_INPUTS = 4
LR = 0.01
torch.manual_seed(0)
torch.cuda.manual_seed(0)
class ModelParallelModel(torch.nn.Module):
def __init__(self, dev0, dev1):
super().__init__()
self.dev0 = dev0
self.dev1 = dev1
self.net0 = torch.nn.Linear(10, 10).to(dev0)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5).to(dev1)
def forward(self, x):
x = x.to(self.dev0)
x = self.relu(self.net0(x))
x = x.to(self.dev1)
return self.net1(x)
class LocalModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.net0 = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5)
def forward(self, x):
return self.net1(self.relu(self.net0(x)))
dev0 = torch.device(2 * self.rank)
dev1 = torch.device(2 * self.rank + 1)
mp_model = ModelParallelModel(dev0, dev1)
ddp_model = DDP(mp_model)
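        # `device_ids` is intentionally omitted: the wrapped module spans two
        # devices, and DDP only accepts `device_ids` for single-device modules.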
local_model = LocalModel().to(dev0)
# Ensure the parameters are the same across the two models
def copy_param(p):
return torch.nn.Parameter(p.detach().clone().to(dev0))
local_model.net0.weight = copy_param(mp_model.net0.weight)
local_model.net0.bias = copy_param(mp_model.net0.bias)
local_model.net1.weight = copy_param(mp_model.net1.weight)
local_model.net1.bias = copy_param(mp_model.net1.bias)
# Compare parity between DDP with model parallelism using ZeRO and
# a local model using a local optimizer
zero_optim = ZeroRedundancyOptimizer(
ddp_model.parameters(),
optimizer_class=torch.optim.Adam,
parameters_as_bucket_view=parameters_as_bucket_view,
lr=LR,
)
local_optim = torch.optim.Adam(local_model.parameters(), lr=LR)
inputs = [torch.randn(20, 10).to(dev0) for _ in range(NUM_INPUTS)]
for _ in range(NUM_EPOCHS):
for input in inputs:
def closure_local():
local_optim.zero_grad()
local_loss = local_model(input).abs().sum()
local_loss.backward()
return local_loss
def closure_ddp():
zero_optim.zero_grad()
ddp_loss = ddp_model(input).abs().sum()
ddp_loss.backward()
return ddp_loss
local_loss = cast(
torch.Tensor, local_optim.step(closure=closure_local)
)
ddp_loss = cast(
torch.Tensor, zero_optim.step(closure=closure_ddp)
)
# Increased tolerances are needed to pass when using TF32
# See: https://github.com/pytorch/pytorch/issues/67764
                torch.testing.assert_close(
                    local_loss.cpu(), ddp_loss.cpu(), rtol=1e-03, atol=1e-08,
                    msg="Losses differ between local optimizer and ZeRO",
                )
for local_p, ddp_p in zip(
local_model.parameters(),
ddp_model.parameters()
):
                    torch.testing.assert_close(
                        local_p.cpu(), ddp_p.cpu(), rtol=1e-03, atol=1e-04,
                        msg="Models differ after a step",
                    )
@common_distributed.skip_if_lt_x_gpu(4)
@parametrize(
"parameters_as_bucket_view",
[False, True],
)
def test_zero_model_parallel(
self,
parameters_as_bucket_view: bool,
):
"""Check that ZeRO works with model parallelism where the model's
layers are assigned to different devices."""
if self.rank >= 2:
return
        # Disable DDP + ReplicatedTensor when `parameters_as_bucket_view=True`
# since then ZeroRedundancyOptimizer modifies the model parameters in
# place.
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
context = _ddp_replicated_tensor(False) if parameters_as_bucket_view \
else suppress()
with context:
self.dist_init(self.rank, world_size=2)
self._test_zero_model_parallel(parameters_as_bucket_view)
def _test_ddp_zero_overlap(
self,
device,
hook_constructor,
gradient_as_bucket_view,
static_graph,
**kwargs,
):
SGD_LR = 0.01
SGD_MOMENTUM = 0.9
SGD_WEIGHT_DECAY = 0.001
NUM_INPUTS = 5
torch.manual_seed(0)
torch.cuda.manual_seed(0)
rank = self.rank
is_gpu = device.type == "cuda"
if is_gpu:
torch.cuda.set_device(device)
models_to_test = [(
torch.nn.Sequential(
torch.nn.Linear(1000, 2000),
torch.nn.Linear(2000, 500),
),
[torch.randn(1, 1000).to(device) for _ in range(NUM_INPUTS)],
)]
if HAS_TORCHVISION:
models_to_test.append((
torchvision.models.resnet50(),
[
torch.randn(1, 3, 3, 1000).to(device)
for _ in range(NUM_INPUTS)
]
))
for (model, inputs) in models_to_test:
# Enable determinism in cudnn operators
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
device_ids = [rank] if is_gpu else None
# Set up the DDP model overlapping with ZeRO
ddp_model_overlap = DDP(
copy.deepcopy(model).to(device),
device_ids=device_ids,
gradient_as_bucket_view=gradient_as_bucket_view
)
if static_graph:
ddp_model_overlap._set_static_graph()
zero_optim = ZeroRedundancyOptimizer(
ddp_model_overlap.parameters(),
optimizer_class=torch.optim.SGD,
overlap_with_ddp=True,
lr=SGD_LR,
momentum=SGD_MOMENTUM,
weight_decay=SGD_WEIGHT_DECAY,
)
ddp_model_overlap.register_comm_hook(
None,
hook_constructor(
allreduce_hook, ddp_model_overlap, zero_optim,
**kwargs,
)
)
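                # With `overlap_with_ddp=True`, the ZeRO step is folded into the
                # registered communication hook (per bucket for the interleaved
                # variant, after the final bucket otherwise), so no explicit
                # `zero_optim.step()` call appears in the training loop below.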
# Set up the DDP model with local optimizer
ddp_model_local = DDP(
copy.deepcopy(model).to(device),
device_ids=device_ids,
gradient_as_bucket_view=gradient_as_bucket_view
)
if static_graph:
ddp_model_local._set_static_graph()
local_optim = torch.optim.SGD(
ddp_model_local.parameters(),
lr=SGD_LR,
momentum=SGD_MOMENTUM,
weight_decay=SGD_WEIGHT_DECAY
)
# Check that the parameters match initially
for p1, p2 in zip(
ddp_model_overlap.parameters(),
ddp_model_local.parameters()
):
self.assertEqual(p1, p2)
# Save the parameters to ensure they were updated
init_params_overlap = copy.deepcopy(
list(ddp_model_overlap.parameters())
)
# Ensure that this test runs independently
dist.barrier()
# Run the DDP model overlapping with ZeRO
# NOTE: Overlapping currently requires 2 or 3 warmup iterations
# to ensure DDP buckets have been rebuilt (depending on the
# value of `static_graph`)
num_warmup_inputs = 2 if not static_graph else 3
for input in inputs[:num_warmup_inputs]:
output = ddp_model_overlap(input)
loss = output.sum()
loss.backward()
for input in inputs:
zero_optim.zero_grad()
output = ddp_model_overlap(input)
loss = output.sum()
loss.backward()
# Run the DDP model with local optimizer
for input in inputs:
local_optim.zero_grad()
output = ddp_model_local(input)
loss = output.sum()
loss.backward()
local_optim.step()
dist.barrier()
# Check that the parameters are equal
for p1, p2 in zip(
ddp_model_overlap.parameters(),
ddp_model_local.parameters()
):
self.assertEqual(p1, p2)
# Check that the parameters were updated
self.assertNotEqual(
init_params_overlap, list(ddp_model_overlap.parameters()),
)
# Ensure that this test runs independently
dist.barrier()
# NOTE: The test is skipped if using Windows since functional optimizers
# are not currently supported.
@common_distributed.skip_if_win32()
@common_distributed.requires_nccl()
@common_distributed.skip_if_no_gpu
@common_distributed.skip_if_rocm
@parametrize(
"use_gpu",
[True],
# Add `False` once the Gloo sync issue causing hangs is fixed
# See: https://github.com/pytorch/pytorch/issues/62300
)
@parametrize(
"use_interleaved_hook",
[False, True],
)
@parametrize(
"gradient_as_bucket_view",
[False, True],
)
@parametrize(
"static_graph",
[False, True],
)
@parametrize(
"shard_buckets",
[False, True],
)
def test_ddp_zero_overlap(
self,
use_gpu: bool,
use_interleaved_hook: bool,
gradient_as_bucket_view: bool,
static_graph: bool,
shard_buckets: bool,
):
"""
        Check that overlapping DDP with ZeRO, using the method determined by
        ``hook_constructor`` and ``shard_buckets`` together with the given ZeRO
        and DDP arguments, achieves parity with DDP using a local optimizer.
"""
device = torch.device(self.rank) if use_gpu else torch.device("cpu")
backend = _get_backend_for_tests()
self.dist_init(self.rank, self.world_size, backend)
hook_constructor = hook_with_zero_step if not use_interleaved_hook \
else hook_with_zero_step_interleaved
# Disable DDP + ReplicatedTensor since ZeroRedundancyOptimizer
# modifies the model parameters in place.
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
with _ddp_replicated_tensor(False):
self._test_ddp_zero_overlap(
device, hook_constructor, gradient_as_bucket_view, static_graph,
shard_buckets=shard_buckets,
)
instantiate_parametrized_tests(TestZeroRedundancyOptimizerSingleRank)
instantiate_parametrized_tests(TestZeroRedundancyOptimizerDistributed)
if __name__ == "__main__":
    # NOTE: Do not run these tests via `unittest`; otherwise the parametrized
    # tests are not properly registered.
run_tests()
|
pytorch-master
|
test/distributed/optim/test_zero_redundancy_optimizer.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn import Linear
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
)
from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestUnevenParamShard(FSDPTest):
def _get_ref_results(self, model, input, my_lr):
with torch.no_grad():
# Compute one iteration local output.
weight = model.weight.T.clone().to(self.rank)
v = torch.Tensor(input[self.rank]).to(self.rank)
ref_forward_output_my_rank = torch.matmul(v, weight)
# Compute one iteration global weight update.
v = torch.Tensor(input[: self.world_size]).to(self.rank)
grad = v.float().sum(0).repeat(weight.shape[0], 1).div(self.world_size)
ref_weight_out = weight - grad.T * my_lr
return ref_forward_output_my_rank, ref_weight_out
@skip_if_lt_x_gpu(2)
def test_one_iteration(self):
"""Test FSDP with uneven divide of parameter shards."""
model = Linear(3, 3, bias=False)
input = torch.rand(8, 3)
my_lr = 0.1
ref_forward_output_my_rank, ref_weight_out = self._get_ref_results(
model, input, my_lr
)
model.to(self.rank)
model = FSDP(model)
optim = SGD(model.parameters(), lr=my_lr)
self.assertTrue(len(input) >= self.world_size)
in_data = torch.Tensor(input[self.rank]).to(self.rank)
out = model(in_data)
out.float().sum().backward()
optim.step()
optim.zero_grad()
with model.summon_full_params(model):
weight_out = model.module.weight.T.clone()
self.assertEqual(ref_forward_output_my_rank, out)
self.assertEqual(ref_weight_out, weight_out)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_uneven.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.nn as nn
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(3, 5)
layer1_modules = [
torch.nn.Linear(5, 4),
torch.nn.Linear(4, 4),
torch.nn.Linear(4, 4),
]
self.layer1 = torch.nn.Sequential(*layer1_modules)
self.layer2 = torch.nn.Linear(4, 2)
self.layer3 = torch.nn.Linear(2, 2)
self.relu = torch.nn.ReLU()
def forward(self, x):
z = self.relu(self.layer0(x))
z = self.relu(self.layer1(z))
z = self.relu(self.layer2(z))
z = self.relu(self.layer3(z))
return z
def get_input(self, device):
return (torch.randn((8, 3)).to(device),)
def get_loss(self, input, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
class IgnoredModule(torch.nn.Module):
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.randn((in_dim, out_dim)))
def forward(self, x):
return x @ self.weight
class ModelWithIgnoredModules(Model):
"""Adds a variable number of :class:`IgnoredModule` to ``self.layer1``."""
def __init__(self, num_ignored: int) -> None:
assert num_ignored >= 0
super().__init__()
layer1_modules = [torch.nn.Linear(5, 4), torch.nn.Linear(4, 4)] + \
[IgnoredModule(4, 4) for _ in range(num_ignored)] + \
[torch.nn.Linear(4, 4)]
self.layer1 = torch.nn.Sequential(*layer1_modules)
class TestFSDPIgnoredModules(FSDPTest):
def _train_model(self, model, optim, num_iters, device=torch.device("cuda")):
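        # Runs `num_iters` forward/backward/optimizer steps, using the wrapped
        # module's own get_input/get_loss/run_backward helpers.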
for _ in range(num_iters):
inp = model.module.get_input(device)
output = model(*inp)
loss = model.module.get_loss(inp, output).to(device)
model.module.run_backward(loss)
optim.step()
@skip_if_lt_x_gpu(2)
def test_ignored_modules_transformer(self):
"""Tests that ignored modules' parameters are not flattened for a
transformer model with shared parameters."""
# Initialize an FSDP-wrapped transformer model that has FSDP ignore
# the `nn.Transformer` module's parameters
model: nn.Module = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
wrapped_model = FSDP(
model,
self.process_group,
ignored_modules=[model.transformer],
)
# Check that the wrapped model's flattened parameter does not include
# the ignored transformer module's parameters
nonwrapped_model: nn.Module = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
total_numel = sum(p.numel() for p in nonwrapped_model.parameters())
ignored_numel = sum(
p.numel() for p in nonwrapped_model.transformer.parameters()
)
nonignored_numel = total_numel - ignored_numel
with FSDP.summon_full_params(wrapped_model):
flat_param_numel = wrapped_model.params[0].numel()
self.assertEqual(flat_param_numel, nonignored_numel)
# Check that we can run a few iterations
optim = torch.optim.Adam(wrapped_model.parameters(), lr=1e-3)
self._train_model(wrapped_model, optim, 3)
@skip_if_lt_x_gpu(2)
def test_ignored_modules_nested(self):
"""Tests that passing a module with nested FSDP modules does not
error and still ignores non-FSDP modules' parameters."""
# Initialize an FSDP-wrapped nested model that first wraps the nested
# sequential's second linear layer (`layer1[1]`) and then wraps the
# overall model while ignoring the nested sequential (`layer1`)
model = Model().cuda()
model.layer1[1] = FSDP(model.layer1[1])
wrapped_model = FSDP(model, ignored_modules=[model.layer1])
# Check that the wrapped model's flattened parameter does not include
# the ignored nested sequential's parameters
nonwrapped_model = Model()
total_numel = sum(p.numel() for p in nonwrapped_model.parameters())
ignored_numel = sum(
p.numel() for p in nonwrapped_model.layer1.parameters()
)
nonignored_numel = total_numel - ignored_numel
with FSDP.summon_full_params(wrapped_model):
flat_param_numel = wrapped_model.params[0].numel()
self.assertEqual(flat_param_numel, nonignored_numel)
# Check that we can run a few iterations
optim = torch.optim.Adam(wrapped_model.parameters(), lr=1e-3)
self._train_model(wrapped_model, optim, 3)
@skip_if_lt_x_gpu(2)
def test_ignored_modules_invalid(self):
"""Tests that passing an FSDP module as an ignored module or the
top-level module itself errors."""
model = Model().cuda()
model.layer1 = FSDP(model.layer1)
# Passing an FSDP module as an ignored module should error
with self.assertRaises(
ValueError,
msg="`ignored_modules` should not include FSDP modules",
):
FSDP(model, ignored_modules=[model.layer1])
with self.assertWarnsRegex(
expected_warning=UserWarning,
expected_regex="Trying to ignore the top-level module passed into "
"the FSDP constructor itself will result in all parameters being "
"ignored and is not supported",
):
FSDP(model, ignored_modules=[model])
@skip_if_lt_x_gpu(2)
@parametrize("pass_ignored_modules_to_root", [False, True])
def test_diff_ignored_modules_across_ranks(self, pass_ignored_modules_to_root: bool):
"""
Tests ignoring different modules across ranks.
Args:
pass_ignored_modules_to_root (bool): If ``False``, does not pass
any ignored modules (including those already ignored in child
FSDP instances) to the root FSDP instance; if ``True``, passes
all ignored modules (representing a superset of the children's
ignored modules) to the root FSDP instance.
"""
# To exercise different `FlatParameter` enumerations across ranks,
# we wrap `layer3` with FSDP, where `layer3` is registered as a module
# after `layer1`, which has the variable number of ignored modules
model = ModelWithIgnoredModules(num_ignored=self.rank + 1).cuda()
layer1_ignored_modules = [
m for m in model.layer1.modules() if isinstance(m, IgnoredModule)
]
model.layer1 = FSDP(model.layer1, ignored_modules=layer1_ignored_modules)
model.layer3 = FSDP(model.layer3)
model_ignored_modules = [
m for m in model.modules() if isinstance(m, IgnoredModule)
] if pass_ignored_modules_to_root else []
wrapped_model = FSDP(model, ignored_modules=model_ignored_modules)
optim = torch.optim.Adam(wrapped_model.parameters(), lr=1e-3)
self._train_model(wrapped_model, optim, 3)
instantiate_parametrized_tests(TestFSDPIgnoredModules)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_ignored_modules.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn import Linear, Module
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestInput(FSDPTest):
@property
def world_size(self):
return 1
@skip_if_lt_x_gpu(1)
@parametrize("input_cls", [subtest(dict, name="dict"), subtest(list, name="list")])
def test_input_type(self, input_cls):
"""Test FSDP with input being a list or a dict, only single GPU."""
class Model(Module):
def __init__(self):
super().__init__()
self.layer = Linear(4, 4)
def forward(self, input):
if isinstance(input, list):
input = input[0]
else:
assert isinstance(input, dict), input
input = input["in"]
return self.layer(input)
model = FSDP(Model()).cuda()
optim = SGD(model.parameters(), lr=0.1)
for _ in range(5):
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
if input_cls is list:
in_data = [in_data]
else:
self.assertTrue(input_cls is dict)
in_data = {"in": in_data}
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
instantiate_parametrized_tests(TestInput)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_input.py
|
# Owner(s): ["oncall: distributed"]
import random
import sys
from typing import List
import unittest
from collections import OrderedDict
import torch
import torch.nn as nn
from torch import distributed as dist
from torch.distributed.fsdp._utils import _apply_to_tensors
from torch.distributed.utils import _replace_by_prefix
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TestCase,
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
)
from dataclasses import dataclass
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestUtils(TestCase):
@parametrize(
"devices", [["cpu"], ["cuda"], subtest(["cpu", "cuda"], name="cpu_cuda")]
)
def test_apply_to_tensors(self, devices):
if "cuda" in devices and (
not torch.cuda.is_available() or torch.cuda.device_count() < 1
):
raise unittest.SkipTest("Skipped due to lack of GPU")
expected = 0
def get_a_tensor():
"""Return a random tensor on random device."""
dev = random.choice(devices)
shape = random.choice(((1), (2, 3), (4, 5, 6), (7, 8, 9, 10)))
t = torch.rand(shape).to(dev)
nonlocal expected
expected += t.numel()
return t
@dataclass
class SomeDataClass:
some_key: str
some_float: float
some_tensor: List[torch.Tensor]
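        # _apply_to_tensors is expected to recurse through lists, tuples, sets,
        # dicts, OrderedDicts, and dataclasses, applying `fn` to every tensor while
        # preserving container types (summary inferred from the assertions below).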
# create a mixed bag of data.
data = [1, "str"]
data.append({"key1": get_a_tensor(), "key2": {1: get_a_tensor()}, "key3": 3})
data.insert(0, set(["x", get_a_tensor(), get_a_tensor()]))
data.append(([1], get_a_tensor(), (1), [get_a_tensor()], set((1, 2))))
data.append({"abc": SomeDataClass("some_key", 1.0, [get_a_tensor()])})
od = OrderedDict()
od["k"] = "value"
data.append(od)
total = 0
def fn(t):
nonlocal total
total += t.numel()
return t
new_data = _apply_to_tensors(fn, data)
self.assertEqual(total, expected)
for i, v in enumerate(data):
self.assertEqual(type(new_data[i]), type(v))
def test_replace_by_prefix(self):
state_dict = {
"layer.a": torch.tensor(1),
"abc.layer.def": torch.tensor(2),
"layer.b": torch.tensor(3),
}
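        # Expected behavior (inferred from the asserts below): only keys that start
        # with the old prefix are rewritten, e.g. "layer.a" becomes "module.layer.a",
        # while "abc.layer.def" is left untouched.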
original_state_dict = state_dict.copy()
_replace_by_prefix(state_dict, "layer.", "module.layer.")
assert state_dict == {
"module.layer.a": torch.tensor(1),
"abc.layer.def": torch.tensor(2),
"module.layer.b": torch.tensor(3),
}
_replace_by_prefix(state_dict, "module.layer.", "layer.")
assert state_dict == original_state_dict
def test_packed_sequence(self):
"""Test to ensure RNN packed sequences are modified correctly."""
rnn = nn.RNN(5, 5)
x = torch.rand((5, 1, 5), dtype=torch.float)
seq_length = torch.tensor([4], dtype=torch.int)
def fill_fn(x):
x.fill_(0)
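        # Applying fill_fn through _apply_to_tensors should zero the packed data
        # in place, so the padded output below sums to 0.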
x = nn.utils.rnn.pack_padded_sequence(x, seq_length)
x, h = rnn(x)
x = _apply_to_tensors(fill_fn, x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
self.assertEqual(torch.sum(x), 0)
instantiate_parametrized_tests(TestUtils)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_utils.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn import Linear, Module
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
get_full_params,
)
from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Model(Module):
def __init__(self, wrap_fsdp):
super().__init__()
# keep everything deterministic for model initialization
torch.manual_seed(0)
self.inner = Linear(4, 4)
if wrap_fsdp:
self.inner = FSDP(self.inner)
self.outer = Linear(4, 5)
def forward(self, x):
# Forward twice.
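        # Calling the same FSDP-wrapped submodule twice in one forward pass
        # exercises re-gathering already-resharded parameters; the test below
        # checks parity against DDP.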
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
class TestMultiForward(FSDPTest):
def _dist_train(self, wrap_fsdp):
# keep everything deterministic for input data
torch.manual_seed(0)
model = Model(wrap_fsdp).cuda()
if wrap_fsdp:
model = FSDP(model)
else:
model = DistributedDataParallel(model, device_ids=[self.rank])
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if wrap_fsdp:
return get_full_params(model)
return list(model.parameters())
@skip_if_lt_x_gpu(2)
def test_multi_forward(self):
# DDP
ddp_state = self._dist_train(wrap_fsdp=False)
# FSDP
fsdp_state = self._dist_train(wrap_fsdp=True)
self.assertEqual(ddp_state, fsdp_state)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_multiple_forward.py
|
# Owner(s): ["oncall: distributed"]
import sys
from torch import distributed as dist
from torch.distributed.fsdp import CPUOffload
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestPureFP16(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
def test_pure_fp16(self, cpu_offload: CPUOffload):
"""Tests pure FP16 training, including when the parameter's dtype is
changed after FSDP initialization and before training."""
self._test_fsdp_parity(
NestedWrappedModule,
FSDPInitMode.RECURSIVE,
cuda_init_mode=CUDAInitMode.CUDA_AFTER,
# Run one iteration to avoid NaN without a gradient scaler
num_iters=1,
cpu_offload=cpu_offload,
use_pure_fp16=True,
)
instantiate_parametrized_tests(TestPureFP16)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_pure_fp16.py
|
# Owner(s): ["oncall: distributed"]
import sys
from math import inf
import torch
from torch import distributed as dist
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
CPUOffload,
_calc_grad_norm,
)
from torch.nn import utils as nn_utils
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
DeterministicModel,
FSDPTest,
_collect_total_grad_norm_fsdp,
_collect_total_grad_norm_local,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
parametrize,
instantiate_parametrized_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestClipGradNorm(FSDPTest):
def _run_fsdp_one_iteration(self, norm_type, nested_fsdp, cpu_offload):
"""Test FSDP with clip grad norm."""
fsdp_model = DeterministicModel(nested_fsdp, cpu_offload=cpu_offload)
local_model = DeterministicModel(False)
input = torch.rand(14, 2, device=self.rank)
fsdp_model = FSDP(fsdp_model, cpu_offload=cpu_offload)
self.assertTrue(len(input) >= self.world_size)
out = local_model(input[: self.world_size])
out.sum().backward()
in_data = torch.tensor(input[self.rank], device=self.rank)
out_fsdp = fsdp_model(in_data)
out_fsdp.sum().backward()
total_norms_fsdp = _collect_total_grad_norm_fsdp(
fsdp_model, norm_type, self.rank
)
total_norms_local = _collect_total_grad_norm_local(local_model, norm_type)
total_norms_local /= self.world_size
norm_cap = total_norms_fsdp / 2.0
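        # Cap at half the pre-clip norm so that clipping is guaranteed to trigger.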
self.assertEqual(total_norms_local, total_norms_fsdp)
fsdp_model.clip_grad_norm_(norm_cap, norm_type=norm_type)
nn_utils.clip_grad_norm_(
local_model.parameters(), norm_cap, norm_type=norm_type
)
total_norms_after_clip_fsdp = _collect_total_grad_norm_fsdp(
fsdp_model, norm_type, self.rank
)
total_norms_after_clip_local = _collect_total_grad_norm_local(
local_model, norm_type
)
self.assertTrue(total_norms_after_clip_fsdp <= norm_cap)
self.assertEqual(total_norms_after_clip_local, total_norms_after_clip_fsdp)
@skip_if_lt_x_gpu(2)
@parametrize("norm_type", [2.0, inf])
@parametrize("nested_fsdp", [True, False])
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
def test_fsdp_clip_grad_norm(self, norm_type, nested_fsdp, cpu_offload):
"""Test FSDP with clip grad norm."""
self._run_fsdp_one_iteration(norm_type, nested_fsdp, cpu_offload)
class TestCalcuGradNorm(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize("norm_type", [2.0, inf, 1.3, 2.5])
@parametrize("nested_fsdp", [True, False])
def test_fsdp_calc_grad_norm(self, norm_type, nested_fsdp):
"""Test grad norm cal API."""
model = FSDP(DeterministicModel(nested_fsdp))
input = torch.rand(15, 2, device=self.rank)
out = model(input)
out.sum().backward()
total_norm = _calc_grad_norm(model.params_with_grad, norm_type)
total_norm_expected = _collect_total_grad_norm_local(model, norm_type)
self.assertEqual(total_norm, total_norm_expected)
instantiate_parametrized_tests(TestClipGradNorm)
instantiate_parametrized_tests(TestCalcuGradNorm)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_clip_grad_norm.py
|
# Owner(s): ["oncall: distributed"]
import itertools
import math
import sys
from copy import deepcopy
import torch
import torch.nn as nn
from torch import distributed as dist
from torch.distributed.fsdp import CPUOffload
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.flat_param import FlatParamHandle
from torch.distributed.fsdp.wrap import enable_wrap, wrap
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
DeterministicModel,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def _run_test_summon_full_param_writeback(
cls, writeback, modify_outer, *fsdp_args, **fsdp_kwargs
):
with enable_wrap(wrapper_cls=FSDP, *fsdp_args, **fsdp_kwargs):
lin1 = wrap(nn.Linear(5, 5, bias=False).cuda(cls.rank))
lin2 = nn.Linear(5, 3, bias=False).cuda(cls.rank)
model = wrap(nn.Sequential(lin1, lin2))
# set the value
outer_param = model.get_parameter("_fsdp_wrapped_module.flat_param")
inner_param = model.get_parameter(
"_fsdp_wrapped_module._fpw_module.0._fsdp_wrapped_module.flat_param"
)
p = outer_param if modify_outer else inner_param
with torch.no_grad():
# This sets the local shard value
p[0] = cls.rank + 2
with model.summon_full_params(model, writeback=writeback):
with torch.no_grad():
p.copy_(torch.zeros_like(p))
if writeback or cls.world_size == 1:
            # When world_size == 1, FSDP does not shard, so the parameter is not
            # replaced by a local shard and the write is always reflected.
cls.assertEqual(p.cpu()[0], 0)
else:
cls.assertEqual(p.cpu()[0], cls.rank + 2)
class TestSummonFullParamsNoShard(FSDPTest):
@property
def world_size(self):
return 1 # does not shard
@skip_if_lt_x_gpu(2)
@parametrize("writeback", [True, False])
@parametrize("modify_outer", [True, False])
@parametrize("mixed_precision", [True, False])
# TODO: CPUOffload summon + writeback does not
# work when param is not sharded
# (currently when world_size == 1)
def test_summon_full_param_writeback(
self, writeback, modify_outer, mixed_precision
):
mixed_precision = MixedPrecision() if mixed_precision else None
return _run_test_summon_full_param_writeback(
self,
writeback,
modify_outer=modify_outer,
cpu_offload=CPUOffload(offload_params=False),
mixed_precision=mixed_precision,
)
class TestSummonFullParams(FSDPTest):
@property
def world_size(self):
return 2
def get_model_param_count(self, m):
return sum([p.numel() for p in m.parameters()])
    # Sharding pads so that all shards have the same size while using the least padding possible.
def get_expected_sharded_size(self, global_size):
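        # Illustrative arithmetic: Linear(10, 11) with bias has 121 elements, so
        # with world_size=2 each shard holds ceil(121 / 2) = 61 elements
        # (numbers from the Linear(10, 11) used in the shard-value test below).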
return int(math.ceil(global_size / self.world_size))
@skip_if_lt_x_gpu(2)
@parametrize("writeback", [True, False])
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("mixed_precision", [True, False])
@parametrize("modify_outer", [True, False])
def test_summon_full_param_writeback(
self, writeback, cpu_offload, mixed_precision, modify_outer
):
mixed_precision = MixedPrecision() if mixed_precision else None
return _run_test_summon_full_param_writeback(
self,
writeback,
modify_outer,
cpu_offload=cpu_offload,
mixed_precision=mixed_precision,
)
@skip_if_lt_x_gpu(2)
@parametrize("mixed_precision", [True, False])
def test_summon_full_param_shard_value(self, mixed_precision):
mixed_precision = MixedPrecision() if mixed_precision else None
raw_model = nn.Linear(10, 11)
raw_model_size = self.get_model_param_count(raw_model)
expected_shard_size = self.get_expected_sharded_size(raw_model_size)
model = FSDP(raw_model.cuda(self.rank), mixed_precision=mixed_precision)
self.assertEqual(expected_shard_size, self.get_model_param_count(model))
# we're assuming a single flattened param
self.assertEqual(1, len(list(model.parameters())))
my_shard = torch.clone(next(model.parameters()))
with model.summon_full_params(model):
self.assertEqual(raw_model_size, self.get_model_param_count(model))
parameters = list(model.parameters())
all_shards = FlatParamHandle.flatten_params(parameters, requires_grad=False)
my_slice = torch.chunk(all_shards, self.world_size)[self.rank]
# shards are padded but the full_param tensor is not
            a, b = my_shard[0 : my_slice.numel()], my_slice
            self.assertTrue(torch.equal(a.cpu(), b.cpu()))
@skip_if_lt_x_gpu(2)
@parametrize("recurse", [True, False])
@parametrize("summon_outer", [True, False])
@parametrize("mixed_precision", [True, False])
def test_summon_full_param_recursive(self, recurse, summon_outer, mixed_precision):
mixed_precision = MixedPrecision() if mixed_precision else None
model = FSDP(
nn.Sequential(
FSDP(nn.Linear(5, 5, bias=False), mixed_precision=mixed_precision),
nn.Linear(5, 3, bias=False),
),
mixed_precision=mixed_precision,
).cuda(self.rank)
global_inner_numel = self.get_model_param_count(nn.Linear(5, 5, bias=False))
global_outer_numel = self.get_model_param_count(nn.Linear(5, 3, bias=False))
shard_inner_numel = int(math.ceil(global_inner_numel / self.world_size))
shard_outer_numel = int(math.ceil(global_outer_numel / self.world_size))
outer_param = model.get_parameter("_fsdp_wrapped_module.flat_param")
inner_param = model.get_parameter(
"_fsdp_wrapped_module._fpw_module.0._fsdp_wrapped_module.flat_param"
)
self.assertEqual(shard_outer_numel, outer_param.numel())
self.assertEqual(shard_inner_numel, inner_param.numel())
model_to_summon = model if summon_outer else model[0]
# outer is summoned if _summon_full_param is called on the outer FSDP module
expected_outer_numel = global_outer_numel if summon_outer else shard_outer_numel
# inner is summoned if _summon_full_param is called with recursion or on the inner FSDP module
expected_inner_numel = (
global_inner_numel if recurse or not summon_outer else shard_inner_numel
)
with model_to_summon.summon_full_params(model_to_summon, recurse=recurse):
self.assertEqual(expected_outer_numel, outer_param.numel())
self.assertEqual(expected_inner_numel, inner_param.numel())
@skip_if_lt_x_gpu(2)
def test_cannot_summon_full_params_from_forward(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.zeros(5))
def forward(self, fsdp_module):
with fsdp_module.summon_full_params(fsdp_module):
pass
model = FSDP(MyModule()).cuda(self.rank)
with self.assertRaisesRegex(
ValueError, "current state is TrainingState_.FORWARD"
):
model(model)
@skip_if_lt_x_gpu(2)
def test_cannot_summon_full_params_from_backward(self):
model = FSDP(nn.Linear(2, 1)).cuda(self.rank)
output = model(torch.ones(2).cuda(self.rank))
def bad_backwards_hook(tensor):
with model.summon_full_params(model):
pass
return None
self.assertTrue(output.requires_grad)
output.register_hook(bad_backwards_hook)
with self.assertRaisesRegex(
ValueError, "current state is TrainingState_.BACKWARD_PRE"
):
output.backward()
@skip_if_lt_x_gpu(2)
@parametrize("mixed_precision", [True, False])
def test_summon_full_params_respects_reshard_after_forward(self, mixed_precision):
mixed_precision = MixedPrecision() if mixed_precision else None
model = FSDP(
nn.Sequential(
FSDP(nn.Linear(5, 5, bias=False), mixed_precision=mixed_precision),
nn.Linear(5, 3, bias=False),
),
mixed_precision=mixed_precision,
).cuda(self.rank)
outer_param = model.get_parameter("_fsdp_wrapped_module.flat_param")
inner_param = model.get_parameter(
"_fsdp_wrapped_module._fpw_module.0._fsdp_wrapped_module.flat_param"
)
outer_full_param_size = outer_param.numel() * self.world_size
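        # outer_param.numel() is the local shard size, so multiplying by world_size
        # gives the padded full-parameter size materialized on the root FSDP module.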
# trigger lazy init
model(torch.zeros(5).cuda(self.rank))
# the root FSDP module keeps all params around
self.assertEqual(
outer_full_param_size, outer_param._full_param_padded.storage().size()
)
self.assertEqual(0, inner_param._full_param_padded.storage().size())
# similarly summon_full_params should have the same behavior
with model.summon_full_params(model):
pass
self.assertEqual(
outer_full_param_size, outer_param._full_param_padded.storage().size()
)
self.assertEqual(0, inner_param._full_param_padded.storage().size())
@skip_if_lt_x_gpu(2)
def test_summon_single_param(self):
model = FSDP(nn.Linear(1, 1, bias=False)).cuda(self.rank)
p = model.get_parameter("_fsdp_wrapped_module.flat_param")
self.assertEqual(1, p.numel())
with torch.no_grad():
# This sets the local shard value
p[0] = self.rank + 2
with model.summon_full_params(model, writeback=True):
self.assertEqual(1, p.numel())
with torch.no_grad():
p.copy_(torch.zeros_like(p))
# most ranks hold no data and wrote to padding so only rank zero will observe the above write
if self.rank == 0:
self.assertEqual(0, p[0])
else:
self.assertEqual(self.rank + 2, p[0])
@skip_if_lt_x_gpu(2)
@parametrize("rank0_only", [True, False])
@parametrize("offload_to_cpu", [True, False])
def test_summon_full_params_equivalence(self, rank0_only, offload_to_cpu):
offload = CPUOffload(offload_params=True)
model = FSDP(
DeterministicModel(wrap_fsdp=True, cpu_offload=offload), cpu_offload=offload
)
local_model = DeterministicModel(wrap_fsdp=False)
params_to_compare = (
[p.clone() for p in model.parameters()]
if rank0_only and self.rank != 0
else list(local_model.parameters())
)
writeback = not rank0_only
with model.summon_full_params(
model,
recurse=True,
rank0_only=rank0_only,
writeback=writeback,
offload_to_cpu=offload_to_cpu,
):
if writeback:
with torch.no_grad():
for p in model.parameters():
p.add_(1)
for p in params_to_compare:
p.add_(1)
            # The sleep below exposes failures if summon_full_params does not
            # properly synchronize CUDA streams.
torch.cuda._sleep(1000000)
            # deepcopy() of FSDP-managed params has issues, so clone them instead.
fsdp_params = [p.clone() for p in model.parameters()]
self.assertEqual(fsdp_params, params_to_compare)
        # CPU offload is enabled on the outer FSDP instance, so after exiting
        # the context the parameters should point back to CPU.
for param in model.parameters():
self.assertEqual(param.device, torch.device("cpu"))
@skip_if_lt_x_gpu(2)
def test_summon_from_non_fsdp(self):
class FSDPContainer(nn.Module):
def __init__(self, fsdp_1, fsdp_2, fsdp_3):
super().__init__()
self.fsdp_1 = fsdp_1
self.fsdp_2 = fsdp_2
self.fsdp_3 = fsdp_3
model_fsdp = FSDPContainer(
FSDP(DeterministicModel(wrap_fsdp=True)),
FSDP(DeterministicModel(wrap_fsdp=True)),
DeterministicModel(wrap_fsdp=False),
)
model_no_fsdp = FSDPContainer(
DeterministicModel(wrap_fsdp=False),
DeterministicModel(wrap_fsdp=False),
DeterministicModel(wrap_fsdp=False),
)
params_to_compare = list(model_no_fsdp.parameters())
with FSDP.summon_full_params(model_fsdp):
fsdp_params = [p.clone() for p in model_fsdp.parameters()]
self.assertEqual(params_to_compare, fsdp_params)
@skip_if_lt_x_gpu(2)
@parametrize("rank0_only", [True, False])
@parametrize("offload_to_cpu", [True, False])
@parametrize("mixed_precision", [True, False])
def test_reshard_outside_forward_backward_iteration(
self, rank0_only, offload_to_cpu, mixed_precision
):
mixed_precision = MixedPrecision() if mixed_precision else None
model = FSDP(
nn.Sequential(
FSDP(nn.Linear(5, 5, bias=False), mixed_precision=mixed_precision),
nn.Linear(5, 1, bias=False),
),
mixed_precision=mixed_precision,
).cuda(self.rank)
outer_param = model.get_parameter("_fsdp_wrapped_module.flat_param")
inner_param = model.get_parameter(
"_fsdp_wrapped_module._fpw_module.0._fsdp_wrapped_module.flat_param"
)
outer_full_param_size = outer_param.numel() * self.world_size
# First lets validate our assumption about resharding
output = model(torch.zeros(5).cuda(self.rank))
# the root FSDP module keeps all params around
self.assertEqual(
outer_full_param_size, outer_param._full_param_padded.storage().size()
)
self.assertEqual(0, inner_param._full_param_padded.storage().size())
output.backward()
# we reshard everything after backward() finishes
self.assertEqual(0, outer_param._full_param_padded.storage().size())
self.assertEqual(0, inner_param._full_param_padded.storage().size())
# now lets repeat it with summon done in between
output = model(torch.zeros(5).cuda(self.rank))
self.assertEqual(
outer_full_param_size, outer_param._full_param_padded.storage().size()
)
self.assertEqual(0, inner_param._full_param_padded.storage().size())
with model.summon_full_params(
model,
rank0_only=rank0_only,
writeback=not rank0_only,
offload_to_cpu=offload_to_cpu,
):
pass
self.assertEqual(
outer_full_param_size, outer_param._full_param_padded.storage().size()
)
self.assertEqual(0, inner_param._full_param_padded.storage().size())
output.backward()
with model.summon_full_params(
model,
rank0_only=rank0_only,
writeback=not rank0_only,
offload_to_cpu=offload_to_cpu,
):
pass
self.assertEqual(0, outer_param._full_param_padded.storage().size())
self.assertEqual(0, inner_param._full_param_padded.storage().size())
@skip_if_lt_x_gpu(2)
@parametrize("rank0_only", [True, False])
@parametrize("offload_to_cpu", [True, False])
@parametrize("mixed_precision", [True, False])
def test_params_are_unflattenned(self, rank0_only, offload_to_cpu, mixed_precision):
layer_shape = (10, 12)
model = nn.Linear(*layer_shape, bias=False).cuda(self.rank)
mixed_precision = MixedPrecision() if mixed_precision else None
fsdp_model = FSDP(deepcopy(model), mixed_precision=mixed_precision).cuda(
self.rank
)
def _get_flat_param():
return fsdp_model.get_parameter("_fsdp_wrapped_module.flat_param")
flattened_param = _get_flat_param()
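        # With world_size=2 and no bias, each rank's flat param holds half of the
        # 10 * 12 = 120 weight elements, i.e. 60.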
self.assertEqual(layer_shape[0] * layer_shape[1] / 2, flattened_param.numel())
with fsdp_model.summon_full_params(
fsdp_model,
rank0_only=rank0_only,
writeback=not rank0_only,
offload_to_cpu=offload_to_cpu,
):
if self.rank == 0 or not rank0_only:
self.assertEqual(fsdp_model.weight.shape, model.weight.shape)
expected_device = (
torch.device("cpu")
if offload_to_cpu
else torch.device("cuda", torch.cuda.current_device())
)
self.assertTrue(expected_device == fsdp_model.weight.device)
else:
# Nonzero rank with rank0_only maintains original params.
flat_within_ctx = _get_flat_param()
self.assertEqual(flat_within_ctx, flattened_param)
self.assertEqual(
flat_within_ctx.device, torch.device(torch.cuda.current_device())
)
        # Exiting the context should leave the parameter on its original (CUDA)
        # device, even when offload_to_cpu=True was used inside it.
param = next(fsdp_model.parameters())
self.assertTrue(
param.device == torch.device("cuda", torch.cuda.current_device())
)
@skip_if_lt_x_gpu(2)
@parametrize("rank0_only", [True, False])
@parametrize("offload_to_cpu", [True, False])
@parametrize("mixed_precision", [True, False])
def test_params_count_and_value(
self,
rank0_only: bool,
offload_to_cpu: bool,
mixed_precision: bool,
):
mixed_precision = MixedPrecision() if mixed_precision else None
model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
fsdp_model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
dev = (
torch.device("cpu")
if offload_to_cpu
else torch.device("cuda", torch.cuda.current_device())
)
params_to_compare = (
[p.to(dev) for p in model.module.parameters()]
if not rank0_only or self.rank == 0
else list(p.clone() for p in fsdp_model.parameters())
)
with FSDP.summon_full_params(
fsdp_model, rank0_only=rank0_only, writeback=not rank0_only
):
for p1, p2 in itertools.zip_longest(
fsdp_model.parameters(), params_to_compare
):
self.assertEqual(p1, p2)
        # Exiting the context should leave the parameter on its original (CUDA)
        # device, even when offload_to_cpu=True was used inside it.
param = next(fsdp_model.parameters())
self.assertTrue(
param.device == torch.device("cuda", torch.cuda.current_device())
)
@skip_if_lt_x_gpu(2)
def test_raises_rank0_with_writeback(self):
"""Tests that ``summon_full_params()`` with both ``rank0_only=True``
and ``writeback=True`` raises an error."""
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
)
with self.assertRaisesRegex(ValueError, "is not supported"):
with FSDP.summon_full_params(
nested_wrapped_module, rank0_only=True, writeback=True
):
pass
@skip_if_lt_x_gpu(2)
@parametrize("prefix", ["", "test_prefix"])
@parametrize("recurse", [False, True])
def test_named_parameters_buffers(self, prefix: str, recurse: bool):
"""Tests that ``named_parameters()`` and ``named_buffers()`` for a
        top-level FSDP-wrapped model match their behavior for the equivalent
non-wrapped model."""
model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
model.register_buffer("buffer", torch.ones(1))
# `named_parameters()` and `named_buffers` will contain FSDP prefixes
# if called on a non-FSDP root module
fsdp_model = FSDP(
NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
),
self.process_group,
)
fsdp_model.register_buffer("buffer", torch.ones(1))
with FSDP.summon_full_params(fsdp_model):
for call in ["named_parameters", "named_buffers"]:
for (n1, p1), (n2, p2) in itertools.zip_longest(
getattr(fsdp_model, call)(prefix=prefix, recurse=recurse),
getattr(model, call)(prefix=prefix, recurse=recurse),
):
self.assertEqual(n1, n2)
self.assertEqual(p1, p2)
instantiate_parametrized_tests(TestSummonFullParams)
instantiate_parametrized_tests(TestSummonFullParamsNoShard)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_summon_full_params.py
|
# Owner(s): ["oncall: distributed"]
import bisect
import sys
from enum import Enum, auto
from typing import Any, Dict, List, Tuple, Type
import torch
from torch import distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
_CHECKPOINT_PREFIX, apply_activation_checkpointing_wrapper
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import (
OptimStateKeyType,
StateDictType,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class _OSDCommMethod(Enum):
"""Method for communicating the optimizer state dict for internal tests."""
BROADCAST_OBJECT_LIST = auto()
SCATTER_FULL_OSD = auto()
class Bias(torch.nn.Module):
"""This module applies a 1D additive bias with dimension ``dim``."""
def __init__(self, dim: int) -> None:
super().__init__()
assert dim > 0
torch.manual_seed(0)
self.bias = torch.nn.Parameter(torch.randn((dim,)))
def forward(self, x):
return x + self.bias
class BlockA(torch.nn.Module):
"""
    Used to define an interesting nested structure for FSDP wrapping.
BlockA
Bias0
bias
weight
Bias1
bias
"""
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
assert all(v > 0 for v in (in_dim, out_dim))
torch.manual_seed(0)
self.bias_module0 = Bias(out_dim)
self.weight = torch.nn.Parameter(torch.randn((in_dim, out_dim)))
self.bias_module1 = Bias(out_dim)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = x @ self.weight
x = self.bias_module0(x)
x = self.relu(x) # ensure biases have different gradients
x = self.bias_module1(x)
return x
class BlockB(torch.nn.Module):
"""
    Used to define an interesting nested structure for FSDP wrapping.
BlockB
weight
Bias
bias
Bias
bias
"""
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
assert all(v > 0 for v in (in_dim, out_dim))
torch.manual_seed(0)
self.weight = torch.nn.Parameter(torch.randn((in_dim, out_dim)))
self.bias_module0 = Bias(out_dim)
self.bias_module1 = Bias(out_dim)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = x @ self.weight
x = self.bias_module0(x)
x = self.relu(x) # ensure biases have different gradients
x = self.bias_module1(x)
return x
class NestedModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.block0 = BlockB(5, 7)
self.block1 = BlockB(7, 7)
self.bias = torch.nn.Parameter(torch.randn((5,)))
self.block2 = torch.nn.Sequential(
BlockA(7, 9),
BlockA(9, 9),
BlockB(9, 5),
)
self.relu = torch.nn.ReLU()
def forward(self, x) -> torch.Tensor:
x = self.relu(self.block0(x))
x = self.relu(self.block1(x))
x = self.relu(self.block2(x))
x = x + self.bias
return x
def get_input(self, device):
BATCH_SIZE = 8
return (torch.randn((BATCH_SIZE, 5)).to(device),)
def get_loss(self, inp, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
@staticmethod
def wrap(model, group=None, ignore_modules: bool = False) -> torch.nn.Module:
# Flatten Bias0; then flatten weight and Bias1 together into `block1`
model.block1.bias_module0 = FSDP(
model.block1.bias_module0, process_group=group,
)
model.block1 = FSDP(model.block1, process_group=group)
# Flatten Bias0; flatten Bias1; then flatten weight into `block2[1]`
model.block2[1].bias_module0 = FSDP(
model.block2[1].bias_module0, process_group=group,
)
model.block2[1].bias_module1 = FSDP(
model.block2[1].bias_module1, process_group=group,
)
model.block2[1] = FSDP(model.block2[1], process_group=group)
# Flatten weight, Bias, bias into `block2[2]`
ignored_modules = [model.block2[2].bias_module0] if ignore_modules else None
model.block2[2] = FSDP(
model.block2[2], process_group=group, ignored_modules=ignored_modules,
)
return model
@staticmethod
def wrap_alt(model, group=None) -> torch.nn.Module:
model.block0.bias_module0 = FSDP(
model.block0.bias_module0, process_group=group,
)
model.block0 = FSDP(model.block0, process_group=group)
return model
@staticmethod
def wrap_with_unmanaged_params(
model,
add_to_fsdp_module: bool,
group=None,
) -> Tuple[torch.nn.Module, List[torch.nn.Parameter]]:
"""Registers unmanaged parameters before wrapping with :meth:`wrap`."""
device = next(model.parameters()).device
unmanaged_param = torch.nn.Parameter(torch.randn(5, 5, device=device))
# Either register the parameter to a module to be wrapped with FSDP
# (`model.block2[2]`) or a module not to be wrapped with FSDP (`model`)
register_module = model.block2[2] if add_to_fsdp_module else model
register_module.register_parameter(
"unmanaged_param", unmanaged_param,
)
        # For simplicity, we only add a single unmanaged parameter, but it should
        # be easy to generalize if needed
return NestedModel.wrap(model, group), [unmanaged_param]
@staticmethod
def add_unmanaged_param_entry(osd, unmanaged_param, step) -> None:
"""Adds an entry for the unmanaged parameter ``unmanaged_param``
assuming Adam optimizer and a single parameter group."""
# The unmanaged parameters should be passed to this method in
# `model.parameters()` order since their parameter IDs will be assigned
# in order of the skipped IDs
# Assign a parameter ID to the unmanaged parameter
unmanaged_param_id = -1
param_ids = osd["param_groups"][0]["params"]
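        # e.g. if param_ids == [0, 1, 3, 4], the gap at 2 is the ID that was skipped
        # for the unmanaged parameter (illustrative values).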
for i in range(1, len(param_ids)):
diff = param_ids[i] - param_ids[i - 1]
if diff != 1:
assert diff > 1, f"Invalid IDs: {param_ids[i - 1]} {param_ids[i]}"
unmanaged_param_id = param_ids[i - 1] + 1
break
if unmanaged_param_id == -1:
unmanaged_param_id = len(param_ids) # last ID skipped
assert unmanaged_param_id >= 0, "One parameter ID should be skipped"
# Add a state entry for the unmanaged parameter
state_device = next(iter(next(iter(osd["state"].values())).values())).device
osd["state"][unmanaged_param_id] = {
"step": torch.tensor(float(step), device=state_device),
"exp_avg": torch.randn(unmanaged_param.shape, device=state_device),
"exp_avg_sq": torch.randn(unmanaged_param.shape, device=state_device),
}
# Insert the ID into the parameter group in order
bisect.insort(osd["param_groups"][0]["params"], unmanaged_param_id)
# NOTE: We exclude `self.bias` from either parameter group to test the
# case where the optimizer input does not include all model parameters
def param_group0(self) -> List[torch.nn.Parameter]:
# Use `block1`'s parameters for the first parameter group to deviate
# from the `model.parameters()` order
return list(self.block1.parameters())
def param_group1(self) -> List[torch.nn.Parameter]:
# Deviate from the `model.parameters()` order further by rearranging
# `block2`'s parameters to be before `block0`'s parameters
return list(self.block2.parameters()) + \
list(self.block0.parameters())
class TestFSDPOptimState(FSDPTest):
def _init_nested_model(
self,
wrap: bool,
wrap_alt: bool = False, # ignored if `wrap=False`
device: torch.device = torch.device("cuda"),
group=None,
optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,
use_multiple_param_groups: bool = False,
use_diff_optim_inputs: bool = False,
):
model = NestedModel().to(device)
if wrap:
model = NestedModel.wrap_alt(model, group) if wrap_alt \
else NestedModel.wrap(model, group)
if not use_multiple_param_groups:
optim_input = list(model.parameters())
else:
optim_input = [
{"params": model.param_group0()},
{"params": model.param_group1(), "weight_decay": 0.9}
]
# Use a reversed parameter order for the optimizer input on odd ranks
if use_diff_optim_inputs and self.rank % 2 == 1:
if isinstance(optim_input[0], dict):
for param_group in optim_input:
param_group["params"] = list(reversed(param_group["params"]))
else:
optim_input = list(reversed(optim_input))
optim = optim_class(optim_input, lr=0.01)
return model, optim, optim_input
def _init_transformer_model(
self,
wrap: bool,
device: torch.device = torch.device("cuda"),
group=None,
optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,
use_multiple_param_groups: bool = False,
use_diff_optim_inputs: bool = False,
):
if use_multiple_param_groups or use_diff_optim_inputs:
# Keep these as arguments for parity with `_init_nested_model()`;
# these settings are not implemented since the transformer is
# wrapped with FSDP at the top-level, which means that there is
# only a single flattened parameter, making these booleans vacuous
raise NotImplementedError()
if group is None:
group = dist.distributed_c10d._get_default_group()
model = TransformerWithSharedParams.init(
group,
FSDPInitMode.RECURSIVE if wrap else FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
deterministic=True,
)
optim = optim_class(model.parameters(), lr=0.01)
return model, optim, None
def _step_model(
self,
model: torch.nn.Module,
optim: torch.optim.Optimizer,
device: torch.device = torch.device("cuda"),
num_iters: int = 1,
) -> List[float]:
"""Performs a forward pass, backward pass, and optimizer step
``num_iters``-many times, and returns the per-iteration losses."""
torch.manual_seed(0) # set seed for determinism
losses = []
module = model.module if hasattr(model, "module") else model
for _ in range(num_iters):
optim.zero_grad()
inp = module.get_input(device)
output = model(*inp)
loss = module.get_loss(inp, output).to(device)
losses.append(loss.item())
module.run_backward(loss)
optim.step()
return losses
def _broadcast_full_osd(self, full_osd: Dict[str, Any], group=None):
"""Broadcasts the full optimizer state dict in place of using
``torch.save()`` and ``torch.load()`` so that all ranks can have it."""
obj_list = [full_osd]
dist.broadcast_object_list(
obj_list, src=0, group=group,
)
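        # broadcast_object_list() overwrites obj_list in place on non-source ranks,
        # so obj_list[0] holds rank 0's dict on every rank afterwards.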
full_osd = obj_list[0]
return full_osd
def _are_equal_states(
self,
state1: Dict[str, Any],
state2: Dict[str, Any],
) -> bool:
"""Checks if ``state1`` and ``state2`` contain the same mappings."""
if set(state1.keys()) != set(state2.keys()):
return False
for state_name, value1 in state1.items():
value2 = state2[state_name]
if type(value1) != type(value2):
return False
if torch.is_tensor(value1): # tensor state
assert torch.is_tensor(value2)
# Check the values on CPU to be device-agnostic
value1 = value1.cpu()
value2 = value2.cpu()
if value1.shape != value2.shape or \
not torch.all(torch.isclose(value1, value2)):
return False
else: # non-tensor state
if value1 != value2:
return False
return True
def _check_same_state(
self,
full_osd,
ref_osd,
check_same_param_keys: bool,
):
"""Checks that ``full_osd`` and ``ref_osd`` have the same "state" part.
If ``check_same_param_keys=True``, then checks that the parameter keys
match (e.g. when both should be parameter names), and does not check
the parameter keys otherwise."""
assert "state" in ref_osd
self.assertTrue("state" in full_osd)
ref_osd_state = ref_osd["state"]
full_osd_state = full_osd["state"]
if check_same_param_keys:
# Check parameter keys are the same first for earlier erroring
ref_osd_param_ids = set(ref_osd_state.keys())
full_osd_param_ids = set(full_osd_state.keys())
self.assertEqual(ref_osd_param_ids, full_osd_param_ids)
# Check state values are the same
for param_id, param_state in full_osd_state.items():
for state_name, value in param_state.items():
ref_value = ref_osd_state[param_id][state_name]
self.assertEqual(value, ref_value)
return
# Otherwise, only require the parameter keys to be isomorphic (e.g.
# between IDs and names)
ref_osd_states = list(ref_osd["state"].values())
full_osd_states = list(full_osd["state"].values())
self.assertEqual(len(ref_osd_states), len(full_osd_states))
# Use brute-force quadratic-time comparison since it is hard to
# hash a tensor by value instead of by object
for full_osd_state in full_osd_states:
# Check for at least one match (may be > 1 in toy edge cases, e.g.
# multiple biases); nonetheless, each having >= 1 match and the two
# lists having equal length imply that the list contents are equal
self.assertTrue(any(
self._are_equal_states(full_osd_state, ref_osd_state)
for ref_osd_state in ref_osd_states
))
def _check_same_param_groups(
self,
full_osd,
ref_osd,
check_same_param_keys: bool,
):
"""Checks that ``full_osd`` and ``ref_osd`` have the same
"param_groups" part. If ``check_same_param_keys=True`, then checks that
the parameter keys match (e.g. when both should be parameter names),
and does not check the parameter keys otherwise."""
assert "param_groups" in ref_osd
self.assertTrue("param_groups" in full_osd)
ref_osd_param_groups = ref_osd["param_groups"]
full_osd_param_groups = full_osd["param_groups"]
        self.assertEqual(len(full_osd_param_groups), len(ref_osd_param_groups))
for full_osd_pg, ref_osd_pg in zip(
full_osd_param_groups, ref_osd_param_groups,
):
self.assertEqual(
set(full_osd_pg.keys()), set(ref_osd_pg.keys()),
)
for name, full_osd_value in full_osd_pg.items():
if name == "params" and not check_same_param_keys:
continue
self.assertEqual(full_osd_value, ref_osd_pg[name])
def _check_state_device(self, osd: Dict[str, Any], on_gpu: bool):
"""Checks that all tensors in ``osd["state"]`` are on GPU if
``on_gpu=True`` and on CPU if ``on_gpu=False``."""
for param_state in osd["state"].values():
for value in param_state.values():
if torch.is_tensor(value) and value.dim() > 0:
if on_gpu:
self.assertTrue(value.is_cuda)
else:
self.assertFalse(value.is_cuda)
@skip_if_lt_x_gpu(2)
@parametrize("use_multiple_param_groups", [False, True])
@parametrize("rank0_only", [False, True])
@parametrize("use_diff_optim_inputs", [False, True])
def test_full_optim_state_dict_nested(
self,
use_multiple_param_groups: bool,
rank0_only: bool,
use_diff_optim_inputs: bool,
) -> None:
"""
Tests :meth:`full_optim_state_dict` by comparing the returned dict for
an FSDP-wrapped model with that of an equivalent non-wrapped model.
The test checks the equivalence excluding the parameter keys since the
FSDP and normal optimizer state dicts key by names and IDs,
respectively. This means that the test can pass even if parameter keys
are incorrectly mapped to values. Their correct mapping is tested in
other tests that exercise the save/load workflow.
"""
NUM_ITERS = 3
model1, optim1, optim_input = self._init_nested_model(
wrap=True, use_multiple_param_groups=use_multiple_param_groups,
use_diff_optim_inputs=use_diff_optim_inputs,
)
losses1 = self._step_model(model1, optim1, num_iters=NUM_ITERS)
full_osd = FSDP.full_optim_state_dict(
model1, optim1, optim_input, rank0_only=rank0_only,
)
# Non-target ranks get an empty state dict
if rank0_only and self.rank != 0:
self.assertEqual(len(full_osd), 0)
return
model2, optim2, _ = self._init_nested_model(
wrap=False, use_multiple_param_groups=use_multiple_param_groups,
use_diff_optim_inputs=use_diff_optim_inputs,
)
losses2 = self._step_model(model2, optim2, num_iters=NUM_ITERS)
ref_osd = optim2.state_dict()
# Check the losses to eliminate model drift as a source of error
for i, (l1, l2) in enumerate(zip(losses1, losses2)):
assert l1 == l2, f"Losses differ on iter {i}: {l1:.5f} {l2:.5f}"
# Do not check the parameter keys since the full optimizer state dict
# uses parameter names, while the non-wrapped equivalent uses parameter
# IDs
check_same_param_keys = False
self._check_same_param_groups(
full_osd, ref_osd, check_same_param_keys=check_same_param_keys,
)
self._check_same_state(
full_osd, ref_osd, check_same_param_keys=check_same_param_keys,
)
@skip_if_lt_x_gpu(2)
def test_full_optim_state_dict_keys(self):
"""Tests that the parameter keys returned by
:meth:`full_optim_state_dict` match those of :meth:`state_dict` with
full ``state_dict_type`` for a non-FSDP-root model with nested FSDP
instances and ignored modules."""
device = torch.device("cuda")
model = NestedModel().to(device)
wrapped_model = NestedModel.wrap(model, ignore_modules=True)
# Add checkpointing to ensure optim_state_dict and state_dict strip out
# checkpointing prefixes.
apply_activation_checkpointing_wrapper(
model,
check_fn=lambda module: isinstance(module, torch.nn.Sequential)
)
optim = torch.optim.Adam(wrapped_model.parameters(), lr=1e-3)
self._step_model(model, optim, device)
optim_state_dict = FSDP.full_optim_state_dict(wrapped_model, optim, rank0_only=False)
with FSDP.state_dict_type(wrapped_model, StateDictType.FULL_STATE_DICT):
state_dict = wrapped_model.state_dict()
self.assertEqual(optim_state_dict["state"].keys(), state_dict.keys())
# Check that checkpointing prefix was indeed stripped.
for key in optim_state_dict["state"]:
self.assertNotIn(_CHECKPOINT_PREFIX, key)
@skip_if_lt_x_gpu(2)
def test_full_optim_state_dict_nested_invalid(self):
"""Tests that :meth:`full_optim_state_dict` raises an error when
nonzero ranks are missing the optimizer state for parameters on rank
0."""
device = torch.device("cuda")
model = NestedModel.wrap(NestedModel().to(device), None)
optim_input = list(model.parameters())
if self.rank != 0:
# Exclude a parameter so that nonzero ranks are missing state
optim_input = optim_input[:-1]
optim = torch.optim.Adam(optim_input, lr=1e-3)
self._step_model(model, optim, num_iters=3)
error_regex = (
"FSDP currently requires each rank to have at least the "
"optimizer states needed by rank 0's optimizer but some ranks "
"are missing some of those states"
)
with self.assertRaisesRegex(RuntimeError, error_regex):
FSDP.full_optim_state_dict(
model, optim, optim_input,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_multiple_param_groups", [False, True])
@parametrize("wrap_alt", [False, True])
@parametrize("use_diff_optim_inputs", [False, True])
def test_shard_full_optim_state_dict_nested(
self,
use_multiple_param_groups: bool,
wrap_alt: bool,
use_diff_optim_inputs: bool,
):
"""Tests :meth:`shard_full_optim_state_dict` for a non-FSDP-root model
with nested FSDP instances."""
self._test_shard_full_optim_state(
model_class="nested",
use_multiple_param_groups=use_multiple_param_groups,
halve_world_size=False,
osd_comm_method=_OSDCommMethod.BROADCAST_OBJECT_LIST,
use_diff_optim_inputs=use_diff_optim_inputs,
wrap_alt=wrap_alt,
)
@skip_if_lt_x_gpu(2)
def test_shard_full_optim_state_dict_nested_halve_world_size(self):
"""Tests :meth:`shard_full_optim_state_dict` for a non-FSDP-root model
with nested FSDP instances when loading into a new process group with
halved world size."""
# To save CI costs, we test with the "harder" settings:
use_multiple_param_groups = True
use_diff_optim_inputs = True
wrap_alt = True
self._test_shard_full_optim_state(
model_class="nested",
use_multiple_param_groups=use_multiple_param_groups,
halve_world_size=True,
osd_comm_method=_OSDCommMethod.BROADCAST_OBJECT_LIST,
use_diff_optim_inputs=use_diff_optim_inputs,
wrap_alt=wrap_alt,
)
@skip_if_lt_x_gpu(2)
def test_shard_full_optim_state_dict_transformer(self) -> None:
"""Tests :meth:`shard_full_optim_state_dict` for an FSDP-root
transformer model with shared parameters."""
self._test_shard_full_optim_state(
model_class="transformer", use_multiple_param_groups=False,
halve_world_size=True,
osd_comm_method=_OSDCommMethod.BROADCAST_OBJECT_LIST,
use_diff_optim_inputs=False,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_multiple_param_groups", [False, True])
@parametrize("wrap_alt", [False, True])
@parametrize("use_diff_optim_inputs", [False, True])
def test_scatter_full_optim_state_dict_nested(
self,
use_multiple_param_groups: bool,
wrap_alt: bool,
use_diff_optim_inputs: bool,
):
"""Tests :meth:`scatter_full_optim_state_dict` for a non-FSDP-root
model with nested FSDP instances."""
self._test_shard_full_optim_state(
model_class="nested",
use_multiple_param_groups=use_multiple_param_groups,
halve_world_size=False,
osd_comm_method=_OSDCommMethod.SCATTER_FULL_OSD,
use_diff_optim_inputs=use_diff_optim_inputs,
wrap_alt=wrap_alt,
)
@skip_if_lt_x_gpu(2)
def test_scatter_full_optim_state_dict_nested_halve_world_size(self):
"""Tests :meth:`scatter_full_optim_state_dict` for a non-FSDP-root
model with nested FSDP instances when loading into a new process group
with halved world size."""
# To save CI costs, we test with the "harder" settings:
use_multiple_param_groups = True
use_diff_optim_inputs = True
wrap_alt = True
self._test_shard_full_optim_state(
model_class="nested",
use_multiple_param_groups=use_multiple_param_groups,
halve_world_size=True,
osd_comm_method=_OSDCommMethod.SCATTER_FULL_OSD,
use_diff_optim_inputs=use_diff_optim_inputs,
wrap_alt=wrap_alt,
)
@skip_if_lt_x_gpu(2)
def test_scatter_full_optim_state_dict_transformer(self) -> None:
"""Tests :meth:`scatter_full_optim_state_dict` for an FSDP-root
transformer model with shared parameters."""
self._test_shard_full_optim_state(
model_class="transformer", use_multiple_param_groups=False,
halve_world_size=True,
osd_comm_method=_OSDCommMethod.SCATTER_FULL_OSD,
use_diff_optim_inputs=False,
)
def _test_shard_full_optim_state(
self,
model_class: str,
use_multiple_param_groups: bool,
halve_world_size: bool,
osd_comm_method: _OSDCommMethod,
use_diff_optim_inputs: bool,
**new_model_kwargs,
):
"""
(1) Runs a model with full world size for K iterations to generate a
full optimizer state dict;
(2) initializes a model with halved world size and possibly different
FSDP wrapping scheme (based on ``new_model_kwargs``);
(3) shards the full optimizer state dict from (1) according to the
halved-world-size model;
(4) runs the halved-world-size model for K iterations; and
(5) checks that the sharded optimizer state dict from (3) matches the
halved-world-size model's local optimizer state dict, meaning that the
former could have equivalently been loaded into the local optimizer.
"""
NUM_ITERS = 3
initializer = self._init_nested_model if model_class == "nested" \
else self._init_transformer_model if model_class == "transformer" \
else None
assert initializer is not None, f"Unsupported model: {model_class}"
# First, run a wrapped model with full world size for a few iterations
model1, optim1, optim_input1 = initializer(
wrap=True, use_multiple_param_groups=use_multiple_param_groups,
)
self._step_model(model1, optim1, num_iters=NUM_ITERS)
full_osd1 = FSDP.full_optim_state_dict(model1, optim1, optim_input1)
if halve_world_size:
# Create a new process group with halved world size
new_group_ranks = [r for r in range(self.world_size) if r % 2 == 0]
new_group = dist.new_group(ranks=new_group_ranks)
if self.rank not in new_group_ranks:
return
else:
# Continue using the same group and hence world size
new_group = dist.distributed_c10d._get_default_group()
# Second, run a wrapped model with (possibly) halved world size and
# (possibly) differing `optim_input` across ranks
model2, optim2, optim_input2 = initializer(
wrap=True, group=new_group,
use_multiple_param_groups=use_multiple_param_groups,
use_diff_optim_inputs=use_diff_optim_inputs,
**new_model_kwargs, # specify `wrap_alt` to change wrapping
)
self._step_model(model2, optim2, num_iters=NUM_ITERS)
full_osd2 = FSDP.full_optim_state_dict(model2, optim2, optim_input2, group=new_group)
# Compute two sharded optim state dicts: (1) for the first model
# according to the second model and (2) for the second model according
# to the second model
if osd_comm_method == _OSDCommMethod.BROADCAST_OBJECT_LIST:
full_osd1 = self._broadcast_full_osd(full_osd1, group=new_group)
sharded_osd1 = FSDP.shard_full_optim_state_dict(
full_osd1, model2, optim_input2,
)
full_osd2 = self._broadcast_full_osd(full_osd2, group=new_group)
sharded_osd2 = FSDP.shard_full_optim_state_dict(
full_osd2, model2, optim_input2,
)
elif osd_comm_method == _OSDCommMethod.SCATTER_FULL_OSD:
sharded_osd1 = FSDP.scatter_full_optim_state_dict(
full_osd1 if self.rank == 0 else None, model2, optim_input2,
group=new_group,
)
sharded_osd2 = FSDP.scatter_full_optim_state_dict(
full_osd2 if self.rank == 0 else None, model2, optim_input2,
group=new_group,
)
self._check_state_device(sharded_osd1, on_gpu=True)
self._check_state_device(sharded_osd2, on_gpu=True)
# As a sanity check, check that sharding the second model's full
# optimizer state dict according to itself is equivalent to its local
# optimizer's state dict
local_osd2 = optim2.state_dict()
check_same_param_keys = True # should all have matching parameter IDs
self._check_same_param_groups(
sharded_osd2, local_osd2,
check_same_param_keys=check_same_param_keys,
)
self._check_same_state(
sharded_osd2, local_osd2,
check_same_param_keys=check_same_param_keys,
)
# Check that sharding the first model's full optimizer state dict
# according to the second model is equivalent to the second model's
# local optimizer state dict
self._check_same_param_groups(
sharded_osd1, local_osd2,
check_same_param_keys=check_same_param_keys,
)
self._check_same_state(
sharded_osd1, local_osd2,
check_same_param_keys=check_same_param_keys,
)
# As a sanity check, check that we can load and run a few iterations
optim2.load_state_dict(sharded_osd1)
self._step_model(model2, optim2, num_iters=NUM_ITERS)
@skip_if_lt_x_gpu(2)
@parametrize("add_to_fsdp_module", [False, True])
def test_shard_full_optim_state_dict_unmanaged_params(
self,
add_to_fsdp_module: bool,
):
"""
Tests :meth:`shard_full_optim_state_dict` when there are unmanaged
parameters.
- If ``add_to_fsdp_module=True``, then the unmanaged parameters are
added to a module to be wrapped with FSDP, in which case there should
        be an error since we require that all unflattened parameters
        comprising a flattened parameter have the same scalar state (e.g.
        Adam "step"), but the added parameter is missing its entry.
- If ``add_to_fsdp_module=False``, then the unmanaged parameters are
added to a module not to be wrapped with FSDP, in which case there
should be no error (emulating model parallel use cases where some
parameters may be managed externally to FSDP).
We do not separately test unmanaged parameters for
:meth:`scatter_full_optim_state_dict` to save CI cost since it calls
into the same subroutine :meth:`_flatten_full_optim_state_dict`.
"""
NUM_ITERS = 1
# Create a normal wrapped model
model, optim, optim_input = self._init_nested_model(wrap=True)
self._step_model(model, optim, num_iters=NUM_ITERS)
full_osd = FSDP.full_optim_state_dict(
model, optim, optim_input, rank0_only=False,
) # save on all ranks to avoid having to broadcast from rank 0
# Create a new model with the same structure but additional unmanaged
# parameters, representing the model for which we want to load
device = torch.device("cuda")
model = NestedModel().to(device)
model, unmanaged_params = NestedModel.wrap_with_unmanaged_params(
model, add_to_fsdp_module,
)
optim_input = list(model.parameters())
if add_to_fsdp_module:
# If we add the unmanaged parameters to a module wrapped with FSDP,
            # then the flattened parameter will be composed of some
            # unflattened parameters with zero-dimensional tensor state (i.e.
            # Adam "step") and others without (i.e. the unmanaged parameters),
            # which triggers the error that we raise to ensure correctness
error_prefix = "^(All unflattened parameters comprising a " \
"single flattened parameter must have scalar state with the " \
"same value and dtype)"
with self.assertRaisesRegex(ValueError, error_prefix):
FSDP.shard_full_optim_state_dict(
full_osd, model, optim_input,
)
else:
# If we add the unmanaged parameters to a module not wrapped with
# FSDP, then we simply ignore them without erroring to enable
# model parallelism use cases, where some parameters are managed
# externally to FSDP
sharded_osd = FSDP.shard_full_optim_state_dict(
full_osd, model, optim_input,
)
# Add entries for the unmanaged parameters to be able to load
for unmanaged_param in unmanaged_params:
NestedModel.add_unmanaged_param_entry(
sharded_osd, unmanaged_param, NUM_ITERS,
)
# Check that we can load the optimizer state dict
optim = torch.optim.Adam(optim_input, lr=1e-3)
optim.load_state_dict(sharded_osd)
@skip_if_lt_x_gpu(2)
@parametrize("use_multiple_param_groups", [False, True])
def test_rekey_optim_state_dict_to_ids(
self,
use_multiple_param_groups: bool,
):
"""Tests :meth:`rekey_optim_state_dict` with the new keys being
parameter IDs by checking that a wrapped model (i.e. with FSDP modules)
can rekey its optimizer state dict to match that of an equivalent
non-wrapped model (i.e. without FSDP modules)."""
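        # Sketch of the rekeying under test (hypothetical `full_osd` and
        # `nonwrapped_model`):
        #   rekeyed = FSDP.rekey_optim_state_dict(
        #       full_osd, OptimStateKeyType.PARAM_ID, nonwrapped_model,
        #   )
        #   nonwrapped_optim.load_state_dict(rekeyed)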
NUM_ITERS = 3
# Run a wrapped model for a few iterations
model1, optim1, optim_input1 = self._init_nested_model(
wrap=True, use_multiple_param_groups=use_multiple_param_groups,
)
self._step_model(model1, optim1, num_iters=NUM_ITERS)
full_osd = FSDP.full_optim_state_dict(model1, optim1, optim_input1)
# Broadcast instead of `torch.save()`/`torch.load()` so that all ranks
# have the full state dict
full_osd = self._broadcast_full_osd(full_osd)
# Run a non-wrapped model for a few iterations
model2, optim2, optim_input2 = self._init_nested_model(
wrap=False, use_multiple_param_groups=use_multiple_param_groups,
)
self._step_model(model2, optim2, num_iters=NUM_ITERS)
# Re-key the wrapped model's optimizer state dict using parameter IDs
# according to the non-wrapped model
rekeyed_osd = FSDP.rekey_optim_state_dict(
full_osd, OptimStateKeyType.PARAM_ID, model2, optim_input2,
)
# Check that the re-keyed dict and actual dict are the same
osd = optim2.state_dict()
check_same_param_keys = True
self._check_same_param_groups(
rekeyed_osd, osd, check_same_param_keys=check_same_param_keys,
)
self._check_same_state(
rekeyed_osd, osd, check_same_param_keys=check_same_param_keys,
)
# As a sanity check, check that we can load and run a few iterations
optim2.load_state_dict(rekeyed_osd)
self._step_model(model2, optim2, num_iters=NUM_ITERS)
@skip_if_lt_x_gpu(2)
@parametrize("use_multiple_param_groups", [False])
def test_rekey_optim_state_dict_to_names(
self,
use_multiple_param_groups: bool,
):
"""Tests :meth:`rekey_optim_state_dict` with the new keys being
parameter names by checking that a non-wrapped model (i.e. without FSDP
modules) can rekey its optimizer state dict to match the expected
output of :meth:`full_optim_state_dict`, hence be sharded using
:meth:`shard_full_optim_state_dict`, and finally match the per-rank
optimizer state dict of a wrapped model (i.e. with FSDP modules)."""
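        # Sketch of the chain under test (hypothetical names; the real calls
        # appear below):
        #   rekeyed = FSDP.rekey_optim_state_dict(
        #       nonwrapped_optim.state_dict(), OptimStateKeyType.PARAM_NAME,
        #       nonwrapped_model,
        #   )
        #   sharded = FSDP.shard_full_optim_state_dict(rekeyed, wrapped_model)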
NUM_ITERS = 3
# Run a wrapped model for a few iterations
model1, optim1, optim_input1 = self._init_nested_model(
wrap=True, use_multiple_param_groups=use_multiple_param_groups,
)
self._step_model(model1, optim1, num_iters=NUM_ITERS)
# Run a non-wrapped model for a few iterations
model2, optim2, optim_input2 = self._init_nested_model(
wrap=False, use_multiple_param_groups=use_multiple_param_groups,
)
self._step_model(model2, optim2, num_iters=NUM_ITERS)
# Re-key the non-wrapped model's optimizer state dict using parameter
# names (still according to itself)
osd2 = optim2.state_dict()
rekeyed_osd = FSDP.rekey_optim_state_dict(
osd2, OptimStateKeyType.PARAM_NAME, model2, optim_input2,
)
# Shard the non-wrapped model's re-keyed optimizer state dict, which
# maps back to (flattened) parameter IDs
sharded_osd = FSDP.shard_full_optim_state_dict(
rekeyed_osd, model1, optim_input1,
)
# Check that this sharded optimizer state dict matches the wrapped
# model's per-rank optimizer state dict
osd1 = optim1.state_dict()
check_same_param_keys = True
self._check_same_param_groups(
sharded_osd, osd1, check_same_param_keys=check_same_param_keys,
)
self._check_same_state(
sharded_osd, osd1, check_same_param_keys=check_same_param_keys,
)
# As a sanity check, check that we can load and run a few iterations
optim1.load_state_dict(sharded_osd)
self._step_model(model1, optim1, num_iters=NUM_ITERS)
instantiate_parametrized_tests(TestFSDPOptimState)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_optim_state.py
|
# Owner(s): ["oncall: distributed"]
import sys
import warnings
from contextlib import suppress
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Model(torch.nn.Module):
"""
Model that supports two computation paths: `layer0` -> `layer1` and
`layer0` -> `layer2`. Notably, both `layer1` and `layer2` have 36 elements
when flattened, which means that their corresponding all-gathers and
reduce-scatters may be silently matched if we do not perform any checks.
"""
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(5, 6)
self.layer1 = torch.nn.Linear(6, 6, bias=False)
self.layer2 = torch.nn.Sequential(
torch.nn.Linear(6, 3, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(3, 6, bias=False),
)
self.relu = torch.nn.ReLU()
self.use_alt_path = False
for param in self.layer2.parameters():
param.requires_grad = False
def forward(self, x):
# `layer0` -> `layer1` (normal)
# `layer0` -> `layer2` (alternate)
z = self.relu(self.layer0(x))
z = self.relu(self.layer2(z)) if self.use_alt_path \
else self.relu(self.layer1(z))
return z
def get_input(self, device: torch.device):
return (torch.randn((8, 5)).to(device),)
def get_loss(self, input, output):
return output.sum()
def run_backward(self, loss):
loss.backward()
def flip_path(self):
params_to_freeze = self.layer2.parameters() if self.use_alt_path \
else self.layer1.parameters()
params_to_unfreeze = self.layer1.parameters() if self.use_alt_path \
else self.layer2.parameters()
for param in params_to_freeze:
param.requires_grad = False
for param in params_to_unfreeze:
param.requires_grad = True
self.use_alt_path = not self.use_alt_path
@staticmethod
def wrap(sharding_strategy: ShardingStrategy, device: torch.device):
model = Model()
model.layer1 = FSDP(model.layer1, sharding_strategy=sharding_strategy)
model.layer2 = FSDP(model.layer2, sharding_strategy=sharding_strategy)
fsdp_model = FSDP(model, sharding_strategy=sharding_strategy)
return fsdp_model.to(device)
class TestFSDPExecOrder(FSDPTest):
@property
def device(self):
return torch.device("cuda")
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],
)
def test_invalid_first_iter_order(
self,
sharding_strategy: ShardingStrategy,
):
"""Tests that FSDP errors if the all-gather order differs across ranks
in the first iteration."""
# Rank 0 runs the forward pass in one order and all other ranks run in
# different order
fsdp_model = Model.wrap(sharding_strategy, self.device)
if self.rank != 0:
fsdp_model.flip_path()
inp = fsdp_model.module.get_input(self.device)
# Match the error message with the following prefix
error_regex = "^(Forward order differs across ranks)"
with self.assertRaisesRegex(RuntimeError, error_regex):
fsdp_model(*inp)
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],
)
@parametrize("iters_before_path_change", [1, 3])
def test_invalid_later_iter_order(
self,
sharding_strategy: ShardingStrategy,
iters_before_path_change: int,
):
"""Tests that FSDP warns the user if the all-gather order changes after
the first iteration."""
# On the first iteration, all ranks run the same order, and on the next
# iteration, all but rank 0 run in a different order
fsdp_model = Model.wrap(sharding_strategy, self.device)
for _ in range(iters_before_path_change):
inp = fsdp_model.module.get_input(self.device)
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).to(self.device)
fsdp_model.module.run_backward(loss)
# Match the warning message with the following prefix
regex = "^(Forward order differs from that of the first iteration " \
f"on rank {self.rank} -- collectives are unchecked and may give " \
"incorrect results or hang)"
context = self.assertWarnsRegex(
expected_warning=UserWarning, expected_regex=regex,
) if self.rank != 0 else suppress()
if self.rank != 0:
fsdp_model.flip_path()
inp = fsdp_model.module.get_input(self.device)
# Expect a warning for the forward pass all-gather
with context: # warning for forward pass all-gather
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).to(self.device)
fsdp_model.module.run_backward(loss)
# Run an additional iteration to check that there are no more warnings
inp = fsdp_model.module.get_input(self.device)
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).to(self.device)
fsdp_model.module.run_backward(loss)
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],
)
def test_train_eval(self, sharding_strategy: ShardingStrategy):
fsdp_model = Model.wrap(sharding_strategy, self.device)
NUM_ITERS = 3
NUM_EPOCHS = 2
with warnings.catch_warnings(record=True) as w: # records warnings to `w`
for _ in range(NUM_EPOCHS):
fsdp_model.train()
for _ in range(NUM_ITERS):
inp = fsdp_model.module.get_input(self.device)
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).to(self.device)
fsdp_model.module.run_backward(loss)
fsdp_model.eval()
for _ in range(NUM_ITERS):
inp = fsdp_model.module.get_input(self.device)
output = fsdp_model(*inp)
fsdp_model.module.get_loss(inp, output).to(self.device)
# Check that the order validation warning was not issued (errors do not
# need to be checked since they will be directly reported)
warning_prefix = "Forward order differs"
for warning in w:
if str(warning.message).startswith(warning_prefix):
raise AssertionError(f"Warning was incorrectly issued: {warning.message}")
# If we still validate the forward execution order in eval mode, then
# an `AssertionError` will be raised above for both sharding strategies
instantiate_parametrized_tests(TestFSDPExecOrder)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_exec_order.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestApply(FSDPTest):
@property
def world_size(self):
return 2
@torch.no_grad()
def _init_linear_weights(self, m):
if type(m) == nn.Linear:
m.weight.fill_(1.0)
m.bias.fill_(1.0)
def check_weights(self, fsdp, expected_tensor_fn, check):
with FSDP.summon_full_params(fsdp, recurse=True):
linear_modules = [
module for module in fsdp.modules() if type(module) == nn.Linear
]
for module in linear_modules:
for param in module.parameters():
expected = expected_tensor_fn(param)
check(param, expected, f"Got {param} but expected {expected}")
def _check_apply(self, fsdp):
# Assert linear weights are not all 1.0
self.check_weights(
fsdp, lambda param: torch.empty_like(param).fill_(1.0), self.assertNotEqual
)
fsdp.apply(self._init_linear_weights)
# Ensure all weights are 1.0
self.check_weights(
fsdp, lambda param: torch.empty_like(param).fill_(1.0), self.assertEqual
)
@skip_if_lt_x_gpu(2)
def test_nested_module_apply(self):
"""Tests that ``apply()`` modifies parameter values in-place on a
non-FSDP-root nested FSDP-wrapped model."""
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
)
self._check_apply(nested_wrapped_module)
@skip_if_lt_x_gpu(2)
def test_transformer_module_apply(self):
"""Tests that ``apply()`` modifies parameter values in-place on an
FSDP-wrapped transformer model with shared parameters."""
transformer = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
)
self._check_apply(transformer)
@skip_if_lt_x_gpu(2)
def test_apply_in_summon_raises_error(self):
"""Tests that calling ``apply()`` on an FSDP instance inside the
``summon_full_params()`` context raises an error."""
transformer = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
)
with transformer.summon_full_params(transformer):
with self.assertRaisesRegex(ValueError, "expected to be in states"):
transformer.apply(self._init_linear_weights)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_apply.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import always_wrap_policy as always_wrap
from torch.distributed.fsdp.wrap import wrap, enable_wrap
from torch.testing._internal.common_fsdp import (
FSDPTest,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
parametrize,
instantiate_parametrized_tests,
sandcastle_skip_if,
)
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
)
_TORCHDISTX_AVAIL = True
try:
from torchdistx import deferred_init
except ImportError:
_TORCHDISTX_AVAIL = False
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def _reset_params_if_meta(is_meta, model):
# For torchdistX init, we don't need to call reset_params, as
# deferred_init(model).materialize() is equivalent to model().
if is_meta:
model.reset_parameters()
class MyLinear(nn.Linear):
"""
Linear layer with deterministic reset_parameters for testing.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self, *args, **kwargs):
with torch.no_grad():
self.weight.fill_(1)
class MyModel(nn.Module):
def __init__(self, device):
super().__init__()
self.lin1 = MyLinear(2, 2, bias=False, device=device)
self.lin2 = MyLinear(2, 2, bias=False, device=device)
def forward(self, x):
return self.lin2(self.lin1(x))
def reset_parameters(self, *args, **kwargs):
for m in [self.lin1, self.lin2]:
if not isinstance(m, FSDP):
m.reset_parameters()
class NestedModel(nn.Module):
def __init__(self, device):
super().__init__()
self.lin1 = MyLinear(2, 2, bias=False, device=device)
self.lin1 = wrap(self.lin1)
self.lin2 = MyLinear(2, 2, bias=False, device=device)
self.l3 = MyModel(device=device)
self.l3 = wrap(self.l3)
def forward(self, x):
return self.l3(self.lin2(self.lin1(x)))
def reset_parameters(self):
for m in [self.lin1, self.lin2, self.l3]:
if not isinstance(m, FSDP):
m.reset_parameters()
def _init_with_reset_params(module):
"""
    to_empty + reset_parameters() init function example for modules
    initialized with device="meta"
"""
is_meta = any(t.is_meta for t in module.parameters())
if is_meta:
module.to_empty(device=torch.cuda.current_device())
with torch.no_grad():
module.reset_parameters()
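# Usage sketch (assumed, mirroring the tests below): pass this function as
# `param_init_fn` when wrapping a meta-device module, e.g.
#   fsdp_model = FSDP(MyModel(device="meta"), param_init_fn=_init_with_reset_params)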
def _init_with_torchdistX(module):
"""
torchdistX-based deferred module initialization function example
using ``materialize_module``.
"""
assert _TORCHDISTX_AVAIL
def check_fn(k):
return not isinstance(k, FSDP)
deferred_init.materialize_module(module, check_fn=check_fn)
class TestFSDPWithMetaDevice(FSDPTest):
@property
def world_size(self):
return 2
@property
def process_group(self):
return dist.distributed_c10d._get_default_group()
def _compare_fsdp(self, fsdp1, fsdp2):
with FSDP.summon_full_params(fsdp1):
with FSDP.summon_full_params(fsdp2):
for p1, p2 in zip(fsdp1.parameters(), fsdp2.parameters()):
self.assertTrue(torch.allclose(p1, p2), f"{p1} vs {p2}")
def _test_simple_model_with_meta_device(self, meta_module_fn, init_fn=None):
# Create model on meta device and wrap with FSDP.
model = meta_module_fn()
is_meta = next(model.parameters()).is_meta
fsdp_meta = FSDP(
model,
auto_wrap_policy=always_wrap,
param_init_fn=init_fn,
)
meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
# Test to make sure it is the same model parameters as regular FSDP
# approach.
regular = MyModel(device="cuda")
_reset_params_if_meta(is_meta, regular)
fsdp_regular = FSDP(regular, auto_wrap_policy=always_wrap)
regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)
self._compare_fsdp(fsdp_meta, fsdp_regular)
inp = torch.randn(10, 2, device='cuda')
fsdp_meta(inp).sum().backward()
fsdp_regular(inp).sum().backward()
meta_opt.step()
regular_opt.step()
self._compare_fsdp(fsdp_meta, fsdp_regular)
# Test that meta init works if all submodules are contained in only a
# single FSDP unit.
model = meta_module_fn()
fsdp_meta = FSDP(model, param_init_fn=init_fn)
meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
regular = MyModel(device="cuda")
_reset_params_if_meta(is_meta, regular)
fsdp_regular = FSDP(regular, auto_wrap_policy=always_wrap)
regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)
# Run a forward + backward pass + optimizer step
fsdp_meta(inp).sum().backward()
fsdp_regular(inp).sum().backward()
meta_opt.step()
regular_opt.step()
self._compare_fsdp(fsdp_meta, fsdp_regular)
@skip_if_lt_x_gpu(2)
def test_simple_model_with_meta_device_reset_params(self):
def meta_module_fn():
return MyModel(device="meta")
self._test_simple_model_with_meta_device(
meta_module_fn, _init_with_reset_params
)
@skip_if_lt_x_gpu(2)
def test_simple_model_with_meta_device_default_init(self):
def meta_module_fn():
return MyModel(device="meta")
self._test_simple_model_with_meta_device(meta_module_fn)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _TORCHDISTX_AVAIL, "Test requires torchdistX: https://github.com/pytorch/torchdistX"
)
def test_simple_model_with_torchdistX_default_init(self):
def meta_module_fn():
return deferred_init.deferred_init(MyModel, device="cuda")
self._test_simple_model_with_meta_device(meta_module_fn)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _TORCHDISTX_AVAIL, "Test requires torchdistX: https://github.com/pytorch/torchdistX"
)
def test_simple_model_with_torchdistX_init_fn(self):
def meta_module_fn():
return deferred_init.deferred_init(MyModel, device="cuda")
self._test_simple_model_with_meta_device(meta_module_fn, init_fn=_init_with_torchdistX)
def _test_nested_model_with_meta_device(self, auto_wrap, meta_module_fn, init_fn=None):
if auto_wrap:
module = meta_module_fn()
is_meta = next(module.parameters()).is_meta
fsdp_meta = FSDP(
module,
auto_wrap_policy=always_wrap,
param_init_fn=init_fn,
)
meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
module_regular = NestedModel(device="cuda")
_reset_params_if_meta(is_meta, module_regular)
fsdp_regular = FSDP(
module_regular,
auto_wrap_policy=always_wrap,
)
regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)
else:
with enable_wrap(
wrapper_cls=FSDP, param_init_fn=init_fn,
):
module = meta_module_fn()
is_meta = next(module.parameters()).is_meta
                # Non-FSDP modules will still be initialized because they
                # bubble up to be part of a larger FSDP unit.
fsdp_meta = wrap(module)
meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
# Init and reset parameters before wrapping so that reset_params
# matches up with meta device's initialization.
module_regular = NestedModel(device="cuda")
_reset_params_if_meta(is_meta, module_regular)
with enable_wrap(wrapper_cls=FSDP):
module_regular.lin1 = wrap(module_regular.lin1)
module_regular.l3 = wrap(module_regular.l3)
fsdp_regular = wrap(module_regular)
regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)
# Compare it before training
self._compare_fsdp(fsdp_meta, fsdp_regular)
inp = torch.randn(10, 2, device='cuda')
fsdp_meta(inp).sum().backward()
fsdp_regular(inp).sum().backward()
meta_opt.step()
regular_opt.step()
self._compare_fsdp(fsdp_meta, fsdp_regular)
@skip_if_lt_x_gpu(2)
@parametrize("auto_wrap", [True, False])
def test_nested_model_with_meta_device_reset_params(self, auto_wrap):
def meta_module_fn():
return NestedModel(device="meta")
self._test_nested_model_with_meta_device(
auto_wrap=auto_wrap, meta_module_fn=meta_module_fn, init_fn=_init_with_reset_params
)
@skip_if_lt_x_gpu(2)
@parametrize("auto_wrap", [True, False])
def test_nested_model_with_meta_device_default_init(self, auto_wrap):
def meta_module_fn():
return NestedModel(device="meta")
self._test_nested_model_with_meta_device(
auto_wrap=auto_wrap, meta_module_fn=meta_module_fn,
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _TORCHDISTX_AVAIL, "Test requires torchdistX: https://github.com/pytorch/torchdistX"
)
@parametrize("auto_wrap", [True, False])
def test_nested_model_with_torchdistX_default_init(self, auto_wrap):
def meta_module_fn():
return deferred_init.deferred_init(NestedModel, device="cuda")
self._test_nested_model_with_meta_device(
auto_wrap=auto_wrap, meta_module_fn=meta_module_fn
)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _TORCHDISTX_AVAIL, "Test requires torchdistX: https://github.com/pytorch/torchdistX"
)
@parametrize("auto_wrap", [True, False])
def test_nested_model_with_torchdistX_init_fn(self, auto_wrap):
def meta_module_fn():
return deferred_init.deferred_init(NestedModel, device="cuda")
self._test_nested_model_with_meta_device(
auto_wrap=auto_wrap, meta_module_fn=meta_module_fn, init_fn=_init_with_torchdistX,
)
def _test_bad_arg(self, meta_module_fn):
mod = meta_module_fn()
with self.assertRaisesRegex(ValueError, "to be callable"):
FSDP(mod, param_init_fn=42)
@skip_if_lt_x_gpu(2)
@sandcastle_skip_if(
not _TORCHDISTX_AVAIL, "Test requires torchdistX: https://github.com/pytorch/torchdistX"
)
def test_bad_arg_torchdistx(self):
def meta_module_fn():
return deferred_init.deferred_init(NestedModel, "cuda")
self._test_bad_arg(meta_module_fn)
@skip_if_lt_x_gpu(2)
def test_bad_arg_meta(self):
def meta_module_fn():
return NestedModel(device="meta")
self._test_bad_arg(meta_module_fn)
instantiate_parametrized_tests(TestFSDPWithMetaDevice)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_meta.py
|
# Owner(s): ["oncall: distributed"]
from typing import Any
import torch
from torch.distributed.fsdp._symbolic_trace import _init_execution_info, _patch_tracer
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
run_tests,
)
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = torch.nn.Parameter(torch.randn(6, 6))
self.weight2 = torch.nn.Parameter(torch.randn(6, 6))
self.weight_unused = torch.nn.Parameter(torch.randn(2, 2))
self.layer0 = torch.nn.Linear(6, 6)
self.layer1 = torch.nn.Linear(6, 6, bias=False)
self.layer2 = torch.nn.Sequential(
torch.nn.Linear(6, 3, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(3, 6, bias=False),
)
self.relu = torch.nn.ReLU()
def forward(self, x: Any, run_all_layers: bool):
z = self.relu(self.layer0(x))
z = self.relu(self.layer2(z))
z = z @ self.weight1
if run_all_layers:
z = self.relu(self.layer1(z))
z = z @ self.weight2
# used to test the case where a module is called more than once
z = self.relu(self.layer0(x))
return z
class TestSymbolicTracing(FSDPTest):
def test_symbolic_tracing_outputs(self):
"""
test ``execution_info.module_forward_order`` and ``execution_info.module_to_execution_infos``
after running ``tracer.trace()`` inside ``_patch_tracer``.
"""
model = Model()
tracer = torch.fx.Tracer()
execution_info = _init_execution_info(model)
original_call_module = tracer.call_module
original_create_proxy = tracer.create_proxy
with _patch_tracer(
tracer=tracer, root_module=model, execution_info=execution_info
):
concrete_args = {"run_all_layers": True}
tracer.trace(model, concrete_args)
# the member functions of tracer should not be changed
self.assertEqual(original_call_module, tracer.call_module)
self.assertEqual(original_create_proxy, tracer.create_proxy)
        # test execution_info.module_forward_order
correct_module_forward_order = [
model,
model.layer0,
model.relu,
model.layer2,
model.layer2[0],
model.layer2[1],
model.layer2[2],
model.relu,
model.layer1,
model.relu,
model.layer0,
model.relu,
]
self.assertEqual(
execution_info.module_forward_order, correct_module_forward_order
)
# test execution_info.module_to_execution_infos
self.assertEqual(
execution_info.module_to_execution_infos[model],
[
(model.layer0, list(model.layer0.named_parameters())),
(model.layer2, list(model.layer2.named_parameters())),
(model, [("weight1", model.weight1)]),
(model.layer1, list(model.layer1.named_parameters())),
(model, [("weight2", model.weight2)]),
(model.layer0, list(model.layer0.named_parameters())),
],
)
self.assertEqual(
execution_info.module_to_execution_infos[model.layer0],
[(model.layer0, list(model.layer0.named_parameters()))],
)
self.assertEqual(
execution_info.module_to_execution_infos[model.layer1],
[(model.layer1, list(model.layer1.named_parameters()))],
)
self.assertEqual(
execution_info.module_to_execution_infos[model.layer2],
[
(model.layer2[0], list(model.layer2[0].named_parameters())),
(model.layer2[2], list(model.layer2[2].named_parameters())),
],
)
self.assertEqual(execution_info.module_to_execution_infos[model.relu], [])
        # test execution_info.param_exec_order
correct_param_order = [
model.layer0.weight,
model.layer0.bias,
model.layer2[0].weight,
model.layer2[2].weight,
model.weight1,
model.layer1.weight,
model.weight2,
]
self.assertEqual(execution_info.param_exec_order, correct_param_order)
instantiate_parametrized_tests(TestSymbolicTracing)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_fx.py
|
# Owner(s): ["oncall: distributed"]
import functools
import itertools
import sys
import unittest
from typing import Optional
import torch
from torch import distributed as dist
from torch.cuda.amp.common import amp_definitely_not_available
from torch.distributed.fsdp import CPUOffload, MixedPrecision
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
DummyProcessGroup,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
subtest_name,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TestCase,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
params = "cpu_offload,sharding_strategy,mixed_precision"
cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)]
sharding_strategy_config = [ShardingStrategy.SHARD_GRAD_OP, None]
mixed_precision = ["enable_mixed_precision", None]
configs = list(itertools.product(cpu_offload_config,
sharding_strategy_config,
mixed_precision))
test_name_mapping = {
str(CPUOffload(offload_params=True)): "offload_true",
str(CPUOffload(offload_params=False)): "offload_false",
str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op",
"enable_mixed_precision": "mixed_precision"
}
subtest_name = functools.partial(subtest_name, test_name_mapping)
class TestShardGradScaler(TestCase):
@unittest.skipIf(amp_definitely_not_available(), "no supported device (cuda, xla) found")
def test_grad_scaling(self):
pg = DummyProcessGroup(0, 1)
scaler = ShardedGradScaler(init_scale=2.0, process_group=pg, enabled=True)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cpu")
t1 = torch.full((1,), 8.0, dtype=torch.float32, device="cpu")
outputs = [t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), t1.clone()]]
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 16.0 and outputs[1][0] == 8.0 and outputs[1][1] == 16.0)
self.assertTrue(outputs[2][0] == 8.0 and outputs[2][1] == 16.0)
self.assertTrue(scaler._scale.device == t1.device)
@unittest.skipIf(amp_definitely_not_available(), "no supported device (cuda, xla) found")
def test_scaling_unscaling_sparse(self):
pg = DummyProcessGroup(0, 1)
scaler = ShardedGradScaler(init_scale=2.0, process_group=pg, enabled=True)
inv_scale = torch.full((1,), 0.5, dtype=torch.float, device="cpu")
found_inf = torch.full((1,), 0, dtype=torch.float, device="cpu")
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cpu", dtype=torch.int64)
v = torch.tensor([16.0, 32.0, 64.0], dtype=torch.float, device="cpu")
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cpu", dtype=torch.float)
# unscale sparse tensors
s1 = s.clone()
s1.grad = s.clone()
opt = torch.optim.SGD([s1], lr=1.0)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf)[s1.device]
self.assertEqual(found_inf, 0.0)
self.assertEqual(s1.grad.to_dense(), (s / 2).to_dense())
# unscale sparse tensor: inf
v = torch.tensor([16.0, 32.0, float('inf')], dtype=torch.float, device="cpu")
s1.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cpu", dtype=torch.float)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf)[s1.device]
self.assertEqual(found_inf, 1.0)
# unscale sparse tensor: overflow (marked as inf)
i = torch.tensor([[1, 1, 1],
[0, 0, 2]], device="cpu", dtype=torch.int64)
        # coalescing this sparse tensor sums the duplicate (1, 0) entries:
        # 2**15 + 2**15 = 65536, which overflows float16 (max ~65504) to inf
v = torch.tensor([2**15, 2**15, 1.0], dtype=torch.float16, device="cpu")
s1 = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cpu", dtype=torch.float16)
s1.grad = s1.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf)[s1.device]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(amp_definitely_not_available(), "no supported device (cuda, xla) found")
def test_inf_gradients_skip_optim_step(self):
pg = DummyProcessGroup(0, 1)
scaler = ShardedGradScaler(init_scale=2.0, process_group=pg, enabled=True)
loss = torch.full((1,), 4.0, dtype=torch.float32, device="cpu")
t0 = torch.tensor([float('inf')], dtype=torch.float32, device="cpu")
t0.grad = t0.clone()
opt = torch.optim.SGD([t0], lr=1.0)
scaler.scale(loss)
ret_val = scaler.step(opt)
self.assertTrue(ret_val is None)
class TestShardedGradScalerParityWithDDP(FSDPTest):
def _get_init_modes_for_test(self, cpu_offload):
modes = [
CUDAInitMode.CUDA_AFTER,
CUDAInitMode.CUDA_BEFORE
]
# Note that CUDAInitMode.CUDA_NEVER works currently only with CPU
# offload as we explicitly bring the param back to CUDA device. In
# general, it will not work since we try to all_gather p.data which is
# on CPU but NCCL only supports GPU.
if cpu_offload.offload_params:
modes.append(CUDAInitMode.CUDA_NEVER)
return modes
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_fsdp_ddp_parity_with_grad_scaler(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
mixed_precision: Optional[str],
):
init_modes = self._get_init_modes_for_test(cpu_offload)
mp = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
) if mixed_precision is not None else None
for cuda_init_mode in init_modes:
self._test_fsdp_parity(
NestedWrappedModule,
FSDPInitMode.RECURSIVE,
cuda_init_mode=cuda_init_mode,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
mixed_precision=mp,
enable_sharded_grad_scaler=True,
)
instantiate_parametrized_tests(TestShardGradScaler)
instantiate_parametrized_tests(TestShardedGradScalerParityWithDDP)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_sharded_grad_scaler.py
|
# Owner(s): ["oncall: distributed"]
import contextlib
import itertools
import sys
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
from torch import distributed as dist
from torch.distributed.fsdp import CPUOffload
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
@dataclass
class _GradAccConfig:
"""
This configures how gradients are accumulated in :meth:`_test_grad_acc`.
Each instance of this class represents ``num_iters``-many consecutive
iterations, where the ``no_sync()`` context manager is used or not as given
by ``use_no_sync``.
Attributes:
use_no_sync (bool): Indicates whether to use the ``no_sync()`` context
manager as the way to accumulate gradients.
num_iters (int): Number of iterations to accumulate gradients.
"""
use_no_sync: bool
num_iters: int
def __repr__(self) -> str:
# Override to remove any spaces in the string to appease the internal
# build's test name parser
return (
f"(use_no_sync={self.use_no_sync},"
f"num_iters={self.num_iters})"
)
@dataclass
class _GradAccConfigs:
"""
This wraps a :class:`list` of :class:`_GradAccConfig` instances with the
sole purpose of overriding :meth:`__repr__` to remove spaces.
"""
configs: List[_GradAccConfig]
def __repr__(self) -> str:
# Override to remove any spaces in the string to appease the internal
# build's test name parser
return (
"[" + ",".join(config.__repr__() for config in self.configs) + "]"
)
class TestGradAcc(FSDPTest):
"""Tests ``FullyShardedDataParallel``'s gradient accumulation via both its
``no_sync()`` context manager and without the context manager."""
def _test_grad_acc(
self,
batch_dim: int,
configs: List[_GradAccConfig],
cpu_offload: CPUOffload,
backward_prefetch: Optional[BackwardPrefetch],
):
"""
Tests gradient accumulation by comparing a run that trains sequentially
through some batches while accumulating gradients with a run that
trains on the concatenation of those batches in a single iteration.
The last iteration always synchronizes gradients regardless of what is
specified by the last element of ``configs``.
Arguments:
batch_dim (int): Batch dimension in the input tensor to be passed
into the model for the forward pass.
configs (List[_GradAccConfig]): :class:`list` of configurations
specifying how gradients are accumulated; for example, a list
corresponding to [(False, 2), (True, 2), (False, 2)] indicates
to accumulate over 2 + 2 + 2 = 6 total iterations, where the
first two do not use ``no_sync()``, the middle two do use
``no_sync()``, and the final two again do not use
``no_sync()``.
cpu_offload (CPUOffload): Configures CPU offloading.
backward_prefetch (Optional[BackwardPrefetch]): Specifies at which
point to prefetch the next layer's full parameters during the
backward pass, if at all.
"""
# Gradient accumulation outside `no_sync()` is not currently compatible
# with CPU offloading
if cpu_offload.offload_params and \
any(not config.use_no_sync for config in configs):
return
old_allow_tf32 = torch.backends.cuda.matmul.allow_tf32
try:
# Disable TF32 to prevent floating point drift
torch.backends.cuda.matmul.allow_tf32 = False
# Initialize the FSDP model and optimizer
fsdp_kwargs = {
"cpu_offload": cpu_offload,
"backward_prefetch": backward_prefetch,
}
fsdp_model: FSDP = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
deterministic=True,
add_bn=False, # disable BN since the test uses varying batch sizes
)
device = torch.device("cuda")
optim = torch.optim.SGD(
fsdp_model.parameters(), lr=0.01, momentum=0.9,
)
# Generate the sequence of batches, each containing the same data
# but permuted
def permute_tensor(x: torch.Tensor):
return x.view(-1)[torch.randperm(x.numel())].view_as(x)
batch: Tuple[torch.Tensor, ...] = \
fsdp_model.module.get_input(device)
batches: List[Tuple[torch.Tensor, ...]] = [batch]
num_iters_to_acc = sum(config.num_iters for config in configs)
for _ in range(num_iters_to_acc - 1):
batches.append(tuple(permute_tensor(t) for t in batch))
for (batch1, batch2) in itertools.combinations(batches, r=2):
for t1, t2 in zip(batch1, batch2):
assert not torch.all(t1 == t2), \
"Check the test to make sure that batches are distinct"
# Concatenate the batches along the given batch dimension
concat_batch: Tuple[torch.Tensor, ...] = tuple(
torch.cat(ts, dim=batch_dim) for ts in zip(*batches)
)
# Establish reference gradients using the concatenated batch
fsdp_model.zero_grad()
output = fsdp_model(*concat_batch)
ref_loss = fsdp_model.module.get_loss(concat_batch, output)
ref_loss.backward()
ref_grads = [
p.grad.detach().clone() for p in fsdp_model.parameters()
]
# Compute and accumulate the gradients
fsdp_model.zero_grad()
losses = []
batch_idx = 0
for config in configs:
sync_context = fsdp_model.no_sync() if config.use_no_sync \
else contextlib.suppress()
with sync_context:
for _ in range(config.num_iters):
if batch_idx == num_iters_to_acc - 1:
break # always sync on the last iteration
batch = batches[batch_idx]
batch_idx += 1
output = fsdp_model(*batch)
loss = fsdp_model.module.get_loss(batch, output)
loss.backward()
losses.append(loss)
output = fsdp_model(*batches[-1])
loss = fsdp_model.module.get_loss(batches[-1], output)
loss.backward()
losses.append(loss)
acc_loss = sum(losses)
acc_grads = [
p.grad.detach().clone() for p in fsdp_model.parameters()
]
# Compare the losses and gradients
torch.testing.assert_close(ref_loss, acc_loss)
self.assertEqual(len(ref_grads), len(acc_grads))
for ref_grad, acc_grad in zip(ref_grads, acc_grads):
self.assertEqual(ref_grad.device, acc_grad.device)
self.assertEqual(ref_grad.size(), acc_grad.size())
self.assertEqual(ref_grad.dtype, acc_grad.dtype)
torch.testing.assert_close(ref_grad, acc_grad)
# Check that the optimizer step does not error
optim.step()
finally:
torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32
@skip_if_lt_x_gpu(2)
@parametrize(
"configs",
[
_GradAccConfigs([
_GradAccConfig(use_no_sync=True, num_iters=3),
_GradAccConfig(use_no_sync=False, num_iters=3),
_GradAccConfig(use_no_sync=True, num_iters=3),
]),
_GradAccConfigs([
_GradAccConfig(use_no_sync=False, num_iters=3),
_GradAccConfig(use_no_sync=True, num_iters=3),
_GradAccConfig(use_no_sync=False, num_iters=3),
]),
]
)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=False), CPUOffload(offload_params=True)],
)
@parametrize(
"backward_prefetch",
[BackwardPrefetch.BACKWARD_PRE, BackwardPrefetch.BACKWARD_POST, None],
)
def test_grad_acc(
self,
configs: _GradAccConfigs,
cpu_offload: CPUOffload,
backward_prefetch: Optional[BackwardPrefetch],
):
"""
Tests gradient accumulation.
This exercises gradient accumulation inside and outside the
``no_sync()`` context manager, in particular by interleaving the two.
It tests both interleaving starting with (and ending with, resp.)
inside versus outside ``no_sync()`` to ensure that initial conditions
(and final conditions, resp.) do not affect the correctness. This test
also checks for compatibility with the CPU offload and backward
prefetch options.
NOTE: Gradient accumulation without using the ``no_sync()`` context
manager is not currently compatible with CPU offloading, so those tests
are vacuous.
"""
self._test_grad_acc(
batch_dim=1,
configs=configs.configs,
cpu_offload=cpu_offload,
backward_prefetch=backward_prefetch,
)
instantiate_parametrized_tests(TestGradAcc)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_grad_acc.py
|
# Owner(s): ["oncall: distributed"]
import contextlib
import sys
from functools import partial
from itertools import product
from typing import Any, Dict, List
import torch
import torch.cuda.nccl as nccl
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from torch.distributed.fsdp import BackwardPrefetch, CPUOffload
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision, ShardingStrategy
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy
from torch.nn.modules.batchnorm import _BatchNorm
from torch.testing._internal.common_cuda import CUDA11OrLater
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
subtest_name,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
sandcastle_skip_if,
)
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = sandcastle_skip_if(not HAS_TORCHVISION, "no torchvision")
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
# Various mixed precision configs to test under.
default_mp = MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
reduce_dtype=torch.float16,
)
# Params and buffers are not cast, comm only happens
# in reduced precision.
mp_only_reduce = MixedPrecision(reduce_dtype=torch.float16)
# Only parameters and buffers are cast (thus comm should happen in the param_dtype precision)
mp_only_param_and_buf = MixedPrecision(param_dtype=torch.float16, buffer_dtype=torch.float16)
# Nothing is cast (thus param, comm, grad, and buffer should be in the full precision)
mp_no_mixed_precision = MixedPrecision()
nccl_supports_bf16 = (
CUDA11OrLater and dist.is_nccl_available() and nccl.version() >= (2, 10)
)
mp_configs = [default_mp, mp_only_reduce, mp_only_param_and_buf, mp_no_mixed_precision]
if nccl_supports_bf16:
mp_diff_buffer_and_reduce = MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.bfloat16,
reduce_dtype=torch.float32
)
mp_configs.extend([mp_diff_buffer_and_reduce])
# Buffer original dtype, which can differ from model params.
_BUFFER_ORIG_DTYPE = torch.float64
params = "mp_config,cpu_offload,full_precision_param_dtype,enable_sharded_grad_scaler"
cpu_offload_config = [
CPUOffload(offload_params=True), CPUOffload(offload_params=False)
]
full_precision_param_dtype_config = [torch.float32, torch.float64]
enable_sharded_grad_scaler = ["enable_sharded_grad_scaler", None]
configs = list(product(
mp_configs,
cpu_offload_config,
full_precision_param_dtype_config,
enable_sharded_grad_scaler,
))
test_name_mapping = {
str(CPUOffload(offload_params=True)): "offload_true",
str(CPUOffload(offload_params=False)): "offload_false",
str(default_mp): "mp_fp16",
str(mp_only_reduce): "mp_only_reduce",
str(mp_only_param_and_buf): "mp_only_param_and_buf",
str(mp_no_mixed_precision): "mp_no_mp",
str(torch.float32): "fp32",
str(torch.float64): "fp64",
"enable_sharded_grad_scaler": "enable_sharded_grad_scaler"
}
if nccl_supports_bf16:
test_name_mapping.update({
str(mp_diff_buffer_and_reduce): "mp_diff_buffer_reduce",
})
subtest_name = partial(subtest_name, test_name_mapping)
_CURRENT_FULL_PRECISION_PARAM_DTYPE = None
@contextlib.contextmanager
def patch_reduce_scatter(new_reduce_scatter, full_precision_param_dtype):
"""
    Patches dist._reduce_scatter_base with a new reduce_scatter_base and
    restores it upon exiting. Used for validation of mixed precision settings.
"""
orig_reduce_scatter = dist._reduce_scatter_base
dist._reduce_scatter_base = new_reduce_scatter
global _CURRENT_FULL_PRECISION_PARAM_DTYPE
_CURRENT_FULL_PRECISION_PARAM_DTYPE = full_precision_param_dtype
try:
yield
finally:
dist._reduce_scatter_base = orig_reduce_scatter
_CURRENT_FULL_PRECISION_PARAM_DTYPE = None
class LinearMixedPrecision(nn.Module):
"""
A linear module with extra checks for mixed precision training.
"""
def __init__(self, param_dtype):
super().__init__()
self.lin = nn.Linear(10, 10, bias=False).to(param_dtype)
self.register_buffer('buffer', torch.randn((1, 2), dtype=_BUFFER_ORIG_DTYPE))
self._orig_param_type = param_dtype
self._orig_buffer_dtype = _BUFFER_ORIG_DTYPE
def forward(self, tup):
# Param and input should be the mixed precision type
inp, cls, fsdp, mp_config, full_precision_param_dtype = tup
expected_param_type = (
mp_config.param_dtype if mp_config.param_dtype is not None
else self._orig_param_type
)
expected_buffer_type = (
mp_config.buffer_dtype if mp_config.buffer_dtype is not None
else self._orig_buffer_dtype
)
cls.assertEqual(inp.dtype, expected_param_type)
# Buffer should be in specified precision as well.
cls.assertEqual(self.buffer.dtype, expected_buffer_type)
# In FSDP, self.params should point to the right type.
num_active_fsdp = 0
for fsdp_module in FSDP.fsdp_modules(fsdp):
fsdp_managed_params = fsdp_module.params
# Single param assumption
cls.assertEqual(1, len(fsdp_managed_params))
for param in fsdp_managed_params:
# FSDP unit is currently active if it is not using the param
# local shard. This supports both FULL_SHARD and SHARD_GRAD_OP
# cases. In FULL_SHARD, we have the additional property that
# param._full_param_padded has not been freed.
is_fsdp_unit_active = (
param._is_sharded and
(param.data.data_ptr() != param._local_shard.data_ptr())
)
if is_fsdp_unit_active:
num_active_fsdp += 1
                    # This FSDP unit is active; verify the param points to the
                    # mixed precision dtype
cls.assertEqual(param.dtype, expected_param_type)
# _rebuild_full_param should have also freed the fp16 shard.
# Shard is never allocated if param_dtype mixed precision is not
# enabled.
if mp_config.param_dtype is not None:
cls.assertEqual(0, param._mp_shard.storage().size())
else:
cls.assertFalse(hasattr(param, '_mp_shard'))
elif param._is_sharded:
# This FSDP unit is not active as full param has been
# freed or not yet allocated. Ensure param points to full
# precision param.
cls.assertEqual(param.dtype, full_precision_param_dtype)
# We should have gotten at least one active FSDP unit for sharded
        # (world size > 1) cases. When the param is not sharded
        # (i.e. world_size == 1), it is hard to check whether an FSDP unit is
        # active since we always point to the local shard, so we rely on the
        # forward pass self.lin(inp) succeeding with a reduced-precision inp to
        # implicitly validate that the param is indeed in reduced precision.
if cls.world_size > 1:
cls.assertGreater(num_active_fsdp, 0)
return (self.lin(inp), cls, fsdp, mp_config, full_precision_param_dtype)
class TestFSDPMixedPrecision(FSDPTest):
@property
def world_size(self):
raise ValueError("To be implemented by child classes")
def _get_simple_nested_model(self, param_dtype, *fsdp_args, **fsdp_kwargs):
model = FSDP(
nn.Sequential(
FSDP(LinearMixedPrecision(param_dtype).cuda(), *fsdp_args, **fsdp_kwargs),
LinearMixedPrecision(param_dtype).cuda(),
),
*fsdp_args,
**fsdp_kwargs,
)
return model
def _get_simple_model(self, param_dtype, *fsdp_args, **fsdp_kwargs):
model = FSDP(LinearMixedPrecision(param_dtype).cuda(), *fsdp_args, **fsdp_kwargs)
return model
def _validate_no_mp_shard(self, fsdp_model):
"""
Validates that there is no mixed precision _mp_shard allocated
when it is not expected to be.
"""
fsdp_units = FSDP.fsdp_modules(fsdp_model)
for fsdp in fsdp_units:
for param in fsdp.params:
self.assertFalse(hasattr(param, '_mp_shard'))
def _validate_mp_shard_freed(self, fsdp_model):
"""
        Ensures that the mixed precision shard is freed for all FSDP units.
"""
fsdp_units = FSDP.fsdp_modules(fsdp_model)
for fsdp in fsdp_units:
for param in fsdp.params:
self.assertEqual(0, param._mp_shard.storage().size())
def _reduce_scatter_base_validate_mp(
self,
orig_reduce_scatter,
mp_config,
*args,
**kwargs
):
"""
        Performs dist._reduce_scatter_base but verifies mixed precision settings
        beforehand. This tests that mixed precision works as expected during the
        backward pass; in particular, it ensures that the gradients were cast to
        the right dtype and that the communication happens in that dtype.
"""
tensors = []
for x in args:
if isinstance(x, torch.Tensor):
tensors.append(x)
for _, x in kwargs.items():
if isinstance(x, torch.Tensor):
tensors.append(x)
# reduce_dtype has higher priority than param_dtype, because mixed_precision
# supports overriding param_dtype with reduce_dtype to control the
# reduction precision. In the case where reduce_dtype == param_dtype
# this tests that gradients are in the expected precision as well.
# If reduce_dtype is not specified (is None) we comm. in the param_dtype
# if that is specified, otherwise full precision dtype.
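        # For example (assumed values): with param_dtype=torch.float16 and
        # reduce_dtype=None, gradients are communicated in float16; with
        # reduce_dtype=torch.float32, they are communicated in float32.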
expected_dtype = (
mp_config.reduce_dtype if mp_config.reduce_dtype is not None
else (
mp_config.param_dtype if mp_config.param_dtype is not None
else _CURRENT_FULL_PRECISION_PARAM_DTYPE
)
)
for t in tensors:
self.assertEqual(expected_dtype, t.dtype)
return orig_reduce_scatter(*args, **kwargs)
def _run_test_mixed_precision_e2e(
self,
mp_config,
cpu_offload,
backward_prefetch,
forward_prefetch,
full_precision_param_dtype,
sharding_strategy,
enable_sharded_grad_scaler,
):
torch.cuda.set_device(self.rank)
fsdp_models = [
self._get_simple_model(
param_dtype=full_precision_param_dtype,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
mixed_precision=mp_config,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch
),
self._get_simple_nested_model(
param_dtype=full_precision_param_dtype,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
mixed_precision=mp_config,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch
),
]
for model in fsdp_models:
if not cpu_offload.offload_params:
model.cuda()
# Patch reduce_scatter to add validation for mixed precision types.
orig_reduce_scatter = dist._reduce_scatter_base
test_reduce_scatter = partial(
self._reduce_scatter_base_validate_mp, orig_reduce_scatter, mp_config,
)
with patch_reduce_scatter(test_reduce_scatter, full_precision_param_dtype):
scaler = ShardedGradScaler(enabled=enable_sharded_grad_scaler)
optim = torch.optim.Adam(model.parameters())
for _ in range(3):
inp = torch.randn(3, 10, device='cuda', dtype=full_precision_param_dtype)
                    # The forward pass of LinearMixedPrecision checks the
                    # casting of inputs, params, and buffers.
act, *_ = model(
(inp, self, model, mp_config, full_precision_param_dtype)
)
                    # Buffers should be cast.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
# p._mp_shard should be freed.
if model.params[0]._is_sharded: # i.e. world_size > 1
# TODO: free the mixed precision shard after forward
# when world_size == 1 as well, currently when
# world_size == 1 it is only freed after backward.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
# We never should have allocated an _mp_shard.
self._validate_no_mp_shard(model)
loss = act.sum()
loss = scaler.scale(loss)
if mp_config.param_dtype is not None:
self.assertEqual(loss.dtype, mp_config.param_dtype)
else:
self.assertEqual(loss.dtype, full_precision_param_dtype)
# Will run patched reduce scatter that validates mixed_precision
# types in backward.
loss.backward()
                    # Buffers stay cast even after the backward pass.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
# p._mp_shard should be freed.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
self._validate_no_mp_shard(model)
# Ensure params and grads are in full precision,
# as after fwd/backward we maintain full precision shards.
for param in model.parameters():
self.assertEqual(param.dtype, full_precision_param_dtype)
if param.grad is not None:
self.assertEqual(param.grad.dtype, full_precision_param_dtype)
# Unscale the gradients and step
scaler.step(optim)
# Update the scale factor
scaler.update()
# Summon full params should be in full precision
with model.summon_full_params(model):
# It is not expected for summon_full_params to allocate
# a mixed precision shard.
if mp_config.param_dtype is not None:
self._validate_mp_shard_freed(model)
else:
self._validate_no_mp_shard(model)
params = list(model.parameters())
for p in params:
self.assertEqual(p.dtype, full_precision_param_dtype)
# Note that buffers are cast only once and only restored
# to the original buffer dtype in state_dict, so
# summon_full_params is not expected to restore buffer
# types to their original.
named_buffers = dict(model.named_buffers())
for v in named_buffers.values():
if mp_config.buffer_dtype is not None:
self.assertEqual(v.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(v.dtype, _BUFFER_ORIG_DTYPE)
# state_dict should be in full precision
state_dict = {k: v.clone() for k, v in model.state_dict().items()}
for name, tensor in state_dict.items():
# Parameters and buffers are checkpointed in their
# original dtypes, which may be different.
if name in named_buffers.keys():
self.assertEqual(tensor.dtype, _BUFFER_ORIG_DTYPE)
else:
self.assertEqual(
tensor.dtype, full_precision_param_dtype,
f"{name}: {tensor.dtype} vs {full_precision_param_dtype}"
)
# After state_dict, buffer's dtype should have been restored
# to the mixed precision one.
for buf in model.buffers():
if mp_config.buffer_dtype is not None:
self.assertEqual(buf.dtype, mp_config.buffer_dtype)
else:
self.assertEqual(buf.dtype, _BUFFER_ORIG_DTYPE)
class TestFSDPMixedPrecisionSharded(TestFSDPMixedPrecision):
@property
def world_size(self):
return 2
def _get_subtest_config(self) -> Dict[str, List[Any]]:
"""Returns a subtest configuration that subtests prefetching settings
together."""
return {
"forward_prefetch": [False, True],
"backward_prefetch": [
None,
BackwardPrefetch.BACKWARD_PRE,
BackwardPrefetch.BACKWARD_POST,
]
}
@skip_if_lt_x_gpu(2)
def test_mixed_precision_no_reshard_after_forward(self):
        # Note that we do not exercise all possible configs so as not to increase
        # test TTS too much.
mp = default_mp if not nccl_supports_bf16 else mp_diff_buffer_and_reduce
self._run_test_mixed_precision_e2e(
mp_config=mp,
cpu_offload=CPUOffload(offload_params=True),
backward_prefetch=None,
forward_prefetch=False,
full_precision_param_dtype=torch.float64,
sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
enable_sharded_grad_scaler=False,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_mixed_precision_e2e_full_shard(
self,
mp_config,
cpu_offload,
full_precision_param_dtype,
enable_sharded_grad_scaler,
):
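        # run_subtests runs _run_test_mixed_precision_e2e once per combination of the
        # prefetch settings from _get_subtest_config(), merged with the fixed kwargs
        # below (assuming the usual Cartesian-product semantics of run_subtests).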
self.run_subtests(
self._get_subtest_config(),
self._run_test_mixed_precision_e2e,
mp_config=mp_config,
cpu_offload=cpu_offload,
full_precision_param_dtype=full_precision_param_dtype,
sharding_strategy=ShardingStrategy.FULL_SHARD,
enable_sharded_grad_scaler=enable_sharded_grad_scaler,
)
def _test_mixed_precision_embedding_table(self, mp_config):
        # Basic test to ensure integer inputs are not cast, which would break
        # modules such as embedding tables.
param_dtype = mp_config.param_dtype or torch.float32
orig_reduce_scatter = dist._reduce_scatter_base
test_reduce_scatter = partial(
self._reduce_scatter_base_validate_mp, orig_reduce_scatter, mp_config,
)
with patch_reduce_scatter(test_reduce_scatter, param_dtype):
# TODO: `test_mp_embedding_reduce()` fails if we do not wrap the
# entire `TransformerWithSharedParams` with a single top-level FSDP
model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
{"mixed_precision": mp_config},
)
fsdp_model = FSDP(model, mixed_precision=mp_config)
optim = torch.optim.SGD(fsdp_model.parameters(), lr=0.1)
for _ in range(6):
inp = fsdp_model.module.get_input(torch.device("cuda"))
                # This would fail if we cast integer module inputs, such as those
                # for embedding tables.
output = fsdp_model(*inp)
loss = fsdp_model.module.get_loss(inp, output).cuda()
self.assertEqual(loss.dtype, param_dtype)
fsdp_model.module.run_backward(loss)
optim.step()
@skip_if_lt_x_gpu(2)
def test_mp_embedding_reduce(self):
self._test_mixed_precision_embedding_table(
mp_config=MixedPrecision(reduce_dtype=torch.float16)
)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_only_params_and_bufs(self):
self._test_mixed_precision_embedding_table(
mp_config=MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
)
)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_default(self):
default_mp_config = MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
reduce_dtype=torch.float16,
)
self._test_mixed_precision_embedding_table(mp_config=default_mp_config)
@skip_if_lt_x_gpu(2)
def test_mp_embedding_params_and_reduce_diff(self):
params_and_reduce_different = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float32,
buffer_dtype=torch.float16
)
self._test_mixed_precision_embedding_table(mp_config=params_and_reduce_different)
@skip_if_lt_x_gpu(2)
@skipIfNoTorchVision
def test_mixed_precision_resnet(self):
"""
End to end test to ensure mixed precision + auto_wrap works
for ResNet model.
"""
resnet_model = torchvision.models.resnet50().cuda()
resnet_model = nn.SyncBatchNorm.convert_sync_batchnorm(
resnet_model,
process_group=dist.distributed_c10d._get_default_group()
)
n_bn = sum(1 if isinstance(x, _BatchNorm) else 0 for x in resnet_model.modules())
inp = torch.ones(1, 3, 1000, 1000, device='cuda')
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
fsdp = FSDP(
resnet_model,
auto_wrap_policy=size_based_auto_wrap_policy,
mixed_precision=mp_config
)
        # BatchNorm units should be wrapped individually. Validate this by checking
        # that the number of FSDP units wrapping a BatchNorm equals the number of
        # BatchNorm units in the original ResNet model.
fsdp_bn = 0
for module in fsdp.fsdp_modules(fsdp):
wrapped_module = module.module
if isinstance(wrapped_module, _BatchNorm):
fsdp_bn += 1
self.assertEqual(fsdp_bn, n_bn)
# Would throw type mismatch issue without mixed precision autowrapping.
loss = fsdp(inp).sum()
loss.backward()
@skip_if_lt_x_gpu(2)
@parametrize("convert_sync_bn", [True, False])
def test_mp_batchnorm(self, convert_sync_bn):
class BatchNormNet(nn.Module):
def __init__(self, affine=True):
super(BatchNormNet, self).__init__()
self.fc1 = nn.Linear(2, 40, bias=False)
self.bn = nn.BatchNorm1d(4, affine=affine)
self.fc2 = nn.Linear(40, 4, bias=False)
def forward(self, x):
x = torch.reshape(self.fc1(x), (-1, 4, 10))
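                # Reshape fc1's 40 output features to (N, 4, 10) so that BatchNorm1d(4)
                # normalizes over 4 channels of length 10.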
x = self.bn(x)
x = torch.reshape(x, (-1, 40))
x = self.fc2(x)
return F.softmax(x, dim=1)
def never_wrap_policy(*args, **kwargs):
return False
net = BatchNormNet().cuda()
if convert_sync_bn:
net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
        # FSDP detects that mixed precision + batchnorm will cause issues
        # and thus wraps batchnorm in a distinct FSDP unit that does not
        # use mixed precision.
mp_config = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
with self.assertWarnsRegex(
expected_warning=UserWarning,
expected_regex="BatchNorm units will be wrapped as a separate"
):
model = FSDP(
net,
mixed_precision=mp_config,
auto_wrap_policy=never_wrap_policy,
)
bn = model.bn
self.assertTrue(isinstance(bn, FSDP))
# policy should not have wrapped any other submodules
self.assertFalse(isinstance(model.fc1, FSDP))
self.assertFalse(isinstance(model.fc2, FSDP))
self.assertEqual(None, bn.mixed_precision)
self.assertNotEqual(None, model.mixed_precision)
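        # The separately wrapped BN unit has mixed_precision=None (it runs in full
        # precision), while the parent FSDP instance keeps the requested mp_config.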
inp = torch.randn((1, 2), device='cuda')
# Without FSDP BN mixed precision fix, this would result in
# RuntimeError: Expected counts to have type Half but got Float
# for syncBN
model(inp).sum().backward()
class TestFSDPMixedPrecisionUnsharded(TestFSDPMixedPrecision):
"""
    Smaller test suite for the unsharded-parameter (i.e. world_size == 1) case.
"""
@property
def world_size(self):
return 1
@skip_if_lt_x_gpu(1)
def test_mixed_precision_no_reshard_after_forward(self):
        # Note that we do not exercise all possible configs so as not to increase
        # test TTS too much.
mp = default_mp if not nccl_supports_bf16 else mp_diff_buffer_and_reduce
self._run_test_mixed_precision_e2e(
mp_config=mp,
cpu_offload=CPUOffload(offload_params=True),
backward_prefetch=None,
forward_prefetch=False,
full_precision_param_dtype=torch.float64,
sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
enable_sharded_grad_scaler=False,
)
@skip_if_lt_x_gpu(1)
def test_mixed_precision_e2e_full_shard(self):
mp = default_mp if not nccl_supports_bf16 else mp_diff_buffer_and_reduce
self._run_test_mixed_precision_e2e(
mp_config=mp,
cpu_offload=CPUOffload(offload_params=True),
backward_prefetch=None,
forward_prefetch=False,
full_precision_param_dtype=torch.float64,
sharding_strategy=ShardingStrategy.FULL_SHARD,
enable_sharded_grad_scaler=False,
)
instantiate_parametrized_tests(TestFSDPMixedPrecisionSharded)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_mixed_precision.py
|
# Owner(s): ["oncall: distributed"]
import sys
from enum import Enum
import torch
import torch.nn as nn
import torch.optim as optim
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
get_full_params,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Model(nn.Module):
def __init__(self, with_fsdp, freeze_after_wrap_fsdp):
super().__init__()
self.trunk = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(64, 10)
if with_fsdp and freeze_after_wrap_fsdp:
self.fsdp_wrap()
def fsdp_wrap(self):
self.trunk = FSDP(self.trunk)
self.head = FSDP(self.head)
def forward(self, x):
return self.head(self.trunk(x))
class NestedTrunkModel(nn.Module):
def __init__(self, with_fsdp, freeze_after_wrap_fsdp):
super().__init__()
self.trunk = nn.Sequential(
self._create_block(3, 64, with_fsdp, freeze_after_wrap_fsdp),
self._create_block(64, 64, with_fsdp, freeze_after_wrap_fsdp),
)
self.head = nn.Sequential(
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
nn.Linear(64, 10),
)
if with_fsdp and freeze_after_wrap_fsdp:
self.fsdp_wrap()
def fsdp_wrap(self):
for name, child in self.trunk.named_children():
wrapped_child = FSDP(child)
setattr(self.trunk, name, wrapped_child)
self.trunk = FSDP(self.trunk)
self.head = FSDP(self.head)
def forward(self, x):
return self.head(self.trunk(x))
def _create_block(
self, in_channels, out_channels, with_fsdp, freeze_after_wrap_fsdp
):
block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3),
nn.ReLU(inplace=True),
)
return block
class FreezingMethod(str, Enum):
GradToNone = "grad_to_none"
RequiresGrad = "requires_grad"
class TestFreezingWeights(FSDPTest):
def _create_model(self, with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp):
if with_nested_trunk:
model = NestedTrunkModel(with_fsdp, freeze_after_wrap_fsdp)
else:
model = Model(with_fsdp, freeze_after_wrap_fsdp)
return model
def _dist_train(
self, with_nested_trunk, freezing_method, freeze_after_wrap_fsdp, with_fsdp
):
torch.manual_seed(0)
batch = torch.randn(size=(2, 3, 224, 224)).cuda()
model = self._create_model(with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp)
model = model.cuda()
# freezing the trunk using requires_grad.
if freezing_method == FreezingMethod.RequiresGrad:
for param in model.trunk.parameters():
param.requires_grad = False
if with_fsdp:
if not freeze_after_wrap_fsdp:
model.fsdp_wrap()
model = FSDP(model)
else:
model = DistributedDataParallel(model, device_ids=[self.rank])
target = torch.tensor([0, 1], dtype=torch.long).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
for iteration in range(3):
out = model(batch)
fake_loss = criterion(out, target)
optimizer.zero_grad()
fake_loss.backward()
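            # With GradToNone, freezing is emulated by dropping the trunk's gradients
            # after backward so that the optimizer step leaves those parameters unchanged.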
if freezing_method == FreezingMethod.GradToNone:
for param in model.module.trunk.parameters():
param.grad = None
optimizer.step()
if with_fsdp:
return get_full_params(model)
return list(model.parameters())
@skip_if_lt_x_gpu(2)
@parametrize("with_nested_trunk", [True, False])
@parametrize(
"freezing_method", [FreezingMethod.RequiresGrad, FreezingMethod.GradToNone]
)
@parametrize("freeze_after_wrap_fsdp", [True, False])
def test_freezing_weights(
self, with_nested_trunk, freezing_method, freeze_after_wrap_fsdp
):
# DDP
ddp_state = self._dist_train(
with_nested_trunk, freezing_method, freeze_after_wrap_fsdp, with_fsdp=False
)
# FSDP
fsdp_state = self._dist_train(
with_nested_trunk, freezing_method, freeze_after_wrap_fsdp, with_fsdp=True
)
self.assertEqual(
ddp_state,
fsdp_state,
exact_device=True,
msg="FullyShardedDataParallel states didn't match PyTorch DDP states",
)
instantiate_parametrized_tests(TestFreezingWeights)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_freezing_weights.py
|
# Owner(s): ["oncall: distributed"]
import sys
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestTraversal(FSDPTest):
@property
def world_size(self):
return 2
@skip_if_lt_x_gpu(2)
def test_fsdp_modules(self):
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
)
modules = FSDP.fsdp_modules(nested_wrapped_module)
        self.assertEqual(
modules, [
nested_wrapped_module.module.get_submodule("1"),
nested_wrapped_module.module.get_submodule("1").get_submodule("0"),
nested_wrapped_module.module.get_submodule("2"),
]
)
modules = FSDP.fsdp_modules(nested_wrapped_module, root_only=True)
self.assertEqual(
modules, [
nested_wrapped_module.module.get_submodule("1"),
nested_wrapped_module.module.get_submodule("2"),
]
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_traversal.py
|
# Owner(s): ["oncall: distributed"]
import sys
from contextlib import suppress
from enum import Enum, auto
from typing import Optional
from unittest.mock import patch
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class PassType(Enum):
__order__ = "FWD BWD"
FWD = auto()
BWD = auto()
class TestCommunication(FSDPTest):
"""Tests ``FullyShardedDataParallel``'s collective communication usage."""
def _init_model(
self,
nested_model: bool,
sharding_strategy: ShardingStrategy,
device: torch.device,
):
fsdp_kwargs = {"sharding_strategy": sharding_strategy}
if nested_model:
model = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
)
fsdp_model: FSDP = FSDP(
model,
self.process_group,
**fsdp_kwargs,
).to(device)
else:
fsdp_model: FSDP = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs,
)
return fsdp_model
def _run_iter(self, fsdp_model, batch, use_no_sync: bool):
"""Runs an iteration inside or outside the ``no_sync()`` context."""
context = fsdp_model.no_sync() if use_no_sync else suppress()
with context:
output = fsdp_model(*batch)
loss = fsdp_model.module.get_loss(batch, output)
loss.backward()
def _get_ref_num_reduce_scatters(
self,
num_fsdp: int,
in_no_sync: bool,
) -> int:
"""Returns the reference number of reduce-scatters for an iteration
in the ``no_sync()`` context."""
return num_fsdp if not in_no_sync else 0
def _get_ref_num_all_gathers(
self,
num_fsdp: int,
sharding_strategy: Optional[ShardingStrategy],
is_first_iter: bool,
is_last_iter_no_sync: bool,
) -> int:
"""Returns the reference number of all-gathers in an iteration, summing
over the forward and backward passes."""
return sum(
self._get_ref_num_all_gathers_in_pass(
num_fsdp,
sharding_strategy,
pass_type,
is_first_iter,
is_last_iter_no_sync,
) for pass_type in PassType
)
def _get_ref_num_all_gathers_in_pass(
self,
num_fsdp: int,
sharding_strategy: Optional[ShardingStrategy],
pass_type: PassType,
is_first_iter: bool,
is_last_iter_no_sync: bool,
):
"""Returns the reference number of all-gathers for a given setting."""
if sharding_strategy is None:
sharding_strategy = ShardingStrategy.FULL_SHARD # default
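        # Example (per the branches below): with FULL_SHARD and N parameter-managing
        # FSDP instances, a non-first iteration outside `no_sync()` expects N forward
        # all-gathers and N - 1 backward all-gathers, since the root keeps its full
        # parameters after the forward pass.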
# Forward pass:
if pass_type == PassType.FWD and \
sharding_strategy == ShardingStrategy.SHARD_GRAD_OP and \
is_last_iter_no_sync:
# Modules do not free the full parameters in the last
# iteration's backward pass if it was in `no_sync()`
num_all_gathers = 0
elif pass_type == PassType.FWD:
# Otherwise, all modules all-gather the full parameters in the
# forward pass
num_all_gathers = num_fsdp
# Backward pass:
elif pass_type == PassType.BWD and \
sharding_strategy == ShardingStrategy.FULL_SHARD:
# Root does not free the full parameters at the end of the
# forward pass
num_all_gathers = num_fsdp - 1
elif pass_type == PassType.BWD and \
sharding_strategy == ShardingStrategy.SHARD_GRAD_OP:
# Modules do not free the full parameters at the end of the
# forward pass
num_all_gathers = 0
else:
assert 0, f"Unsupported: add a branch for pass_type={pass_type} " \
f"is_first_iter={is_first_iter} " \
f"is_last_iter_no_sync={is_last_iter_no_sync} " \
f"sharding_strategy={sharding_strategy}"
if is_first_iter and pass_type == PassType.FWD:
# With execution order validation, on the first iteration, we have
# an additional all-gather before every actual all-gather in the
# forward pass
num_all_gathers *= 2
return num_all_gathers
def _print_ref_num_all_gathers_in_pass(
self,
num_fsdp: int,
sharding_strategy: ShardingStrategy,
pass_type: PassType,
is_first_iter: bool,
is_last_iter_no_sync: bool,
):
"""Helper method for printing the number of all-gathers for a specific
setting. This may be helpful since the branching is complex."""
if self.rank != 0:
return # only print on one rank
num_all_gathers = self._get_ref_num_all_gathers_in_pass(
num_fsdp, sharding_strategy, pass_type, is_first_iter,
is_last_iter_no_sync,
)
print(
f"Pass: {pass_type}\n"
f"Is First Iteration: {is_first_iter}\n"
f"Sharding Strategy: {sharding_strategy}\n"
f"Last iteration in `no_sync()`: {is_last_iter_no_sync}\n"
f"Number of all-gathers: {num_all_gathers}"
)
@skip_if_lt_x_gpu(2)
@parametrize("nested_model", [False, True])
@parametrize("use_no_sync", [False, True])
@parametrize("sharding_strategy", [ShardingStrategy.SHARD_GRAD_OP, None])
def test_communication(
self,
nested_model: bool,
use_no_sync: bool,
sharding_strategy: Optional[ShardingStrategy],
):
"""
Tests FSDP's communication cost in terms of calls to collective
communication primitives (i.e. all-gather and reduce-scatter).
Arguments:
nested_model (bool): If ``True``, uses ``NestedWrappedModule``,
which has nested FSDP instances; if ``False``, uses the default
model, which does not have nested FSDP instances.
use_no_sync (bool): If ``True``, runs some iterations inside the
``no_sync()`` context manager to accumulate gradients, followed
by some iterations outside the context manager; if ``False``,
only runs some iterations outside the context manager.
sharding_strategy (Optional[ShardingStrategy]): Configures the
FSDP algorithm.
"""
# Initialize the model and inputs
device = torch.device("cuda")
fsdp_model = self._init_model(nested_model, sharding_strategy, device)
batch = fsdp_model.module.get_input(device)
        # Count the number of FSDP instances that manage parameters, since the
        # number of collectives is a function of this count
num_fsdp = sum(
(isinstance(m, FSDP) and len(m.params) > 0)
for m in fsdp_model.modules()
)
# If `use_no_sync=True`, we run `num_iters` iterations inside
# `no_sync()` followed by `num_iters` iterations outside `no_sync()`,
# and if `use_no_sync=False`, we only run `num_iters` iterations
# outside `no_sync()`
num_iters = 3
with patch("torch.distributed._all_gather_base") as mock_all_gather, \
patch("torch.distributed._reduce_scatter_base") as mock_reduce_scatter:
def reset_mocks():
mock_all_gather.reset_mock()
mock_reduce_scatter.reset_mock()
# Check the communication cost when using `no_sync()`
if use_no_sync:
for i in range(num_iters):
reset_mocks()
self._run_iter(fsdp_model, batch, use_no_sync=True)
num_all_gathers = mock_all_gather.call_count
num_reduce_scatters = mock_reduce_scatter.call_count
ref_num_all_gathers = self._get_ref_num_all_gathers(
num_fsdp, sharding_strategy, is_first_iter=i == 0,
is_last_iter_no_sync=i > 0,
)
ref_num_reduce_scatters = self._get_ref_num_reduce_scatters(
num_fsdp, in_no_sync=True,
)
self.assertEqual(num_all_gathers, ref_num_all_gathers)
self.assertEqual(num_reduce_scatters, ref_num_reduce_scatters)
# Check the normal communication cost (when not using `no_sync()`)
for i in range(num_iters):
reset_mocks()
self._run_iter(fsdp_model, batch, use_no_sync=False)
num_all_gathers = mock_all_gather.call_count
num_reduce_scatters = mock_reduce_scatter.call_count
ref_num_all_gathers = self._get_ref_num_all_gathers(
num_fsdp, sharding_strategy,
is_first_iter=not use_no_sync and i == 0,
is_last_iter_no_sync=use_no_sync and i == 0,
)
ref_num_reduce_scatters = self._get_ref_num_reduce_scatters(
num_fsdp, in_no_sync=False,
)
self.assertEqual(num_all_gathers, ref_num_all_gathers)
self.assertEqual(num_reduce_scatters, ref_num_reduce_scatters)
instantiate_parametrized_tests(TestCommunication)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_comm.py
|
# Owner(s): ["oncall: distributed"]
import torch
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardMetadata,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
)
from torch.distributed.distributed_c10d import _get_default_group
from torch.distributed.fsdp.shard_utils import (
_offsets_to_split_sizes,
_reshard_flatten_tensor,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import TestCase
class TestShardUtils(TestCase):
def test_offsets_to_split_sizes(self):
tensor_numel = 40
def _get_and_check_split_sizes(
world_size,
in_offsets,
out_offsets,
in_split_sizes,
):
for my_rank in range(world_size):
_in_split_sizes = in_split_sizes[my_rank]
_out_split_sizes = [
in_split_sizes[i][my_rank] for i in range(world_size)
]
res_in_split_sizes, res_out_split_sizes = _offsets_to_split_sizes(
in_offsets, out_offsets, tensor_numel, world_size, my_rank
)
self.assertEqual(_in_split_sizes, res_in_split_sizes)
self.assertEqual(_out_split_sizes, res_out_split_sizes)
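        # In the cases below, each row of in_split_sizes is one rank's expected input
        # split sizes; the expected output split sizes for rank r are the r-th column
        # (the helper above transposes the matrix for that check).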
# The tensor size can be evenly divided by the world size.
world_size = 4
in_offsets = [0, 10, 20, 30]
out_offsets = [0, 10, 20, 30]
in_split_sizes = [
[10, 0, 0, 0],
[0, 10, 0, 0],
[0, 0, 10, 0],
[0, 0, 0, 10],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
world_size = 4
in_offsets = [0, 3, 17, 18]
out_offsets = [0, 10, 20, 30]
in_split_sizes = [
[3, 0, 0, 0],
[7, 7, 0, 0],
[0, 1, 0, 0],
[0, 2, 10, 10],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
world_size = 4
in_offsets = [0, 10, 20, 30]
out_offsets = [0, 3, 17, 18]
in_split_sizes = [
[3, 7, 0, 0],
[0, 7, 1, 2],
[0, 0, 0, 10],
[0, 0, 0, 10],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
world_size = 4
in_offsets = [0, 7, 11, 25]
out_offsets = [0, 10, 17, 18]
in_split_sizes = [
[7, 0, 0, 0],
[3, 1, 0, 0],
[0, 6, 1, 7],
[0, 0, 0, 15],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
# The tensor size cannot be evenly divided by the world size.
world_size = 6
in_offsets = [0, 7, 14, 21, 28, 35]
out_offsets = [0, 7, 14, 21, 28, 35]
in_split_sizes = [
[7, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 7, 0, 0, 0],
[0, 0, 0, 7, 0, 0],
[0, 0, 0, 0, 7, 0],
[0, 0, 0, 0, 0, 5],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
world_size = 6
in_offsets = [0, 0, 10, 11, 28, 40]
out_offsets = [0, 7, 14, 21, 28, 35]
in_split_sizes = [
[0, 0, 0, 0, 0, 0],
[7, 3, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 3, 7, 7, 0, 0],
[0, 0, 0, 0, 7, 5],
[0, 0, 0, 0, 0, 0],
]
_get_and_check_split_sizes(world_size, in_offsets, out_offsets, in_split_sizes)
class TestShardUtilsDistributed(FSDPTest):
@property
def world_size(self):
return 2
def _create_local_chunk(self, tensor):
chunk = tensor.chunk(2)[self.rank]
offsets = [0] if self.rank == 0 else [tensor.shape[0] - chunk.shape[0]]
shard = Shard.from_tensor_and_offsets(chunk, offsets, self.rank)
return init_from_local_shards([shard], tensor.numel())
def _create_enumerate_spec(self, tensor):
# Since placement is not used, always set placement to rank0 to mimic
# the actual usage.
metadata = [
ShardMetadata([0], [101], placement="rank0/cuda:0"),
ShardMetadata([101], [900], placement="rank0/cuda:0"),
]
return EnumerableShardingSpec(metadata)
def _create_chunk_spec(self):
return ChunkShardingSpec(dim=0, placements=["rank0/cuda:0"])
def _create_tensor(self):
# Keep everything deterministic.
torch.manual_seed(0)
return torch.rand(1001).cuda()
@skip_if_lt_x_gpu(2)
def test_reshard_flatten_tensor(self):
def get_offsets(tensor, shard):
if self.rank == 0:
return [0]
else:
return [tensor.shape[0] - shard.shape[0]]
tensor = self._create_tensor()
shard = _reshard_flatten_tensor(
self._create_local_chunk(tensor),
self._create_enumerate_spec(tensor),
self.world_size,
self.rank,
tensor.device,
_get_default_group(),
)
offsets = [0] if self.rank == 0 else [tensor.shape[0] - shard.shape[0]]
shard = Shard.from_tensor_and_offsets(shard, offsets, self.rank)
uneven_sharded_tensor = init_from_local_shards([shard], tensor.numel())
shard = _reshard_flatten_tensor(
uneven_sharded_tensor,
self._create_chunk_spec(),
self.world_size,
self.rank,
tensor.device,
_get_default_group(),
)
offsets = [0] if self.rank == 0 else [tensor.shape[0] - shard.shape[0]]
shard = Shard.from_tensor_and_offsets(shard, offsets, self.rank)
even_sharded_tensor = init_from_local_shards([shard], tensor.numel())
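        # Gather each resharded tensor onto rank 0 and compare it against the original
        # tensor to verify that resharding preserved the data.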
output = torch.empty(tensor.shape).cuda() if self.rank == 0 else None
even_sharded_tensor.gather(0, output)
if self.rank == 0:
self.assertEqual(tensor, output)
output = torch.empty(tensor.shape).cuda() if self.rank == 0 else None
uneven_sharded_tensor.gather(0, output)
if self.rank == 0:
self.assertEqual(tensor, output)
|
pytorch-master
|
test/distributed/fsdp/test_shard_utils.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.utils.checkpoint import checkpoint
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def get_cur_mem(rank, result, prefix):
"""Collect memory allocated values in a result dict in MB"""
result[prefix] = round(torch.cuda.memory_allocated() / 1024 / 1024)
class Model(nn.Module):
def __init__(self, hidden_dim, with_fsdp=False, with_checkpoint=False):
super().__init__()
if with_fsdp:
self.stem = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
FSDP(nn.BatchNorm2d(64)),
nn.ReLU(inplace=True),
)
else:
self.stem = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
if with_fsdp:
self.blocks = nn.Sequential(
nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
FSDP(nn.BatchNorm2d(hidden_dim)),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
FSDP(nn.BatchNorm2d(hidden_dim)),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
FSDP(nn.BatchNorm2d(hidden_dim)),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
else:
self.blocks = nn.Sequential(
nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(hidden_dim, 10)
self.with_checkpoint = with_checkpoint
def forward(self, x):
if self.with_checkpoint:
return self.head(checkpoint(self.blocks, self.stem(x)))
else:
return self.head(self.blocks(self.stem(x)))
def create_model(with_fsdp, with_checkpoint, model_hidden_dim):
torch.manual_seed(0)
model = Model(model_hidden_dim, with_fsdp, with_checkpoint)
if with_fsdp:
model.stem = FSDP(model.stem)
model.blocks = FSDP(model.blocks)
model.head = FSDP(model.head)
return model
class TestFSDPMemory(FSDPTest):
@property
def world_size(self):
return 2
def _dist_train(self, with_checkpoint, expected, model_hidden_dim, iterations):
gpu_id = self.rank
world_size = self.world_size
batch = torch.randn(size=(2, 3, 224, 224)).cuda()
model = create_model(
with_fsdp=True,
with_checkpoint=with_checkpoint,
model_hidden_dim=model_hidden_dim,
)
model = model.cuda()
model = FSDP(model)
# We enable momentum so that after the first iteration, the optimizer state is added
# to the total memory used.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
results = {} # results of memory stats
for iteration in range(iterations):
get_cur_mem(gpu_id, results, f"iter {iteration}: start")
out = model(batch)
get_cur_mem(gpu_id, results, f"iter {iteration}: after fwd")
out = sum(o.sum() for o in out[0])
fake_loss = criterion(out, torch.tensor(0.0).cuda())
get_cur_mem(gpu_id, results, f"iter {iteration}: after loss")
fake_loss.backward()
get_cur_mem(gpu_id, results, f"iter {iteration}: after bwd")
optimizer.step()
get_cur_mem(gpu_id, results, f"iter {iteration}: after step")
            # It is important to use `set_to_none=True` below, rather than
            # optimizer.zero_grad(), to reclaim the gradient memory.
model.zero_grad(set_to_none=True)
get_cur_mem(gpu_id, results, f"iter {iteration}: done")
def cmp(results, expected):
ret = ""
self.assertEqual(results.keys(), expected.keys())
for k, v in results.items():
exp = expected[k]
if abs(exp - v) > 1: # allow 1MB rounding differences
ret += f"{k}: got {v}, expected {exp}\n"
return ret
output = cmp(results, expected)
self.assertEqual(output, "")
@skip_if_lt_x_gpu(2)
@parametrize("ckpt", ["no_ckpt", "ckpt"])
def test_fsdp_memory(self, ckpt):
# hidden_dim 128: model size ~4MB
model_hidden_dim = 128
model = create_model(
with_fsdp=False, with_checkpoint=False, model_hidden_dim=model_hidden_dim
).cuda()
model_size_mb = round(torch.cuda.memory_allocated() / 1024 / 1024)
del model
sharded_model_size_mb = int(model_size_mb / self.world_size)
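        # Once sharded, each rank is expected to hold roughly 1/world_size of the
        # model's parameters; the expected memory numbers below are based on this.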
        # We have observed that sometimes the 4th iteration can fail after the 3rd
        # (not in this test, but in much larger-scale tests), so we run 4 iterations
        # here just in case that happens.
iterations = 4
expected = {}
for iteration in range(iterations):
if iteration == 0:
# sharded model size + 1MB temp memory
expected[f"iter {iteration}: start"] = sharded_model_size_mb + 1
                # This memory size is hard to compute analytically; it was taken from
                # the printed memory usage.
if ckpt == "ckpt":
expected[f"iter {iteration}: after fwd"] = 51
expected[f"iter {iteration}: after loss"] = 51
else:
expected[f"iter {iteration}: after fwd"] = 340
expected[f"iter {iteration}: after loss"] = 340
# sharded model size + sharded grad size + 1M temp memory
expected[f"iter {iteration}: after bwd"] = 2 * sharded_model_size_mb + 1
else:
                # After the optimizer step in the first iteration, memory usage
                # increases by sharded_model_size_mb because of the added optimizer
                # state memory.
expected[f"iter {iteration}: start"] = 2 * sharded_model_size_mb + 1
if ckpt == "ckpt":
expected[f"iter {iteration}: after fwd"] = (
51 + sharded_model_size_mb
)
expected[f"iter {iteration}: after loss"] = (
51 + sharded_model_size_mb
)
else:
expected[f"iter {iteration}: after fwd"] = (
340 + sharded_model_size_mb
)
expected[f"iter {iteration}: after loss"] = (
340 + sharded_model_size_mb
)
expected[f"iter {iteration}: after bwd"] = 3 * sharded_model_size_mb + 1
# sharded model size + sharded grad size + optimizer states + 1M temp memory
expected[f"iter {iteration}: after step"] = 3 * sharded_model_size_mb + 1
            # grad memory is reclaimed after setting grad = None
# sharded model size + optimizer states + 1M temp memory
expected[f"iter {iteration}: done"] = 2 * sharded_model_size_mb + 1
# Get the fsdp and checkpoint flags.
with_ckpt = ckpt == "ckpt"
self._dist_train(
with_ckpt,
expected,
model_hidden_dim,
iterations,
)
instantiate_parametrized_tests(TestFSDPMemory)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_memory.py
|
# Owner(s): ["oncall: distributed"]
import sys
import unittest
import torch
from torch import distributed as dist
from torch.distributed.fsdp.flat_param import FlatParamShardMetadata
from torch.distributed.fsdp.flatten_params_wrapper import FlattenParamsWrapper
from torch.testing._internal.common_utils import TestCase, run_tests
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
class TestFlattenParams(TestCase):
"""Base test class and used for CPU case."""
def _get_empty_module(self, seed=0):
torch.manual_seed(seed) # keep everything deterministic
class Test(torch.nn.Module):
def forward(self, x):
return x + 1
module = Test()
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
return torch.rand(1).to(device=device, dtype=dtype)
module.get_input = get_input
return module
def _get_transformer(self, seed=0):
torch.manual_seed(seed) # keep everything deterministic
module = torch.nn.Transformer(
d_model=32,
num_encoder_layers=2,
num_decoder_layers=2,
dim_feedforward=128,
dropout=0.1,
)
module.register_buffer("dummy_buffer", torch.tensor(1.0))
def get_input(device, dtype):
torch.manual_seed(1) # keep everything deterministic
src = torch.rand(20, 8, 32).to(device=device, dtype=dtype) # T x B x C
tgt = torch.rand(10, 8, 32).to(device=device, dtype=dtype) # T x B x C
return (src, tgt)
module.get_input = get_input
return module
def _get_shared_params_transformer(self, seed=0):
module = self._get_transformer(seed=seed)
# share the FFNs
for enc_layer, dec_layer in zip(module.encoder.layers, module.decoder.layers):
dec_layer.linear1.weight = enc_layer.linear1.weight
dec_layer.linear2.weight = enc_layer.linear2.weight
return module
def _get_output(self, module):
device = next(module.parameters()).device
dtype = next(module.parameters()).dtype
input = module.get_input(device, dtype)
return module(*input)
def _get_pnorm_after_step(self, module):
optim = torch.optim.SGD(module.parameters(), lr=0.01)
loss = self._get_output(module).sum()
loss.backward()
optim.step()
return torch.norm(torch.stack([p.detach().norm() for p in module.parameters()]))
def _test_num_params(self, module):
ref_num_params = sum(p.numel() for p in module.parameters())
params_to_flatten = list(module.parameters())
flat_module = FlattenParamsWrapper(module, params_to_flatten)
flat_num_params = sum(p.numel() for p in flat_module.parameters())
self.assertEqual(ref_num_params, flat_num_params)
self.assertEqual(flat_num_params, flat_module.flat_param.numel())
def _test_output(self, module):
ref_output = self._get_output(module)
params_to_flatten = list(module.parameters())
flat_module = FlattenParamsWrapper(module, params_to_flatten)
flat_output = self._get_output(flat_module)
self.assertEqual(ref_output, flat_output)
def test_partial_flattening(self):
module = self._get_transformer()
num_params = sum(p.numel() for p in module.parameters())
params_to_flatten = list(module.encoder.layers[1].parameters()) + list(
module.decoder.layers[0].parameters()
)
num_params_to_flatten = sum(p.numel() for p in params_to_flatten)
module = FlattenParamsWrapper(module, params_to_flatten)
self.assertEqual(module.flat_param.numel(), num_params_to_flatten)
self.assertEqual(sum(p.numel() for p in module.parameters()), num_params)
# flattened parameters are removed
self.assertEqual(len(list(module.encoder.layers[1].parameters())), 0)
self.assertEqual(len(list(module.decoder.layers[0].parameters())), 0)
# non-flattened parameters remain
self.assertGreater(len(list(module.encoder.layers[0].parameters())), 0)
self.assertGreater(len(list(module.decoder.layers[1].parameters())), 0)
# test that changing the module dtype works properly
orig_dtype = params_to_flatten[0].dtype
new_dtype = torch.float32 if orig_dtype == torch.float16 else torch.float16
self.assertEqual(module.flat_param.dtype, orig_dtype)
self.assertTrue(
all(p.dtype == orig_dtype for p in module.encoder.layers[0].parameters())
)
module = module.to(dtype=new_dtype)
self.assertEqual(module.flat_param.dtype, new_dtype)
self.assertTrue(
all(p.dtype == new_dtype for p in module.encoder.layers[0].parameters())
)
def test_flatten_nothing(self):
module = self._get_transformer()
module = FlattenParamsWrapper(module, [])
self.assertIsNone(module.flat_param)
def test_empty_module(self):
module = self._get_empty_module()
in_data = torch.rand(1)
ref_out = module(in_data)
module = FlattenParamsWrapper(module, [])
self.assertEqual(len(list(module.parameters())), 0)
self.assertIsNone(module.flat_param)
fpw_out = module(in_data)
self.assertEqual(ref_out, fpw_out)
def test_num_params(self):
module = self._get_transformer()
self._test_num_params(module)
def test_shared_params_num_params(self):
module = self._get_shared_params_transformer()
self._test_num_params(module)
def test_output(self):
module = self._get_transformer()
self._test_output(module)
def test_shared_params_output(self):
module = self._get_shared_params_transformer()
self._test_output(module)
def test_shared_params_pnorm_after_step(self):
# incorrect parameter sharing is likely to cause problems after an
# optimization step
module = self._get_shared_params_transformer()
ref_pnorm_after_step = self._get_pnorm_after_step(module)
module = self._get_shared_params_transformer() # recreate
params_to_flatten = list(module.parameters())
flat_module = FlattenParamsWrapper(module, params_to_flatten)
flat_pnorm_after_step = self._get_pnorm_after_step(flat_module)
self.assertEqual(ref_pnorm_after_step, flat_pnorm_after_step)
def test_sharded_flat_param(self):
module = torch.nn.Sequential(
torch.nn.Linear(10, 10, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(10, 10, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(10, 10, bias=False),
torch.nn.ReLU(),
)
params_to_flatten = list(module.parameters())
flat_module = FlattenParamsWrapper(module, params_to_flatten)
flat_param_handle = flat_module.handle
def _test(kwargs, expected):
"""
Tests the subroutine ``_get_shard_metadata()`` that computes shard
metadata based on start and end indices in the unsharded flattened
parameter.
We manually set the relevant attributes on the flattened parameter
to be able to check the effect of ``_get_shard_metadata()`` via
``shard_metadata()`` since normally the attributes are set in
``init_shard_info()`` with the start and end indices fixed based on
rank and world size.
"""
flat_param = flat_module.flat_param
flat_param._is_sharded = True
flat_param._shard_param_offsets, flat_param._shard_indices = \
flat_param_handle._get_shard_metadata(kwargs["start"], kwargs["end"])
self.assertEqual(
flat_param_handle.shard_metadata(),
expected,
msg=f"{flat_param_handle.shard_metadata()}, {expected}",
)
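        # In the cases below, `start` and `end` are inclusive indices into the
        # unsharded flat parameter; each Linear(10, 10) weight contributes 100
        # elements, and param_offsets gives the inclusive (start, end) range
        # covered within each included parameter.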
_test(
kwargs={"start": 0, "end": 0},
expected=FlatParamShardMetadata(
param_names=["0.weight"],
param_shapes=[(10, 10)],
param_numels=[100],
param_offsets=[(0, 0)],
),
)
_test(
kwargs={"start": 0, "end": 50},
expected=FlatParamShardMetadata(
param_names=["0.weight"],
param_shapes=[(10, 10)],
param_numels=[100],
param_offsets=[(0, 50)],
),
)
_test(
kwargs={"start": 0, "end": 99},
expected=FlatParamShardMetadata(
param_names=["0.weight"],
param_shapes=[(10, 10)],
param_numels=[100],
param_offsets=[(0, 99)],
),
)
_test(
kwargs={"start": 50, "end": 149},
expected=FlatParamShardMetadata(
param_names=["0.weight", "2.weight"],
param_shapes=[(10, 10), (10, 10)],
param_numels=[100, 100],
param_offsets=[(50, 99), (0, 49)],
),
)
_test(
kwargs={"start": 50, "end": 199},
expected=FlatParamShardMetadata(
param_names=["0.weight", "2.weight"],
param_shapes=[(10, 10), (10, 10)],
param_numels=[100, 100],
param_offsets=[(50, 99), (0, 99)],
),
)
_test(
kwargs={"start": 99, "end": 199},
expected=FlatParamShardMetadata(
param_names=["0.weight", "2.weight"],
param_shapes=[(10, 10), (10, 10)],
param_numels=[100, 100],
param_offsets=[(99, 99), (0, 99)],
),
)
_test(
kwargs={"start": 100, "end": 199},
expected=FlatParamShardMetadata(
param_names=["2.weight"],
param_shapes=[(10, 10)],
param_numels=[100],
param_offsets=[(0, 99)],
),
)
_test(
kwargs={"start": 100, "end": 299},
expected=FlatParamShardMetadata(
param_names=["2.weight", "4.weight"],
param_shapes=[(10, 10), (10, 10)],
param_numels=[100, 100],
param_offsets=[(0, 99), (0, 99)],
),
)
_test(
kwargs={"start": 100, "end": 1000},
expected=FlatParamShardMetadata(
param_names=["2.weight", "4.weight"],
param_shapes=[(10, 10), (10, 10)],
param_numels=[100, 100],
param_offsets=[(0, 99), (0, 99)],
),
)
_test(
kwargs={"start": 299, "end": 299},
expected=FlatParamShardMetadata(
param_names=["4.weight"],
param_shapes=[(10, 10)],
param_numels=[100],
param_offsets=[(99, 99)],
),
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestFlattenParamsCUDA(TestFlattenParams):
def _get_transformer(self, seed=0):
module = super()._get_transformer(seed=seed)
return module.cuda()
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestFlattenParamsCUDAHalf(TestFlattenParams):
def _get_transformer(self, seed=0):
module = super()._get_transformer(seed=seed)
return module.cuda().half()
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_flatten_params_wrapper.py
|
# Owner(s): ["oncall: distributed"]
import functools
import sys
from collections import namedtuple
from contextlib import suppress
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.fsdp import FlatParameter
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardingStrategy, CPUOffload
from torch.distributed.fsdp.wrap import (
always_wrap_policy,
transformer_auto_wrap_policy,
)
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
NestedWrappedModule,
TransformerWithSharedParams,
_assert_module_states,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestFSDPMisc(FSDPTest):
@property
def world_size(self):
return 2
@property
def process_group(self):
return dist.distributed_c10d._get_default_group()
@skip_if_lt_x_gpu(2)
def test_fsdp_namedtuple(self):
# Ensure namedtuple support, preventing issues such as
# https://github.com/pytorch/pytorch/issues/83053
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(100, 100)
def forward(self, x):
return x
m = MyModule().cuda()
m = FSDP(m)
t = torch.ones(1, device="cuda", requires_grad=True)
MyOutputType = namedtuple(
"MyOutputType",
["a", "b", "c", "d"],
defaults=(t, t, t, t)
)
inp = MyOutputType()
out = m(inp)
# Ensure hooks are registered
for x in out:
self.assertNotEqual([], list(x._backward_hooks.values()))
# TODO: we should check backward() and param is resharded
# as well, but this is blocked by
# https://github.com/pytorch/pytorch/issues/83107 and
# https://github.com/pytorch/pytorch/issues/83129
@skip_if_lt_x_gpu(2)
@parametrize("use_second_layer", [True, False])
@parametrize("sharding_strategy", [ShardingStrategy.NO_SHARD, None])
def test_fsdp_module_no_compute_grad(self, use_second_layer, sharding_strategy):
# When use_second_layer=True, b is involved in forward computation but does
# not receive grad in backward. Otherwise, b is not involved in forward
# computation.
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10)
self.b = nn.Linear(10, 10)
def forward(self, x, y):
out1 = self.a(x)
if use_second_layer:
out2 = self.b(y)
return out1, out2
else:
return out1
fsdp = FSDP(
MyModel().cuda(),
sharding_strategy=sharding_strategy,
auto_wrap_policy=always_wrap_policy
)
x = torch.randn(10, 10, device='cuda')
y = torch.randn(10, 10, device='cuda')
for i in range(4):
if use_second_layer:
a, b = fsdp(x, y)
else:
a = fsdp(x, y)
loss = a.sum()
loss.backward()
# self.a receives grad, self.b does not
a_grad = fsdp.module.a._fsdp_wrapped_module.flat_param.grad
b_grad = fsdp.module.b._fsdp_wrapped_module.flat_param.grad
self.assertIsNotNone(a_grad)
self.assertIsNone(b_grad)
@skip_if_lt_x_gpu(2)
def test_device_id_auto_wrap(self):
"""Tests that ``auto_wrap_policy`` propagates ``device_id`` to all
nested FSDP instances."""
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={TransformerEncoderLayer, TransformerDecoderLayer},
)
fsdp_kwargs = {
"auto_wrap_policy": auto_wrap_policy,
"device_id": torch.cuda.current_device(),
}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs,
)
for fsdp_module in FSDP.fsdp_modules(fsdp_model):
self.assertEqual(
fsdp_module.device_id,
torch.device("cuda", torch.cuda.current_device()),
)
@skip_if_lt_x_gpu(2)
def test_fsdp_device_id_cpu_offload(self):
"""
        Ensures that even if device_id is specified, the module is on CPU after
        init when CPU offloading is enabled.
"""
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10)
self.b = nn.Linear(10, 10)
def forward(self, x):
return self.b(self.a(x))
model = MyModel()
fsdp = FSDP(
model,
auto_wrap_policy=always_wrap_policy,
cpu_offload=CPUOffload(offload_params=True),
device_id=torch.cuda.current_device()
)
cpu_device = torch.device("cpu")
for fsdp_unit in FSDP.fsdp_modules(fsdp):
# This FSDP unit may not directly manage
# any parameters.
if len(fsdp_unit.params) > 0:
fsdp_param = fsdp_unit.params[0]
self.assertEqual(fsdp_param.device, cpu_device)
@skip_if_lt_x_gpu(2)
@parametrize("use_index", [True, False])
def test_fsdp_device_id(self, use_index):
"""
Tests the FSDP ``device_id`` argument:
- Wrapping a CPU module should move the module to the GPU matching
``device_id``
- Wrapping a GPU module already on the GPU matching ``device_id``
should not raise an error
- Wrapping a GPU module already on GPU and passing a GPU device
without specifying a device ID (i.e. ``torch.device("cuda")``) warns
"""
dev_id = (
torch.cuda.current_device() if use_index
else torch.device("cuda", torch.cuda.current_device())
)
def _check_device_matches(module, device_id):
"""Checks that the ``FlatParameter``s in ``module`` have device
matching ``device_id``."""
devices = {
p.device for p in module.parameters()
if isinstance(p, FlatParameter)
}
assert len(devices) > 0
self.assertEqual(1, len(devices))
found_device = devices.pop()
if use_index and not isinstance(device_id, torch.device):
device = torch.device("cuda", device_id)
else:
device = device_id
self.assertEqual(found_device, device)
# Check that FSDP parameters are moved to `device_id` for a CPU module
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_NEVER,
fsdp_kwargs={"device_id": dev_id},
)
_check_device_matches(nested_wrapped_module, dev_id)
# Check that specifying `device_id` for a GPU module already on that
# device does not raise an error
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs={"device_id": dev_id},
)
_check_device_matches(nested_wrapped_module, dev_id)
# Check that passing in `torch.device("cuda")` for a GPU module warns
regex = "does not have explicit index"
context = self.assertWarnsRegex(
expected_warning=UserWarning, expected_regex=regex
)
with context:
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs={"device_id": torch.device("cuda")}
)
_check_device_matches(
nested_wrapped_module,
torch.device("cuda", torch.cuda.current_device())
)
@skip_if_lt_x_gpu(2)
def test_module_device_mismatches_device_id(self):
"""Tests that specifying a ``device_id`` argument to FSDP for a GPU
module that does not match the GPU device ID raises an error."""
context = (
self.assertRaisesRegex(
RuntimeError,
f"on rank {self.rank}.*cuda:0, but is on cuda:{self.rank}"
) if self.rank != 0 else suppress()
)
with context:
NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
# Move wrapped modules to CUDA before wrapping with FSDP
cuda_init_mode=CUDAInitMode.CUDA_BEFORE,
# Should raise error since rank 1 is given `device_id=0` when
# the model is on cuda:1
fsdp_kwargs={"device_id": 0},
)
@skip_if_lt_x_gpu(2)
def test_multi_device_not_supported(self):
"""Tests that wrapping a multi-device module (i.e. with submodules on
both GPU and CPU) with FSDP raises an error."""
class MultiDeviceModule(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(1, 1).cuda()
self.b = nn.Linear(1, 1)
with self.assertRaisesRegex(
RuntimeError, "FSDP only supports single device modules"
):
FSDP(MultiDeviceModule())
@skip_if_lt_x_gpu(2)
def test_no_params(self):
"""
        Test that device_id and CPU init work if the module has no params
        (they are effectively no-ops, but ensure FSDP does not assume the module
        has parameters during init).
"""
# Test CPU
no_params = nn.ReLU()
module = FSDP(no_params)
# Test CUDA
no_params = nn.ReLU().cuda()
module = FSDP(no_params)
# Test CPU + device_id
no_params = nn.ReLU()
module = FSDP(no_params, device_id=torch.cuda.current_device())
# For modules with no params, wrong device_id will raise error about
# inconsistency between compute_device and device_id, since compute_device
# is computed as torch.cuda.current_device when there are no params.
no_params = nn.ReLU().cuda()
context = (
self.assertRaisesRegex(
AssertionError,
f"Inconsistent.*cuda:{self.rank} vs cuda:0"
)
) if self.rank != 0 else suppress()
with context:
module = FSDP(no_params, device_id=0)
@skip_if_lt_x_gpu(2)
def test_fsdp_cpu_init_stays_on_cpu(self):
"""Tests that passing a CPU module to FSDP preserves that the wrapped
module is on CPU after FSDP initialization, albeit after loging a
warning, and that FSDP moves CPU input to GPU before the forward."""
torch.cuda.set_device(self.rank)
regex = "Module is put on CPU"
context = self.assertWarnsRegex(
expected_warning=UserWarning, expected_regex=regex
)
with context:
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_NEVER,
)
fsdp_model = FSDP(nested_wrapped_module, self.process_group)
devices = {p.device for p in fsdp_model.parameters()}
self.assertEqual(1, len(devices))
self.assertEqual(torch.device("cpu"), devices.pop())
fsdp_model = fsdp_model.cuda()
# Ensure fwd + backward can be performed after moving to CUDA.
# CPU input also tests that input is correctly moved to appropriate
# CUDA device.
inp = fsdp_model.module.get_input(device=torch.device("cpu"))
fsdp_model(*inp).sum().backward()
@skip_if_lt_x_gpu(2)
def test_cpu_init_with_sync_module_states(self):
"""Tests that passing ``sync_module_states=True`` raises an error for
a CPU module since the synchronization requires GPU communication,
while additionally passing ``device_id`` does not raise an error."""
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_NEVER,
)
with self.assertRaisesRegex(
ValueError,
"Module has CPU parameters, but sync_module_states=True is specified."
):
FSDP(nested_wrapped_module, self.process_group, sync_module_states=True)
# Specifying device_id with sync_module_states=True works.
FSDP(
nested_wrapped_module,
self.process_group,
device_id=torch.cuda.current_device(),
sync_module_states=True,
)
@skip_if_lt_x_gpu(2)
def test_fsdp_same_model_across_ranks(self):
"""
FSDP broadcasts model from rank 0 to ensure it starts off with the same
values.
"""
class MyModel(nn.Module):
def __init__(self, rank):
super().__init__()
# Seed via rank to make model different across ranks
torch.manual_seed(rank)
torch.cuda.manual_seed(rank)
self.lin = nn.Linear(10, 10, bias=False)
self.register_buffer("buffer", torch.ones(1) * rank)
m = MyModel(self.rank).cuda()
_assert_module_states(m, process_group=self.process_group, assert_fn=self.assertNotEqual)
# Passing sync_module_states into FSDP makes model the same during init.
fsdp = FSDP(m, sync_module_states=True)
with fsdp.summon_full_params(fsdp):
_assert_module_states(fsdp, process_group=self.process_group, assert_fn=self.assertEqual)
# sync_module_states also works with CPU module with device_id passed in
m = MyModel(self.rank)
_assert_module_states(m, process_group=self.process_group, assert_fn=self.assertNotEqual)
# Passing sync_module_states into FSDP makes model the same during init.
fsdp = FSDP(m, device_id=torch.cuda.current_device(), sync_module_states=True)
with fsdp.summon_full_params(fsdp):
_assert_module_states(fsdp, process_group=self.process_group, assert_fn=self.assertEqual)
instantiate_parametrized_tests(TestFSDPMisc)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_misc.py
|
# Owner(s): ["oncall: distributed"]
import functools
import itertools
import sys
from typing import Any, Dict, List, Optional
from unittest import mock
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.fsdp import CPUOffload, MixedPrecision
from torch.distributed.fsdp.fully_sharded_data_parallel import (
BackwardPrefetch,
ShardingStrategy,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
AlwaysWrapNestedWrappedModule,
CUDAInitMode,
DummyDDP,
FSDPInitMode,
FSDPTest,
MixtureOfExperts,
NestedWrappedModule,
NestedWrappedModuleWithDelay,
TransformerWithSharedParams,
subtest_name,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
params = "cpu_offload,sharding_strategy"
cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)]
sharding_strategy_config = [None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD]
configs = list(itertools.product(cpu_offload_config, sharding_strategy_config))
test_name_mapping = {
str(CPUOffload(offload_params=True)): "offload_true",
str(CPUOffload(offload_params=False)): "offload_false",
str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op",
str(ShardingStrategy.NO_SHARD): "no_shard",
}
subtest_name = functools.partial(subtest_name, test_name_mapping)
class TestParityWithDDP(FSDPTest):
"""
Compare losses and parameter values after several updates when using
PyTorch DDP vs. FullyShardedDataParallel.
"""
def _get_cuda_init_modes(self, cpu_offload: CPUOffload) -> List[CUDAInitMode]:
modes = [
CUDAInitMode.CUDA_AFTER,
CUDAInitMode.CUDA_BEFORE
]
        # Note that CUDAInitMode.CUDA_NEVER currently works only with CPU
        # offload, since CPU offload explicitly brings the parameters back to
        # the CUDA device. Otherwise it does not work because we try to
        # all_gather p.data while it is on CPU, and NCCL only supports GPU
        # tensors.
if cpu_offload.offload_params:
modes.append(CUDAInitMode.CUDA_NEVER)
return modes
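    # Illustrative sketch (hypothetical helper, not used by the tests): per the
    # note above, CUDA_NEVER is only offered when parameter offloading is enabled.
    def _sketch_cuda_never_requires_offload(self):
        with_offload = self._get_cuda_init_modes(CPUOffload(offload_params=True))
        without_offload = self._get_cuda_init_modes(CPUOffload(offload_params=False))
        assert CUDAInitMode.CUDA_NEVER in with_offload
        assert CUDAInitMode.CUDA_NEVER not in without_offload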
def _get_subtest_config(self, cpu_offload: CPUOffload) -> Dict[str, List[Any]]:
"""Returns a subtest configuration that subtests CUDA initialization
modes and prefetching settings together."""
return {
"cuda_init_mode": self._get_cuda_init_modes(cpu_offload),
"forward_prefetch": [False, True],
"backward_prefetch": [
None,
BackwardPrefetch.BACKWARD_PRE,
BackwardPrefetch.BACKWARD_POST,
]
}
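    # Illustrative sketch (hypothetical helper, not used by the tests): the subtest
    # config maps keyword names to value lists, and ``run_subtests`` is assumed to
    # sweep the full cross product of those lists for each parametrized test below.
    def _sketch_num_subtest_combinations(self, cpu_offload: CPUOffload) -> int:
        config = self._get_subtest_config(cpu_offload)
        return len(list(itertools.product(*config.values())))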
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_nested_wrapped_model(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
):
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
NestedWrappedModule,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_nested_wrapped_model_single_iteration_mixed_precision(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
):
mixed_precision = MixedPrecision(
param_dtype=torch.float16,
buffer_dtype=torch.float16,
reduce_dtype=torch.float16,
)
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
NestedWrappedModule,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
num_iters=1,
mixed_precision=mixed_precision,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
# TODO (awgu): 2.0 fails tests
# @parametrize("norm_type", [2.0, None])
@parametrize("norm_type", [None])
def test_nested_always_wrap_model(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
norm_type: Optional[float],
):
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
AlwaysWrapNestedWrappedModule,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
norm_type=norm_type,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
# TODO (awgu): 2.0 fails tests
# @parametrize("norm_type", [2.0, None])
@parametrize("norm_type", [None])
def test_transformer(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
norm_type: Optional[float],
):
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
TransformerWithSharedParams,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
norm_type=norm_type,
sharding_strategy=sharding_strategy,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_delayed_optim_step(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
):
"""Tests the FSDP forward, backward, and optimizer step runtime by
using a model with a long CUDA delay after the loss computation/before
the optimizer step to exercise the internal CUDA stream usage in that
the forward pass all-gathers do not start until after the optimizer
step completes."""
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
NestedWrappedModuleWithDelay,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
init_kwargs={"delay_after_loss_ms": 250},
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_delayed_reduce_scatter(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
):
"""Tests the FSDP forward, backward, and optimizer step runtime by
using a model with a long CUDA delay before the gradient reduce-scatter
to exercise the internal CUDA stream usage in that the backward pass
waits for those reductions to finish."""
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
NestedWrappedModuleWithDelay,
FSDPInitMode.RECURSIVE,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
init_kwargs={"delay_before_reduction_ms": 250},
)
def _dummy_ddp_fn(self, model):
        # ``MixtureOfExperts`` implements custom gradient reduction logic, so
# the reference behavior should follow that logic instead of DDP
return DummyDDP(model)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
# TODO (awgu): 2.0 fails tests
# @parametrize("norm_type", [2.0, None])
@parametrize("norm_type", [None])
def test_mixture_of_experts(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
norm_type: Optional[float],
):
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
MixtureOfExperts,
FSDPInitMode.RECURSIVE,
ref_init_fn=self._dummy_ddp_fn,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
norm_type=norm_type,
)
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_mixture_of_experts_with_delay_before_free(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
):
self.run_subtests(
self._get_subtest_config(cpu_offload),
self._test_fsdp_parity,
MixtureOfExperts,
FSDPInitMode.RECURSIVE,
ref_init_fn=self._dummy_ddp_fn,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
init_kwargs={"delay_before_free_ms": 250}
)
class TestParamInit(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize("mixed_precision", [True, False])
def test_param_change_after_init(self, mixed_precision):
"""
        Tests that in-place changes to FSDP model parameter values after FSDP
        initialization persist.
"""
# Establish reference behavior
fsdp_kwargs = {}
if mixed_precision:
fsdp_kwargs["mixed_precision"] = MixedPrecision()
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
deterministic=True,
)
input = fsdp_model.module.get_input(torch.device("cuda"))
ref_output = fsdp_model(*input)
# Initialize the same model but change its first parameter value
# in-place after FSDP initialization
new_fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
deterministic=True,
)
first_param = next(new_fsdp_model.parameters())
nn.init.normal_(first_param.data)
new_output = new_fsdp_model(*input)
self.assertNotEqual(
ref_output,
new_output,
msg="new_output did not reflect change to param after init",
)
class TestHooks(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize("cuda_first", [False, True])
def test_pre_backward_hook_registration(self, cuda_first: bool):
"""Tests that FSDP pre-backward hooks are registered on forward pass
outputs."""
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER,
)
self._test_pre_backward_hook_registration(fsdp_model)
@skip_if_lt_x_gpu(2)
def test_pre_backward_hook_registration_after_state_dict(self):
"""Tests that FSDP pre-backward hooks are registered on forward pass
outputs after saving and loading the model from a checkpoint."""
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
)
self._train_for_several_steps(fsdp_model, num_steps=2, autocast=False)
state_dict = fsdp_model.state_dict()
fsdp_model.load_state_dict(state_dict)
self._test_pre_backward_hook_registration(fsdp_model)
def _test_pre_backward_hook_registration(self, model):
optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optim.zero_grad()
        # Inputs are always on CUDA since computation happens on the CUDA device only
input = model.module.get_input(torch.device("cuda"))
output = model(*input)
        # This is the pre-backward hook registered on the forward output
self.assertEqual(len(output._backward_hooks), 1)
loss = model.module.get_loss(input, output).cuda()
loss.backward()
        # The hook is not removed after the backward pass
self.assertEqual(len(output._backward_hooks), 1)
optim.step()
self.assertEqual(len(output._backward_hooks), 1)
@skip_if_lt_x_gpu(2)
@parametrize("cuda_first", [False, True])
@parametrize("mixed_precision", [True, False])
def test_register_functions_called(self, cuda_first: bool, mixed_precision: bool):
"""Tests that ``_register_{pre|post}_backward_hooks()`` are called
during the FSDP forward."""
fsdp_kwargs = {}
if mixed_precision:
fsdp_kwargs["mixed_precision"] = MixedPrecision()
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
)
input = fsdp_model.module.get_input(torch.device("cuda"))
fsdp_model._register_pre_backward_hooks = mock.MagicMock(return_value=None)
fsdp_model._register_post_backward_hooks = mock.MagicMock(return_value=None)
self.assertFalse(fsdp_model._register_post_backward_hooks.called)
self.assertFalse(fsdp_model._register_pre_backward_hooks.called)
fsdp_model(*input)
self.assertTrue(fsdp_model._register_post_backward_hooks.called)
self.assertTrue(fsdp_model._register_pre_backward_hooks.called)
class TestNoGrad(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize("mixed_precision", [True, False])
def test_transformer_no_grad(self, mixed_precision):
"""Tests that for an FSDP-wrapped transformer model with shared
parameters, after training for one iteration, running a forward pass in
``eval()`` mode gives the same output as running a forward pass in
``torch.no_grad()``."""
fsdp_kwargs = {}
if mixed_precision:
fsdp_kwargs["mixed_precision"] = MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
else:
fsdp_kwargs["mixed_precision"] = None
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_AFTER,
fsdp_kwargs,
)
self._train_for_several_steps(
fsdp_model,
num_steps=1,
autocast=False,
mixed_precision=fsdp_kwargs["mixed_precision"]
)
input = fsdp_model.module.get_input(torch.device("cuda"))
# Run a forward in eval mode
fsdp_model.eval()
ref_output = fsdp_model(*input)
# Run a forward in `no_grad()` and compare
with torch.no_grad():
no_grad_output = fsdp_model(*input)
self.assertEqual(ref_output, no_grad_output)
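# Pattern distilled from TestNoGrad above (illustrative only; ``model`` and ``inp``
# are placeholders for an already trained, FSDP-wrapped model and its inputs):
def _sketch_eval_matches_no_grad(model, inp):
    model.eval()
    eval_out = model(*inp)
    with torch.no_grad():
        no_grad_out = model(*inp)
    return torch.allclose(eval_out, no_grad_out)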
instantiate_parametrized_tests(TestHooks)
instantiate_parametrized_tests(TestParityWithDDP)
instantiate_parametrized_tests(TestNoGrad)
instantiate_parametrized_tests(TestParamInit)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_core.py
|
# Owner(s): ["oncall: distributed"]
from copy import deepcopy
from functools import partial
import torch
import torch.nn as nn
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
apply_activation_checkpointing_wrapper,
CheckpointWrapper,
CheckpointImpl
)
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_utils import (
run_tests,
TestCase,
)
import unittest
class CheckpointWrapperTest(TestCase):
def setUp(self):
super().setUp()
def test_load_activation_checkpointed_module(self):
lin = nn.Linear(10, 10, bias=False)
lin = checkpoint_wrapper(
lin,
checkpoint_fn=checkpoint,
# checkpoint kwargs
use_reentrant=True,
preserve_rng_state=False,
)
state_dict = deepcopy(lin.state_dict())
# Load into non-checkpoint wrapped linear module
lin_new = nn.Linear(10, 10, bias=False)
lin_new.load_state_dict(state_dict)
for p1, p2 in zip(lin.parameters(), lin_new.parameters()):
self.assertEqual(p1, p2)
self.assertTrue(torch.allclose(p1, p2))
# Load non-checkpoint wrapped module into checkpoint wrapped one
# Make params different
for p in lin_new.parameters():
with torch.no_grad():
p.add_(0.5)
state_dict = deepcopy(lin_new.state_dict())
# Verify checkpoint wrapped linear can load unwrapped linear
lin.load_state_dict(state_dict)
for p1, p2 in zip(lin.parameters(), lin_new.parameters()):
self.assertEqual(p1, p2)
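    # Illustrative sketch (hypothetical helper, not used by the test): the property
    # exercised above -- a checkpoint-wrapped module exposes the same state_dict
    # keys as its plain counterpart, so checkpoints load in either direction.
    @staticmethod
    def _sketch_state_dict_keys_match():
        plain = nn.Linear(10, 10, bias=False)
        wrapped = checkpoint_wrapper(nn.Linear(10, 10, bias=False))
        return set(plain.state_dict().keys()) == set(wrapped.state_dict().keys())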
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
def test_checkpoint_wrapper_parity(self):
"""
        Tests that using checkpoint_wrapper or the functional
        torch.utils.checkpoint (with the same reentrant config)
        results in the same peak memory usage, i.e. the two are
        equivalent in terms of memory.
"""
class Model(nn.Module):
def __init__(
self,
n: int,
use_cp: bool,
use_wrapper: bool = False,
use_reentrant: bool = True
):
super().__init__()
self.layers = nn.ModuleList()
self.n = n
self.use_cp = use_cp
self.use_wrapper = use_wrapper
self.use_reentrant = use_reentrant
wrp = partial(
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.REENTRANT if use_reentrant else CheckpointImpl.NO_REENTRANT
)
for i in range(self.n):
l = nn.Sequential(nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256))
use_checkpoint_wrapper = self.use_wrapper
if use_checkpoint_wrapper:
l = wrp(l)
self.layers.append(l)
def forward(self, x):
for i in range(self.n):
if (
self.use_wrapper or
not self.use_cp
):
x = self.layers[i](x)
else:
x = checkpoint(self.layers[i], x, use_reentrant=self.use_reentrant)
return x
def test(use_checkpointing, use_wrapper, use_reentrant):
a = Model(8, use_checkpointing, use_wrapper=use_wrapper, use_reentrant=use_reentrant).cuda()
x = torch.randn(10000, 256, requires_grad=True).cuda()
torch.cuda.reset_peak_memory_stats()
loss = a(x).sum()
loss.backward()
return torch.cuda.max_memory_allocated()
functional_no_reentrant = test(use_checkpointing=True, use_wrapper=False, use_reentrant=False)
wrapper_no_reentrant = test(use_checkpointing=False, use_wrapper=True, use_reentrant=False)
self.assertEqual(functional_no_reentrant, wrapper_no_reentrant)
functional_reentrant = test(use_checkpointing=True, use_wrapper=False, use_reentrant=True)
wrapper_reentrant = test(use_checkpointing=False, use_wrapper=True, use_reentrant=True)
self.assertEqual(functional_reentrant, wrapper_reentrant)
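    # Illustrative sketch (hypothetical helper, not used by the test): the
    # peak-memory measurement that the parity assertions above compare for the
    # two checkpointing front ends.
    @staticmethod
    def _sketch_peak_cuda_memory(model, inp):
        torch.cuda.reset_peak_memory_stats()
        model(inp).sum().backward()
        return torch.cuda.max_memory_allocated()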
def test_forward_missing_attributes(self):
lin = nn.Linear(1, 1)
m = nn.Sequential(lin, lin)
wrapped = CheckpointWrapper(m)
# Test indexing is forwarded
self.assertEqual(wrapped[0], lin)
# Test missing attributes are forwarded.
m._foo = 'bar'
self.assertEqual(wrapped._foo, 'bar')
def test_apply_activation_checkpointing_wrapper(self):
"""
Ensures that `apply_activation_checkpointing_wrapper` can be used
to swap modules for their checkpoint-wrapped counterparts given
a model.
"""
class LinearWithBatchNorm(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10)
self.bn = nn.BatchNorm1d(10)
self.nested_linear = nn.Sequential(nn.Linear(10, 10))
def forward(self, x):
return self.bn(self.nested_linear(self.lin(x)))
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.seq = nn.Sequential(
LinearWithBatchNorm(), LinearWithBatchNorm(), LinearWithBatchNorm()
)
def forward(self, x):
return self.seq(x)
model = MyModel()
n_linear = sum(1 if isinstance(x, nn.Linear) else 0 for x in model.modules())
def check_fn(l):
return isinstance(l, nn.Linear)
apply_activation_checkpointing_wrapper(
model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn
)
n_linear_wrapped = sum(1 if isinstance(x, nn.Linear) else 0 for x in model.modules())
n_checkpointed = sum(1 if isinstance(x, CheckpointWrapper) else 0 for x in model.modules())
self.assertEqual(n_checkpointed, n_linear_wrapped)
self.assertEqual(n_linear, n_linear_wrapped)
for j in range(3):
self.assertTrue(isinstance(model.seq[j].lin, CheckpointWrapper))
self.assertTrue(isinstance(model.seq[j].nested_linear[0], CheckpointWrapper))
inp = torch.randn(4, 10, requires_grad=True)
for i in range(6):
loss = model(inp).sum()
self.assertTrue(loss.requires_grad)
loss.backward()
# ensure checkpointed part of model has gradients
for j in range(3):
weight_lin = model.seq[j].lin._checkpoint_wrapped_module.weight
bias_lin = model.seq[j].lin._checkpoint_wrapped_module.bias
weight_nested_lin = model.seq[j].nested_linear[0]._checkpoint_wrapped_module.weight
bias_nested_lin = model.seq[j].nested_linear[0]._checkpoint_wrapped_module.bias
for param in [weight_lin, bias_lin, weight_nested_lin, bias_nested_lin]:
self.assertTrue(param.requires_grad)
self.assertFalse(param.grad is None)
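    # Illustrative sketch (hypothetical helper, not used by the test): applying
    # activation checkpointing to just the nn.Linear submodules of an arbitrary
    # model, mirroring what the test above verifies.
    @staticmethod
    def _sketch_wrap_linears(model):
        apply_activation_checkpointing_wrapper(
            model,
            checkpoint_wrapper_fn=checkpoint_wrapper,
            check_fn=lambda m: isinstance(m, nn.Linear),
        )
        return model  # submodules are replaced in place with CheckpointWrapper instances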
def test_fqn(self):
lin = nn.Linear(10, 10, bias=False)
lin = checkpoint_wrapper(lin)
state_dict = lin.state_dict()
for fqn, _ in lin.named_parameters():
self.assertTrue(fqn in state_dict, msg=f"{fqn} not in state_dict.")
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_checkpoint_wrapper.py
|
# Owner(s): ["oncall: distributed"]
import sys
import tempfile
import torch
from torch import distributed as dist
from torch.distributed._shard.checkpoint import (
FileSystemReader,
FileSystemWriter,
save_state_dict,
load_state_dict,
)
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
StateDictType,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel
from torch.distributed.fsdp.wrap import enable_wrap, wrap
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
SkipModel,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
_DISTRIBUTED_STATE_DICT_IMPLS = {
StateDictType.LOCAL_STATE_DICT,
StateDictType.SHARDED_STATE_DICT,
}
class TestDistributedCheckpoint(FSDPTest):
@property
def world_size(self):
return 2
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _DISTRIBUTED_STATE_DICT_IMPLS)
def test_distributed_checkpoint(self, state_dict_type) -> None:
with enable_wrap(wrapper_cls=FSDP):
torch.manual_seed(100)
model = wrap(SkipModel(double_nest=True))
torch.manual_seed(200)
new_model = wrap(SkipModel(double_nest=True))
with FullyShardedDataParallel.summon_full_params(
model
), FullyShardedDataParallel.summon_full_params(new_model):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertNotEqual(params, new_params)
with tempfile.TemporaryDirectory() as path:
paths = [path]
dist.broadcast_object_list(paths)
path = paths[0]
writer = FileSystemWriter(path)
reader = FileSystemReader(path)
with FSDP.state_dict_type(
model, state_dict_type
), FSDP.state_dict_type(new_model, state_dict_type):
state_dict = model.state_dict()
save_state_dict(state_dict, writer)
with FSDP.state_dict_type(
model, state_dict_type
), FSDP.state_dict_type(new_model, state_dict_type):
state_dict = new_model.state_dict()
load_state_dict(state_dict, reader)
new_model.load_state_dict(state_dict)
with FullyShardedDataParallel.summon_full_params(
model
), FullyShardedDataParallel.summon_full_params(new_model):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertEqual(params, new_params)
# TODO: add resharding test case.
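# Condensed sketch of the round trip exercised above (hypothetical helper, not used
# by the test; ``checkpoint_dir`` is a placeholder and an initialized process group
# is assumed):
def _sketch_checkpoint_roundtrip(model, new_model, checkpoint_dir, state_dict_type):
    with FSDP.state_dict_type(model, state_dict_type):
        save_state_dict(model.state_dict(), FileSystemWriter(checkpoint_dir))
    with FSDP.state_dict_type(new_model, state_dict_type):
        state_dict = new_model.state_dict()
        load_state_dict(state_dict, FileSystemReader(checkpoint_dir))
        new_model.load_state_dict(state_dict)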
instantiate_parametrized_tests(TestDistributedCheckpoint)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_distributed_checkpoint.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn import Linear, Module, Sequential
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class InnerModel(Module):
def __init__(self):
super().__init__()
self.layers = Sequential(FSDP(Linear(5, 5)))
def forward(self, x):
return self.layers(x)
class TestMultipleWrapping(FSDPTest):
@skip_if_lt_x_gpu(2)
def test_multiple_wrapping(self):
"""
        This test simulates wrapping the module again after training in order to run inference.
        This covers the case where, later in a session, the model is wrapped again in FSDP even
        though it already contains nested FSDP wrappers.
"""
inner_model = InnerModel()
model = FSDP(inner_model).cuda()
optim = SGD(model.parameters(), lr=0.1)
for i in range(3):
input = torch.rand((1, 5), dtype=torch.float).cuda()
input.requires_grad = True
output = model(input)
output.sum().backward()
optim.step()
optim.zero_grad()
input = torch.rand((1, 5), dtype=torch.float).cuda()
output = model(input)
        # Rewrap the inner model a second time and check inference parity
rewrapped_model = FSDP(inner_model).cuda()
rewrapped_output = rewrapped_model(input)
self.assertEqual(output, rewrapped_output)
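# Minimal sketch of the re-wrapping pattern described in the docstring above
# (hypothetical helper, not used by the test; assumes an initialized process group):
def _sketch_rewrap_for_inference(trained_inner_model, sample_input):
    rewrapped = FSDP(trained_inner_model).cuda()
    with torch.no_grad():
        return rewrapped(sample_input)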
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_multiple_wrapping.py
|