python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
from .activations import *
def make_divisible(v, divisor=8, min_value=1):
"""
forked from slim:
https://github.com/tensorflow/models/blob/\
0344c5503ee55e24f0de7f37336a6e08f10976fd/\
research/slim/nets/mobilenet/mobilenet.py#L62-L69
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
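# Illustrative values (not part of the original file): make_divisible keeps channel counts
# hardware friendly, e.g. make_divisible(30) == 32 and make_divisible(17) == 16, while the
# 10% guard bumps make_divisible(10) up to 16 instead of rounding all the way down to 8.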
def sub_filter_start_end(kernel_size, sub_kernel_size):
center = kernel_size // 2
dev = sub_kernel_size // 2
start, end = center - dev, center + dev + 1
assert end - start == sub_kernel_size
return start, end
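# Illustrative values (assumed, for clarity): sub_filter_start_end(7, 5) == (1, 6) and
# sub_filter_start_end(7, 3) == (2, 5), i.e. the sub-kernel is the centered crop of the
# largest kernel's weight tensor.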
def get_net_device(net):
return next(net.parameters()).device
def int2list(val, repeat_time=1):
if isinstance(val, list):
return val
elif isinstance(val, tuple):
return list(val)
else:
return [val for _ in range(repeat_time)]
def get_same_padding(kernel_size):
if isinstance(kernel_size, tuple):
assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
p1 = get_same_padding(kernel_size[0])
p2 = get_same_padding(kernel_size[1])
return p1, p2
assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
assert kernel_size % 2 > 0, 'kernel size should be odd number'
return kernel_size // 2
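# Illustrative values (assumed): get_same_padding(3) == 1, get_same_padding(5) == 2, and
# get_same_padding((5, 3)) == (2, 1); this keeps the spatial size unchanged for stride-1 convs.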
def copy_bn(target_bn, src_bn):
feature_dim = target_bn.num_features
target_bn.weight.data.copy_(src_bn.weight.data[:feature_dim])
target_bn.bias.data.copy_(src_bn.bias.data[:feature_dim])
target_bn.running_mean.data.copy_(src_bn.running_mean.data[:feature_dim])
target_bn.running_var.data.copy_(src_bn.running_var.data[:feature_dim])
def build_activation(act_func, inplace=True):
if act_func == 'relu':
return nn.ReLU(inplace=inplace)
elif act_func == 'relu6':
return nn.ReLU6(inplace=inplace)
elif act_func == 'tanh':
return nn.Tanh()
elif act_func == 'sigmoid':
return nn.Sigmoid()
elif act_func == 'h_swish':
return Hswish(inplace=inplace)
elif act_func == 'h_sigmoid':
return Hsigmoid(inplace=inplace)
elif act_func == 'swish':
return MemoryEfficientSwish()
elif act_func is None:
return None
else:
raise ValueError('do not support: %s' % act_func)
def drop_connect(inputs, p, training):
"""Drop connect.
Args:
inputs (tensor: BCHW): Input of this structure.
p (float: 0.0~1.0): Probability of drop connection.
training (bool): The running mode.
Returns:
output: Output after drop connection.
"""
assert 0 <= p <= 1, 'p must be in range of [0,1]'
if not training:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1.0 - p
# generate binary_tensor mask according to probability (p for 0, 1-p for 1)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
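# Minimal usage sketch (assumed tensor shapes, not from the original repo): during training,
# each sample in the batch is zeroed with probability p and the survivors are rescaled by
# 1/(1-p), so the expected activation is unchanged.
#   x = torch.randn(8, 16, 32, 32)
#   y = drop_connect(x, p=0.2, training=True)   # roughly 20% of the 8 samples become all zeros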
| AttentiveNAS-main | models/modules/nn_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import torch.distributed as dist
from .nn_utils import get_same_padding, make_divisible, sub_filter_start_end
from .static_layers import SELayer
class DynamicSeparableConv2d(nn.Module):
KERNEL_TRANSFORM_MODE = None # None or 1
def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1, channels_per_group=1):
super(DynamicSeparableConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.channels_per_group = channels_per_group
assert self.max_in_channels % self.channels_per_group == 0
self.kernel_size_list = kernel_size_list
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_in_channels, max(self.kernel_size_list), self.stride,
groups=self.max_in_channels // self.channels_per_group, bias=False,
)
self._ks_set = list(set(self.kernel_size_list))
self._ks_set.sort() # e.g., [3, 5, 7]
if self.KERNEL_TRANSFORM_MODE is not None:
# register scaling parameters
# 7to5_matrix, 5to3_matrix
scale_params = {}
for i in range(len(self._ks_set) - 1):
ks_small = self._ks_set[i]
ks_larger = self._ks_set[i + 1]
param_name = '%dto%d' % (ks_larger, ks_small)
scale_params['%s_matrix' % param_name] = Parameter(torch.eye(ks_small ** 2))
for name, param in scale_params.items():
self.register_parameter(name, param)
self.active_kernel_size = max(self.kernel_size_list)
def get_active_filter(self, in_channel, kernel_size):
out_channel = in_channel
max_kernel_size = max(self.kernel_size_list)
start, end = sub_filter_start_end(max_kernel_size, kernel_size)
filters = self.conv.weight[:out_channel, :in_channel, start:end, start:end]
if self.KERNEL_TRANSFORM_MODE is not None and kernel_size < max_kernel_size:
start_filter = self.conv.weight[:out_channel, :in_channel, :, :] # start with max kernel
for i in range(len(self._ks_set) - 1, 0, -1):
src_ks = self._ks_set[i]
if src_ks <= kernel_size:
break
target_ks = self._ks_set[i - 1]
start, end = sub_filter_start_end(src_ks, target_ks)
_input_filter = start_filter[:, :, start:end, start:end]
_input_filter = _input_filter.contiguous()
_input_filter = _input_filter.view(_input_filter.size(0), _input_filter.size(1), -1)
_input_filter = _input_filter.view(-1, _input_filter.size(2))
_input_filter = F.linear(
_input_filter, self.__getattr__('%dto%d_matrix' % (src_ks, target_ks)),
)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks ** 2)
_input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks, target_ks)
start_filter = _input_filter
filters = start_filter
return filters
def forward(self, x, kernel_size=None):
if kernel_size is None:
kernel_size = self.active_kernel_size
in_channel = x.size(1)
assert in_channel % self.channels_per_group == 0
filters = self.get_active_filter(in_channel, kernel_size).contiguous()
padding = get_same_padding(kernel_size)
y = F.conv2d(
x, filters, None, self.stride, padding, self.dilation, in_channel // self.channels_per_group
)
return y
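# Minimal usage sketch (assumed sizes, not part of the original file): the depthwise weight is
# allocated once at the largest kernel size and sliced per forward pass.
#   conv = DynamicSeparableConv2d(max_in_channels=64, kernel_size_list=[3, 5, 7])
#   x = torch.randn(2, 64, 32, 32)
#   y5 = conv(x, kernel_size=5)   # uses the centered 5x5 crop of the shared 7x7 weight
#   y3 = conv(x, kernel_size=3)   # uses the centered 3x3 crop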
class DynamicPointConv2d(nn.Module):
def __init__(self, max_in_channels, max_out_channels, kernel_size=1, stride=1, dilation=1):
super(DynamicPointConv2d, self).__init__()
self.max_in_channels = max_in_channels
self.max_out_channels = max_out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.conv = nn.Conv2d(
self.max_in_channels, self.max_out_channels, self.kernel_size, stride=self.stride, bias=False,
)
self.active_out_channel = self.max_out_channels
def forward(self, x, out_channel=None):
if out_channel is None:
out_channel = self.active_out_channel
in_channel = x.size(1)
filters = self.conv.weight[:out_channel, :in_channel, :, :].contiguous()
padding = get_same_padding(self.kernel_size)
y = F.conv2d(x, filters, None, self.stride, padding, self.dilation, 1)
return y
class DynamicLinear(nn.Module):
def __init__(self, max_in_features, max_out_features, bias=True):
super(DynamicLinear, self).__init__()
self.max_in_features = max_in_features
self.max_out_features = max_out_features
self.bias = bias
self.linear = nn.Linear(self.max_in_features, self.max_out_features, self.bias)
self.active_out_features = self.max_out_features
def forward(self, x, out_features=None):
if out_features is None:
out_features = self.active_out_features
in_features = x.size(1)
weight = self.linear.weight[:out_features, :in_features].contiguous()
bias = self.linear.bias[:out_features] if self.bias else None
y = F.linear(x, weight, bias)
return y
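# Minimal usage sketch (assumed sizes): DynamicPointConv2d and DynamicLinear follow the same
# pattern, slicing the largest weight matrix down to the currently active width.
#   fc = DynamicLinear(max_in_features=1024, max_out_features=512)
#   out = fc(torch.randn(4, 768), out_features=256)   # weight[:256, :768] and bias[:256] are used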
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
# Use allgather instead of allreduce since I don't trust in-place operations ..
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class DynamicBatchNorm2d(nn.Module):
'''
1. does not accumulate BN statistics (momentum=0.)
2. calculates BN statistics of all subnets after training
3. BN weights are shared
https://arxiv.org/abs/1903.05134
https://detectron2.readthedocs.io/_modules/detectron2/layers/batch_norm.html
'''
#SET_RUNNING_STATISTICS = False
def __init__(self, max_feature_dim):
super(DynamicBatchNorm2d, self).__init__()
self.max_feature_dim = max_feature_dim
self.bn = nn.BatchNorm2d(self.max_feature_dim)
#self.exponential_average_factor = 0 # doesn't accumulate bn stats
self.need_sync = False
# reserved for tracking the performance of the largest and smallest network
self.bn_tracking = nn.ModuleList(
[
nn.BatchNorm2d(self.max_feature_dim, affine=False),
nn.BatchNorm2d(self.max_feature_dim, affine=False)
]
)
def forward(self, x):
feature_dim = x.size(1)
if not self.training:
raise ValueError('DynamicBN only supports training')
bn = self.bn
# need_sync
if not self.need_sync:
return F.batch_norm(
x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
bn.momentum, bn.eps,
)
else:
assert dist.get_world_size() > 1, 'SyncBatchNorm requires >1 world size'
B, C = x.shape[0], x.shape[1]
mean = torch.mean(x, dim=[0, 2, 3])
meansqr = torch.mean(x * x, dim=[0, 2, 3])
assert B > 0, 'does not support zero batch size'
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
invstd = torch.rsqrt(var + bn.eps)
scale = bn.weight[:feature_dim] * invstd
bias = bn.bias[:feature_dim] - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
#if bn.num_features == feature_dim or DynamicBatchNorm2d.SET_RUNNING_STATISTICS:
# return bn(x)
#else:
# exponential_average_factor = 0.0
# if bn.training and bn.track_running_stats:
# # TODO: if statement only here to tell the jit to skip emitting this when it is None
# if bn.num_batches_tracked is not None:
# bn.num_batches_tracked += 1
# if bn.momentum is None: # use cumulative moving average
# exponential_average_factor = 1.0 / float(bn.num_batches_tracked)
# else: # use exponential moving average
# exponential_average_factor = bn.momentum
# return F.batch_norm(
# x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
# bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
# exponential_average_factor, bn.eps,
# )
class DynamicSE(SELayer):
def __init__(self, max_channel):
super(DynamicSE, self).__init__(max_channel)
def forward(self, x):
in_channel = x.size(1)
num_mid = make_divisible(in_channel // self.reduction, divisor=8)
y = x.mean(3, keepdim=True).mean(2, keepdim=True)
# reduce
reduce_conv = self.fc.reduce
reduce_filter = reduce_conv.weight[:num_mid, :in_channel, :, :].contiguous()
reduce_bias = reduce_conv.bias[:num_mid] if reduce_conv.bias is not None else None
y = F.conv2d(y, reduce_filter, reduce_bias, 1, 0, 1, 1)
# relu
y = self.fc.relu(y)
# expand
expand_conv = self.fc.expand
expand_filter = expand_conv.weight[:in_channel, :num_mid, :, :].contiguous()
expand_bias = expand_conv.bias[:in_channel] if expand_conv.bias is not None else None
y = F.conv2d(y, expand_filter, expand_bias, 1, 0, 1, 1)
# hard sigmoid
y = self.fc.h_sigmoid(y)
return x * y
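# Minimal usage sketch (assumed channel count and SELayer internals): the SE reduce/expand
# convs are sliced so the same module serves any active channel width.
#   se = DynamicSE(max_channel=64)
#   y = se(torch.randn(2, 48, 14, 14))   # bottleneck width = make_divisible(48 // reduction)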
| AttentiveNAS-main | models/modules/dynamic_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, WarmupLinearDecayLR, ConstantLR
def build_optimizer(args, model):
"""
Build an optimizer from config.
"""
no_wd_params, wd_params = [], []
for name, param in model.named_parameters():
if param.requires_grad:
if ".bn" in name or ".bias" in name:
no_wd_params.append(param)
else:
wd_params.append(param)
no_wd_params = nn.ParameterList(no_wd_params)
wd_params = nn.ParameterList(wd_params)
weight_decay_weight = args.weight_decay_weight
weight_decay_bn_bias = args.weight_decay_bn_bias
base_lr = args.lr_scheduler.base_lr
params_group = [
{"params": wd_params, "weight_decay": float(weight_decay_weight), 'group_name':'weight'},
{"params": no_wd_params, "weight_decay": float(weight_decay_bn_bias), 'group_name':'bn_bias'},
]
if args.optimizer.method == 'sgd':
momentum = args.optimizer.momentum
nesterov = args.optimizer.nesterov
optimizer = torch.optim.SGD(
params_group,
lr = base_lr,
momentum = momentum,
nesterov = nesterov,
)
else:
raise ValueError(f'no optimizer {args.optimizer.method}')
return optimizer
def build_lr_scheduler(args, optimizer):
if not hasattr(args, 'max_iters'):
#important house-keeping stuff
args.max_iters = args.n_iters_per_epoch * args.epochs
if getattr(args, 'warmup_iters', None) is None:
args.warmup_iters = args.n_iters_per_epoch * args.warmup_epochs
warmup_iters = args.warmup_iters
warmup_lr = float(getattr(args.lr_scheduler, 'warmup_lr', 0.001))
warmup_method = getattr(args.lr_scheduler, 'warmup_method', 'linear')
clamp_lr_percent = float(getattr(args.lr_scheduler, 'clamp_lr_percent', 0.))
clamp_lr = args.lr_scheduler.base_lr * clamp_lr_percent
if args.lr_scheduler.method == 'warmup_cosine_lr':
return WarmupCosineLR(
optimizer,
args.max_iters,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'warmup_exp_decay_lr':
decay_cycle_iters = int(args.lr_scheduler.lr_decay_cycle * args.n_iters_per_epoch)
total_decay_iters = args.n_iters_per_epoch * (args.epochs - args.warmup_epochs)
milestones = [ warmup_iters + (idx + 1) * decay_cycle_iters \
for idx in range(total_decay_iters // decay_cycle_iters)]
return WarmupMultiStepLR(
optimizer,
milestones,
gamma=args.lr_scheduler.lr_decay_rate_per_cycle,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'warmup_linear_lr':
decay_cycle_iters = args.n_iters_per_epoch
milestones = [ warmup_iters + (idx + 1) * decay_cycle_iters \
for idx in range(args.epochs - args.warmup_epochs)]
return WarmupLinearDecayLR(
optimizer,
milestones,
warmup_factor = warmup_lr,
warmup_iters = warmup_iters,
warmup_method = warmup_method,
clamp_lr = clamp_lr,
)
elif args.lr_scheduler.method == 'constant_lr':
return ConstantLR(
optimizer
)
else:
raise NotImplementedError
| AttentiveNAS-main | solver/build.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import math
from bisect import bisect_right
from typing import List
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self,
optimizer,
max_iters,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = 'linear',
last_epoch = -1,
clamp_lr = 0.
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
) for base_lr in self.base_lrs ]
def _compute_values(self):
return self.get_lr()
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma = 0.1,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = "linear",
last_epoch= -1,
clamp_lr = 0.
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. Got {}".format(milestones)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
) for base_lr in self.base_lrs ]
def _compute_values(self):
# The new interface
return self.get_lr()
class WarmupLinearDecayLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
warmup_factor = 0.001,
warmup_iters = 1000,
warmup_method = "linear",
last_epoch= -1,
clamp_lr = 0.
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. Got {}".format(milestones)
)
self.milestones = milestones
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.clamp_lr = clamp_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [ max( self.clamp_lr if self.last_epoch > self.warmup_iters else 0.,
base_lr * warmup_factor * (1.0 - 1.0 * bisect_right(self.milestones, self.last_epoch) / len(self.milestones))
) for base_lr in self.base_lrs ]
def _compute_values(self):
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(method, iter, warmup_iters, warmup_factor):
if iter >= warmup_iters:
return 1.0
if method == 'constant':
return warmup_factor
elif method == 'linear':
alpha = float(iter) / float(warmup_iters)
return warmup_factor * (1. - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method))
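# Worked example (illustrative): with warmup_factor=0.001 and warmup_iters=1000, the 'linear'
# method returns 0.001 at iter 0, roughly 0.5005 at iter 500, and 1.0 once iter >= 1000; the
# schedulers above multiply base_lr by this factor before applying cosine/step/linear decay.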
class ConstantLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self,
optimizer,
last_epoch= -1,
):
super().__init__(optimizer, last_epoch)
def get_lr(self):
return [ base_lr for base_lr in self.base_lrs ]
def _compute_values(self):
return self.get_lr()
| AttentiveNAS-main | solver/lr_scheduler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import build_optimizer, build_lr_scheduler
| AttentiveNAS-main | solver/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import time
from utils.progress import AverageMeter, ProgressMeter, accuracy
from utils.flops_counter import count_net_flops_and_params
import models
def log_helper(summary, logger=None):
if logger:
logger.info(summary)
else:
print(summary)
def validate_one_subnet(
val_loader,
subnet,
criterion,
args,
logger=None,
):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
log_helper('evaluating...', logger)
#evaluation
end = time.time()
subnet.cuda(args.gpu)
subnet.eval() # freeze again all running stats
for batch_idx, (images, target) in enumerate(val_loader):
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = subnet(images)
loss = criterion(output, target).item()
# measure accuracy
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.size(0)
if args.distributed and getattr(args, 'distributed_val', True):
corr1, corr5, loss = acc1 * batch_size, acc5 * batch_size, loss * batch_size
stats = torch.tensor([corr1, corr5, loss, batch_size], device=args.gpu)
dist.barrier() # synchronizes all processes
dist.all_reduce(stats, op=torch.distributed.ReduceOp.SUM)
corr1, corr5, loss, batch_size = stats.tolist()
acc1, acc5, loss = corr1 / batch_size, corr5 / batch_size, loss/batch_size
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
losses.update(loss, batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
if batch_idx % args.print_freq == 0:
progress.display(batch_idx, logger)
log_helper(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}, Top1: {top1.sum}/{top1.count}'
.format(top1=top1, top5=top5), logger)
# compute flops
if getattr(subnet, 'module', None):
resolution = subnet.module.resolution
else:
resolution = subnet.resolution
data_shape = (1, 3, resolution, resolution)
flops, params = count_net_flops_and_params(subnet, data_shape)
return float(top1.avg), float(top5.avg), float(losses.avg), flops, params
| AttentiveNAS-main | evaluate/imagenet_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import time
from utils.progress import AverageMeter, ProgressMeter, accuracy
import utils.comm as comm
from .imagenet_eval import validate_one_subnet, log_helper
def validate(
subnets_to_be_evaluated,
train_loader,
val_loader,
model,
criterion,
args,
logger,
bn_calibration=True,
):
supernet = model.module \
if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
results = []
top1_list, top5_list = [], []
with torch.no_grad():
for net_id in subnets_to_be_evaluated:
if net_id == 'attentive_nas_min_net':
supernet.sample_min_subnet()
elif net_id == 'attentive_nas_max_net':
supernet.sample_max_subnet()
elif net_id.startswith('attentive_nas_random_net'):
supernet.sample_active_subnet()
else:
supernet.set_active_subnet(
subnets_to_be_evaluated[net_id]['resolution'],
subnets_to_be_evaluated[net_id]['width'],
subnets_to_be_evaluated[net_id]['depth'],
subnets_to_be_evaluated[net_id]['kernel_size'],
subnets_to_be_evaluated[net_id]['expand_ratio'],
)
subnet = supernet.get_active_subnet()
subnet_cfg = supernet.get_active_subnet_settings()
subnet.cuda(args.gpu)
if bn_calibration:
subnet.eval()
subnet.reset_running_stats_for_calibration()
# estimate running mean and running statistics
logger.info('Calibrating bn running statistics')
for batch_idx, (images, _) in enumerate(train_loader):
if batch_idx >= args.post_bn_calibration_batch_num:
break
if getattr(args, 'use_clean_images_for_subnet_training', False):
_, images = images
images = images.cuda(args.gpu, non_blocking=True)
subnet(images) #forward only
acc1, acc5, loss, flops, params = validate_one_subnet(
val_loader, subnet, criterion, args, logger
)
top1_list.append(acc1)
top5_list.append(acc5)
summary = str({
'net_id': net_id,
'mode': 'evaluate',
'epoch': getattr(args, 'curr_epoch', -1),
'acc1': acc1,
'acc5': acc5,
'loss': loss,
'flops': flops,
'params': params,
**subnet_cfg
})
if args.distributed and getattr(args, 'distributed_val', True):
logger.info(summary)
results += [summary]
else:
group = comm.reduce_eval_results(summary, args.gpu)
results += group
for rec in group:
logger.info(rec)
return results
| AttentiveNAS-main | evaluate/attentive_nas_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import atexit
import os
import random
import copy
def count_helper(v, flops, m):
if flops not in m:
m[flops] = {}
if v not in m[flops]:
m[flops][v] = 0
m[flops][v] += 1
def round_flops(flops, step):
return int(round(flops / step) * step)
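# Illustrative value (assumed step): round_flops(437, 50) == 450, i.e. FLOPs are discretized to
# the nearest multiple of `step` before being used as a key in the transition probability map.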
def convert_count_to_prob(m):
if isinstance(m[list(m.keys())[0]], dict):
for k in m:
convert_count_to_prob(m[k])
else:
t = sum(m.values())
for k in m:
m[k] = 1.0 * m[k] / t
def sample_helper(flops, m):
keys = list(m[flops].keys())
probs = list(m[flops].values())
return random.choices(keys, weights=probs)[0]
def build_trasition_prob_matrix(file_handler, step):
# initialize
prob_map = {}
prob_map['discretize_step'] = step
for k in ['flops', 'resolution', 'width', 'depth', 'kernel_size', 'expand_ratio']:
prob_map[k] = {}
cc = 0
for line in file_handler:
vals = eval(line.strip())
# discretize
flops = round_flops(vals['flops'], step)
prob_map['flops'][flops] = prob_map['flops'].get(flops, 0) + 1
# resolution
r = vals['resolution']
count_helper(r, flops, prob_map['resolution'])
for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
for idx, v in enumerate(vals[k]):
if idx not in prob_map[k]:
prob_map[k][idx] = {}
count_helper(v, flops, prob_map[k][idx])
cc += 1
# convert count to probability
for k in ['flops', 'resolution', 'width', 'depth', 'kernel_size', 'expand_ratio']:
convert_count_to_prob(prob_map[k])
prob_map['n_observations'] = cc
return prob_map
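# Shape of the resulting prob_map (a sketch under assumed inputs, not a dump of real data):
#   prob_map['flops'] = {200: 0.3, 250: 0.7, ...}                # P(target FLOPs bucket)
#   prob_map['resolution'][250] = {192: 0.4, 224: 0.6, ...}      # P(resolution | FLOPs)
#   prob_map['width'][0][250] = {16: 0.5, 24: 0.5, ...}          # P(width of stage 0 | FLOPs)
# depth, kernel_size and expand_ratio are indexed the same way as width.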
class ArchSampler():
def __init__(self, arch_to_flops_map_file_path, discretize_step, model, acc_predictor=None):
super(ArchSampler, self).__init__()
with open(arch_to_flops_map_file_path, 'r') as fp:
self.prob_map = build_trasition_prob_matrix(fp, discretize_step)
self.discretize_step = discretize_step
self.model = model
self.acc_predictor = acc_predictor
self.min_flops = min(list(self.prob_map['flops'].keys()))
self.max_flops = max(list(self.prob_map['flops'].keys()))
self.curr_sample_pool = None  # TODO: architecture samples could be generated in an asynchronous way
def sample_one_target_flops(self, flops_uniform=False):
f_vals = list(self.prob_map['flops'].keys())
f_probs = list(self.prob_map['flops'].values())
if flops_uniform:
return random.choice(f_vals)
else:
return random.choices(f_vals, weights=f_probs)[0]
def sample_archs_according_to_flops(self, target_flops, n_samples=1, max_trials=100, return_flops=True, return_trials=False):
archs = []
#for _ in range(n_samples):
while len(archs) < n_samples:
for _trial in range(max_trials+1):
arch = {}
arch['resolution'] = sample_helper(target_flops, self.prob_map['resolution'])
for k in ['width', 'kernel_size', 'depth', 'expand_ratio']:
arch[k] = []
for idx in sorted(list(self.prob_map[k].keys())):
arch[k].append(sample_helper(target_flops, self.prob_map[k][idx]))
if self.model:
self.model.set_active_subnet(**arch)
flops = self.model.compute_active_subnet_flops()
if return_flops:
arch['flops'] = flops
if round_flops(flops, self.discretize_step) == target_flops:
break
else:
raise NotImplementedError
# accept the sample anyway
archs.append(arch)
return archs
| AttentiveNAS-main | sampler/attentive_nas_sampler.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
""" Auto Augment
Implementation adapted from timm: https://github.com/rwightman/pytorch-image-models
"""
import random
import math
from PIL import Image, ImageOps, ImageEnhance
import PIL
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
_RANDOM_INTERPOLATION = (Image.NEAREST, Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', Image.NEAREST)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
kwargs.pop('fillcolor')
kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
def shear_y(img, factor, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
def translate_x_rel(img, pct, **kwargs):
pixels = pct * img.size[0]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
pixels = pct * img.size[1]
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_abs(img, pixels, **kwargs):
_check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
_check_args_tf(kwargs)
if _PIL_VER >= (5, 2):
return img.rotate(degrees, **kwargs)
elif _PIL_VER >= (5, 0):
w, h = img.size
post_trans = (0, 0)
rotn_center = (w / 2.0, h / 2.0)
angle = -math.radians(degrees)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
else:
return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
return ImageOps.autocontrast(img)
def invert(img, **__):
return ImageOps.invert(img)
def equalize(img, **__):
return ImageOps.equalize(img)
def solarize(img, thresh, **__):
return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
lut = []
for i in range(256):
if i < thresh:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def posterize(img, bits_to_keep, **__):
if bits_to_keep >= 8:
return img
bits_to_keep = max(1, bits_to_keep) # prevent all 0 images
return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate(level)
return (level,)
def _enhance_level_to_arg(level):
# range [0.1, 1.9]
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return (level,)
def _translate_abs_level_to_arg2(level):
level = (level / _MAX_LEVEL) * float(_HPARAMS_DEFAULT['translate_const'])
level = _randomly_negate(level)
return (level,)
def _translate_rel_level_to_arg(level):
# range [-0.45, 0.45]
level = (level / _MAX_LEVEL) * 0.45
level = _randomly_negate(level)
return (level,)
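# Illustrative magnitude mapping at the maximum level (_MAX_LEVEL == 10): rotation is drawn
# from +/-30 degrees, enhancement factors from [0.1, 1.9], shear from +/-0.3 and relative
# translation from +/-0.45 of the image size, with the sign randomly negated where applicable.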
# def level_to_arg(hparams):
# return {
# 'AutoContrast': lambda level: (),
# 'Equalize': lambda level: (),
# 'Invert': lambda level: (),
# 'Rotate': _rotate_level_to_arg,
# # FIXME these are both different from original impl as I believe there is a bug,
# # not sure what is the correct alternative, hence 2 options that look better
# 'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4) + 4,), # range [4, 8]
# 'Posterize2': lambda level: (4 - int((level / _MAX_LEVEL) * 4),), # range [4, 0]
# 'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),), # range [0, 256]
# 'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),), # range [0, 110]
# 'Color': _enhance_level_to_arg,
# 'Contrast': _enhance_level_to_arg,
# 'Brightness': _enhance_level_to_arg,
# 'Sharpness': _enhance_level_to_arg,
# 'ShearX': _shear_level_to_arg,
# 'ShearY': _shear_level_to_arg,
# 'TranslateX': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateY': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateXRel': lambda level: _translate_rel_level_to_arg(level),
# 'TranslateYRel': lambda level: _translate_rel_level_to_arg(level),
# }
NAME_TO_OP = {
'AutoContrast': auto_contrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Posterize2': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x_abs,
'TranslateY': translate_y_abs,
'TranslateXRel': translate_x_rel,
'TranslateYRel': translate_y_rel,
}
def pass_fn(input):
return ()
def _conversion0(input):
return (int((input / _MAX_LEVEL) * 4) + 4,)
def _conversion1(input):
return (4 - int((input / _MAX_LEVEL) * 4),)
def _conversion2(input):
return (int((input / _MAX_LEVEL) * 256),)
def _conversion3(input):
return (int((input / _MAX_LEVEL) * 110),)
class AutoAugmentOp:
def __init__(self, name, prob, magnitude, hparams={}):
self.aug_fn = NAME_TO_OP[name]
# self.level_fn = level_to_arg(hparams)[name]
if name == 'AutoContrast' or name == 'Equalize' or name == 'Invert':
self.level_fn = pass_fn
elif name == 'Rotate':
self.level_fn = _rotate_level_to_arg
elif name == 'Posterize':
self.level_fn = _conversion0
elif name == 'Posterize2':
self.level_fn = _conversion1
elif name == 'Solarize':
self.level_fn = _conversion2
elif name == 'SolarizeAdd':
self.level_fn = _conversion3
elif name == 'Color' or name == 'Contrast' or name == 'Brightness' or name == 'Sharpness':
self.level_fn = _enhance_level_to_arg
elif name == 'ShearX' or name == 'ShearY':
self.level_fn = _shear_level_to_arg
elif name == 'TranslateX' or name == 'TranslateY':
self.level_fn = _translate_abs_level_to_arg2
elif name == 'TranslateXRel' or name == 'TranslateYRel':
self.level_fn = _translate_rel_level_to_arg
else:
print("{} not recognized".format({}))
self.prob = prob
self.magnitude = magnitude
# If std deviation of magnitude is > 0, we introduce some randomness
# in the usually fixed policy and sample magnitude from normal dist
# with mean magnitude and std-dev of magnitude_std.
# NOTE This is being tested as it's not in paper or reference impl.
self.magnitude_std = 0.5 # FIXME add arg/hparam
self.kwargs = {
'fillcolor': hparams['img_mean'] if 'img_mean' in hparams else _FILL,
'resample': hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION
}
def __call__(self, img):
if self.prob < random.random():
return img
magnitude = self.magnitude
if self.magnitude_std and self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(_MAX_LEVEL, max(0, magnitude))
level_args = self.level_fn(magnitude)
return self.aug_fn(img, *level_args, **self.kwargs)
def auto_augment_policy_v0(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from TPU EfficientNet impl, cannot find
# a paper reference.
policy = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy_original(hparams=_HPARAMS_DEFAULT):
# ImageNet policy from https://arxiv.org/abs/1805.09501
policy = [
[('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
[('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
[('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
[('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
[('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
[('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
[('Rotate', 0.8, 8), ('Color', 0.4, 0)],
[('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
[('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Rotate', 0.8, 8), ('Color', 1.0, 2)],
[('Color', 0.8, 8), ('Solarize', 0.8, 7)],
[('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
[('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
[('Color', 0.4, 0), ('Equalize', 0.6, 3)],
[('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
[('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
[('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
[('Color', 0.6, 4), ('Contrast', 1.0, 8)],
[('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
]
pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
return pc
def auto_augment_policy(name='v0', hparams=_HPARAMS_DEFAULT):
if name == 'original':
return auto_augment_policy_original(hparams)
elif name == 'v0':
return auto_augment_policy_v0(hparams)
else:
print("Unknown auto_augmentation policy {}".format(name))
raise AssertionError()
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
sub_policy = random.choice(self.policy)
for op in sub_policy:
img = op(img)
return img
| AttentiveNAS-main | data/auto_augment_tf.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from __future__ import print_function
import torch
import torchvision.transforms.functional as F
from torchvision import datasets, transforms
from torch.utils.data import Dataset
import math
import sys
import random
from PIL import Image
from torch.utils.data.distributed import DistributedSampler
import os
from .data_transform import get_data_transform
def build_data_loader(args):
if args.dataset == 'imagenet':
return build_default_imagenet_data_loader(args)
else:
raise NotImplementedError
def build_default_imagenet_data_loader(args):
traindir = os.path.join(args.dataset_dir, "train")
valdir = os.path.join(args.dataset_dir, "val")
#build transforms
train_transform = get_data_transform(args, is_training=True, augment=args.augment)
test_transform = get_data_transform(args, is_training=False, augment=args.augment)
#build datasets
if not getattr(args, 'data_loader_cross_validation', False):
train_dataset = datasets.ImageFolder(traindir, train_transform)
val_dataset = datasets.ImageFolder(valdir, test_transform)
#else:
# my_dataset = datasets.ImageFolder(traindir)
# train_dataset, val_dataset = torch.utils.data.random_split(
# my_dataset, [args.data_split_ntrain, args.data_split_nval], generator=torch.Generator().manual_seed(args.data_split_seed)
# )
# train_dataset = MyDataset( train_dataset, train_transform)
# val_dataset = MyDataset(val_dataset, test_transform)
#build data loaders
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
sampler=train_sampler,
drop_last = getattr(args, 'drop_last', True),
num_workers=args.data_loader_workers_per_gpu,
pin_memory=True,
)
if args.distributed and getattr(args, 'distributed_val', True):
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
eval_batch_size = min(args.batch_size, 16) \
if not getattr(args, 'eval_only', False) else args.batch_size
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=eval_batch_size,
shuffle=False,
num_workers=args.data_loader_workers_per_gpu,
drop_last=False,
pin_memory=True,
sampler=val_sampler,
)
return train_loader, val_loader, train_sampler
| AttentiveNAS-main | data/data_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| AttentiveNAS-main | data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
from .auto_augment_tf import (
auto_augment_policy,
AutoAugment,
)
IMAGENET_PIXEL_MEAN = [123.675, 116.280, 103.530]
IMAGENET_PIXEL_STD = [58.395, 57.12, 57.375]
def get_data_transform(args, is_training, augment):
train_crop_size = getattr(args, 'train_crop_size', 224)
test_scale = getattr(args, 'test_scale', 256)
test_crop_size = getattr(args, 'test_crop_size', 224)
interpolation = Image.BICUBIC
if getattr(args, 'interpolation', None) and args.interpolation == 'bilinear':
interpolation = Image.BILINEAR
da_args = {
'train_crop_size': train_crop_size,
'test_scale': test_scale,
'test_crop_size': test_crop_size,
'interpolation': interpolation
}
if augment == 'default':
return build_default_transform(is_training, **da_args)
elif augment == 'auto_augment_tf':
policy = getattr(args, 'auto_augment_policy', 'v0')
return build_imagenet_auto_augment_tf_transform(is_training, policy=policy, **da_args)
else:
raise ValueError(augment)
def get_normalize():
normalize = transforms.Normalize(
mean=torch.Tensor(IMAGENET_PIXEL_MEAN) / 255.0,
std=torch.Tensor(IMAGENET_PIXEL_STD) / 255.0,
)
return normalize
def build_default_transform(
is_training, train_crop_size=224, test_scale=256, test_crop_size=224, interpolation=Image.BICUBIC
):
normalize = get_normalize()
if is_training:
ret = transforms.Compose(
[
transforms.RandomResizedCrop(train_crop_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
else:
ret = transforms.Compose(
[
transforms.Resize(test_scale, interpolation=interpolation),
transforms.CenterCrop(test_crop_size),
transforms.ToTensor(),
normalize,
]
)
return ret
def build_imagenet_auto_augment_tf_transform(
is_training, policy='v0', train_crop_size=224, test_scale=256, test_crop_size=224, interpolation=Image.BICUBIC
):
normalize = get_normalize()
img_size = train_crop_size
aa_params = {
"translate_const": int(img_size * 0.45),
"img_mean": tuple(round(x) for x in IMAGENET_PIXEL_MEAN),
}
aa_policy = AutoAugment(auto_augment_policy(policy, aa_params))
if is_training:
ret = transforms.Compose(
[
transforms.RandomResizedCrop(train_crop_size, interpolation=interpolation),
transforms.RandomHorizontalFlip(),
aa_policy,
transforms.ToTensor(),
normalize,
]
)
else:
ret = transforms.Compose(
[
transforms.Resize(test_scale, interpolation=interpolation),
transforms.CenterCrop(test_crop_size),
transforms.ToTensor(),
normalize,
]
)
return ret
| AttentiveNAS-main | data/data_transform.py |
# +
import numpy as np
def generate_synthetic_data(n, corr, class_balance, l_probs, l_groups):
cardinality = len(class_balance)
y_true = np.random.choice(cardinality, n, p=class_balance)
def generate_correlated(num: int = 2):
"""Generate num correlated label columns."""
ls = [[] for _ in range(num)]
for y in y_true:
if np.random.choice(2, p=[1.0 - corr, corr]):
v = np.random.choice(cardinality + 1, p=l_probs[y])
for l in ls:
l.append(v)
else:
for l in ls:
l.append(np.random.choice(cardinality + 1, p=l_probs[y]))
return [np.array(l) for l in ls]
def generate_ls(sets):
"""Generate label columns given sets of dependent indexes."""
ls = [None] * sum(map(len, sets))
for s in sets:
ls_gen = generate_correlated(num=len(s))
for i, idx in enumerate(s):
ls[idx] = ls_gen[i]
return ls
ls = generate_ls(l_groups)
l = np.vstack(ls).T - 1
return y_true, l
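# Minimal usage sketch (all numbers assumed for illustration): generate 1000 items with two
# balanced classes and three labeling functions, the first two of which are correlated.
#   l_probs = np.array([[0.5, 0.4, 0.1], [0.5, 0.1, 0.4]])   # P(vote | true class), 0 = abstain
#   y, L = generate_synthetic_data(1000, corr=0.8, class_balance=[0.5, 0.5],
#                                  l_probs=l_probs, l_groups=[[0, 1], [2]])
#   # y has shape (1000,); L has shape (1000, 3) with votes in {-1, 0, 1}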
| dependency_model-main | utils.py |
import cvxpy as cp
import numpy as np
import scipy as sp
def learn_structure(L):
N = float(np.shape(L)[0])
M = int(np.shape(L)[1])
sigma_O = (np.dot(L.T,L))/(N-1) - \
np.outer(np.mean(L,axis=0), np.mean(L,axis=0))
# symmetrize the empirical covariance to guard against numerical asymmetry
O = 1/2*(sigma_O+sigma_O.T)
O_root = np.real(sp.linalg.sqrtm(O))
# low-rank matrix
L_cvx = cp.Variable([M,M], PSD=True)
# sparse matrix
S = cp.Variable([M,M], PSD=True)
# S-L matrix
R = cp.Variable([M,M], PSD=True)
#reg params
lam = 1/np.sqrt(M)
gamma = 1e-8
objective = cp.Minimize(0.5*(cp.norm(R*O_root, 'fro')**2) - cp.trace(R) + lam*(gamma*cp.pnorm(S,1) + cp.norm(L_cvx, "nuc")))
constraints = [R == S - L_cvx, L_cvx>>0]
prob = cp.Problem(objective, constraints)
result = prob.solve(verbose=False)
opt_error = prob.value
#extract dependencies
J_hat = S.value
return J_hat
def get_deps_from_inverse_sig(J, thresh=0.2):
deps = []
for i in range(J.shape[0]):
for j in range(J.shape[1]):
if abs(J[i,j]) > thresh:
deps.append((i,j))
return deps
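# Minimal usage sketch (hypothetical label matrix): L is an (n_items, n_sources) matrix of
# labeling-function votes; learn_structure estimates a sparse dependency matrix and
# get_deps_from_inverse_sig thresholds it into a list of correlated source pairs.
#   J = learn_structure(L)
#   deps = get_deps_from_inverse_sig(J, thresh=0.2)   # e.g. [(0, 1), (1, 0), ...]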
| dependency_model-main | learn_deps.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import distutils.command.build
import distutils.util
import fnmatch
import glob
import io
import os
import sys
from pathlib import Path
import setuptools
from setuptools.command.build_py import build_py as build_py_orig
from setuptools.dist import Distribution
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
"--package-dir",
help="Source directory of package files.",
default="bazel-bin/package.runfiles/CompilerGym",
)
argparser.add_argument(
"--get-wheel-filename",
action="store_true",
help="Print only output filename without building it.",
)
argparser.add_argument(
"--build-dir",
help="Path to build dir. This is where this script copies files from the source before making the wheel package.",
default="build",
)
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
sys.path.insert(0, str((Path(args.package_dir) / "compiler_gym").absolute()))
import config # noqa: E402
with open("VERSION") as f:
version = f.read().strip()
with open("README.md") as f:
# Force UTF-8 file encoding to support non-ascii characters in the readme.
with io.open("README.md", encoding="utf-8") as f:
long_description = f.read()
with open("compiler_gym/requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
# When building a bdist_wheel we need to set the appropriate tags: this package
# includes compiled binaries, and does not include compiled python extensions.
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self):
python, abi, plat = _bdist_wheel.get_tag(self)
python, abi = "py3", "none"
return python, abi, plat
except ImportError:
bdist_wheel = None
class build(distutils.command.build.build):
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.build_base = args.build_dir
# Add files that should be excluded from the package.
# The argument exclude_package_data of setuptools.setup(...)
# does not work with py files. They have to be excluded here.
excluded = [
str(Path(args.package_dir) / "compiler_gym/envs/llvm/make_specs.py"),
str(Path(args.package_dir) / "compiler_gym/bin/random_eval.py"),
]
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
modules = super().find_package_modules(package, package_dir)
res = [
(pkg, mod, file)
for (pkg, mod, file) in modules
if not any(fnmatch.fnmatchcase(file, pat=pattern) for pattern in excluded)
]
return res
def wheel_filename(**kwargs):
# create a fake distribution from arguments
dist = Distribution(attrs=kwargs)
# finalize bdist_wheel command
bdist_wheel_cmd = dist.get_command_obj("bdist_wheel")
bdist_wheel_cmd.ensure_finalized()
# assemble wheel file name
distname = bdist_wheel_cmd.wheel_dist_name
tag = "-".join(bdist_wheel_cmd.get_tag())
return f"{distname}-{tag}.whl"
setup_kwargs = {
"name": "compiler_gym",
"version": version,
"description": "Reinforcement learning environments for compiler research",
"author": "Facebook AI Research",
"long_description": long_description,
"long_description_content_type": "text/markdown",
"url": "https://github.com/facebookresearch/CompilerGym",
"license": "MIT",
"packages": [
"compiler_gym.bin",
"compiler_gym.datasets",
"compiler_gym.envs.gcc.datasets",
"compiler_gym.envs.gcc.service",
"compiler_gym.envs.gcc",
"compiler_gym.envs.loop_tool",
"compiler_gym.envs.loop_tool.service",
"compiler_gym.envs",
"compiler_gym.envs",
"compiler_gym.errors",
"compiler_gym.leaderboard",
"compiler_gym.service.proto",
"compiler_gym.service.runtime",
"compiler_gym.service",
"compiler_gym.spaces",
"compiler_gym.third_party.autophase",
"compiler_gym.third_party.gccinvocation",
"compiler_gym.third_party.inst2vec",
"compiler_gym.third_party",
"compiler_gym.util.flags",
"compiler_gym.util",
"compiler_gym.views",
"compiler_gym.wrappers",
"compiler_gym",
],
"package_dir": {
"": args.package_dir,
},
"package_data": {
"compiler_gym": [
"envs/gcc/service/compiler_gym-gcc-service",
"envs/loop_tool/service/compiler_gym-loop_tool-service",
"third_party/csmith/csmith/bin/csmith",
"third_party/csmith/csmith/include/csmith-2.3.0/*.h",
"third_party/inst2vec/*.pickle",
]
},
"install_requires": requirements,
"include_package_data": True,
"python_requires": ">=3.7",
"classifiers": [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Compilers",
],
"cmdclass": {"bdist_wheel": bdist_wheel, "build": build, "build_py": build_py},
"platforms": [distutils.util.get_platform()],
"zip_safe": False,
}
if config.enable_llvm_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.llvm.datasets",
"compiler_gym.envs.llvm.service",
"compiler_gym.envs.llvm",
"compiler_gym.third_party.llvm",
"compiler_gym.third_party.autophase",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
[
"envs/llvm/service/compiler_gym-llvm-service",
"envs/llvm/service/compute_observation",
"envs/llvm/service/llvm-extract-one",
"envs/llvm/service/libLLVMPolly.so",
"third_party/cbench/benchmarks.txt",
"third_party/cbench/cbench-v*/crc32.bc",
]
)
if config.enable_mlir_env:
setup_kwargs["packages"].extend(
[
"compiler_gym.envs.mlir.datasets",
"compiler_gym.envs.mlir.service",
"compiler_gym.envs.mlir",
]
)
setup_kwargs["package_data"]["compiler_gym"].extend(
["envs/mlir/service/compiler_gym-mlir-service"]
)
original_cwd = os.getcwd()
try:
os.chdir(os.path.join(args.package_dir, "compiler_gym"))
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/llvm/**", recursive=True)
)
setup_kwargs["package_data"]["compiler_gym"].extend(
glob.glob("envs/mlir/service/google_benchmark/**", recursive=True)
)
finally:
os.chdir(original_cwd)
if args.get_wheel_filename:
# Instead of generating the wheel file,
# print its filename.
file_name = wheel_filename(**setup_kwargs)
sys.stdout.write(file_name)
else:
setuptools.setup(**setup_kwargs)
| CompilerGym-development | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import compiler_gym
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
def test_compiler_gym_make():
"""Test that compiler_gym.make() is equivalent to gym.make()."""
with compiler_gym.make("llvm-v0") as env:
assert isinstance(env, LlvmEnv)
if __name__ == "__main__":
main()
| CompilerGym-development | tests/make_test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/envs."""
import gym
import pytest
from flaky import flaky
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_benchmark_constructor_arg(env: LlvmEnv):
env.close() # Fixture only required to pull in dataset.
with gym.make("llvm-v0", benchmark="cbench-v1/dijkstra") as env:
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_benchmark_setter(env: LlvmEnv):
env.benchmark = "benchmark://cbench-v1/dijkstra"
assert env.benchmark != "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_benchmark_set_in_reset(env: LlvmEnv):
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
@pytest.mark.parametrize("reward_space", ["IrInstructionCount", "ObjectTextSizeBytes"])
def test_reward_space_setter(env: LlvmEnv, reward_space: str):
env.reward_space = reward_space
assert env.reward_space == reward_space
env.reset()
assert env.reward_space == reward_space
@pytest.mark.parametrize("reward_space", ["IrInstructionCount", "ObjectTextSizeBytes"])
def test_reward_space_set_in_reset(env: LlvmEnv, reward_space: str):
env.reset(reward_space=reward_space)
assert env.reward_space == reward_space
env.reset()
assert env.reward_space == reward_space
@pytest.mark.parametrize(
"observation_space", ["IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_space_setter(env: LlvmEnv, observation_space: str):
env.observation_space = observation_space
assert env.observation_space_spec == observation_space
env.reset()
assert env.observation_space_spec == observation_space
@pytest.mark.parametrize(
"observation_space", ["IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_space_set_in_reset(env: LlvmEnv, observation_space: str):
env.reset(observation_space=observation_space)
assert env.observation_space_spec == observation_space
env.reset()
assert env.observation_space_spec == observation_space
def test_uri_substring_no_match(env: LlvmEnv):
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.raises(LookupError):
env.reset(benchmark="benchmark://cbench-v1/crc3")
with pytest.raises(LookupError):
env.reset(benchmark="benchmark://cbench-v1/cr")
def test_uri_substring_candidate_no_match_infer_scheme(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.raises(LookupError):
env.reset(benchmark="cbench-v1/crc3")
with pytest.raises(LookupError):
env.reset(benchmark="cbench-v1/cr")
def test_reset_to_force_benchmark(env: LlvmEnv):
"""Test that calling reset() with a benchmark forces that benchmark to
be used for every subsequent episode.
"""
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
for _ in range(10):
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_unset_forced_benchmark(env: LlvmEnv):
"""Test that setting benchmark "unsets" the previous benchmark."""
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_change_benchmark_mid_episode(env: LlvmEnv):
"""Test that changing the benchmark while in an episode has no effect until
the next call to reset()."""
env.reset(benchmark="benchmark://cbench-v1/crc32")
assert env.benchmark == "benchmark://cbench-v1/crc32"
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/dijkstra"
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
def test_set_benchmark_invalid_type(env: LlvmEnv):
with pytest.raises(TypeError) as ctx:
env.benchmark = 10
assert str(ctx.value) == "Expected a Benchmark or str, received: 'int'"
def test_gym_make_kwargs():
"""Test that passing kwargs to gym.make() are forwarded to environment
constructor.
"""
with gym.make(
"llvm-v0", observation_space="Autophase", reward_space="IrInstructionCount"
) as env:
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
def test_step_session_id_not_found(env: LlvmEnv):
"""Test that step() recovers gracefully from an unknown session error from
the service."""
env._session_id = 15 # pylint: disable=protected-access
observation, reward, done, info = env.step(0)
assert done
assert info["error_details"] == "Session not found: 15"
assert observation is None
assert reward is None
assert not env.in_episode
@pytest.fixture(scope="function")
def remote_env() -> LlvmEnv:
"""A test fixture that yields a connection to a remote service."""
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
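# A minimal usage sketch for the fixture above (hypothetical test, not part of
# the original suite): the fixture is consumed like any other pytest fixture.
#
#   def test_remote_reset(remote_env: LlvmEnv):
#       remote_env.reset(benchmark="benchmark://cbench-v1/crc32")
#       assert remote_env.in_episode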
@flaky # step() can fail.
def test_switch_default_reward_space_in_episode(env: LlvmEnv):
"""Test that switching reward space during an episode resets the cumulative
episode reward.
"""
env.reset(reward_space=None)
_, _, done, info = env.step(0)
assert not done, info
assert env.episode_reward is None
env.reward_space = "IrInstructionCount"
assert env.episode_reward == 0
_, _, done, info = env.step(0)
assert not done, info
assert env.episode_reward is not None
@flaky # step() can fail.
def test_set_same_default_reward_space_in_episode(env: LlvmEnv):
"""Test that setting the reward space during an episode does not reset the
cumulative episode reward if the reward space is unchanged.
"""
env.reset(reward_space="IrInstructionCount")
env.episode_reward = 10
# No change to the reward space.
env.reward_space = "IrInstructionCount"
assert env.episode_reward == 10
# Change in reward space.
env.reward_space = "IrInstructionCountOz"
assert env.episode_reward == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/compiler_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the copy() and deepcopy() operators on ClientServiceCompilerEnv."""
from copy import copy, deepcopy
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_forbidden_shallow_copy(env: LlvmEnv):
"""Test that shallow copy operator is explicitly forbidden."""
with pytest.raises(
TypeError,
match=r"^ClientServiceCompilerEnv instances do not support shallow copies. Use deepcopy\(\)",
):
copy(env)
def test_deep_copy(env: LlvmEnv):
"""Test that deep copy creates an independent copy."""
env.reset()
with deepcopy(env) as cpy:
assert cpy.state == env.state
env.step(env.action_space.sample())
assert cpy.state != env.state
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/env_copy_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validate."""
import gym
import pytest
from compiler_gym import CompilerEnvState, validate_states
from tests.test_main import main
@pytest.mark.parametrize("inorder", (False, True))
@pytest.mark.parametrize("nproc", (1, 2))
def test_validate_states_lambda_callback(inorder, nproc):
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
results = list(
validate_states(
make_env=lambda: gym.make("llvm-v0"),
states=[state],
inorder=inorder,
nproc=nproc,
)
)
assert len(results) == 1
assert results[0].okay()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/validate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pkg_resources
import compiler_gym
from compiler_gym.util.runfiles_path import runfiles_path
from packaging import version
from tests.pytest_plugins.common import bazel_only, install_test_only
from tests.test_main import main
def test_version_dunder():
assert isinstance(compiler_gym.__version__, str)
def test_version_dunder_format():
version.parse(compiler_gym.__version__)
@install_test_only
def test_setuptools_version():
version = pkg_resources.require("compiler_gym")[0].version
assert version == compiler_gym.__version__
@bazel_only
def test_expected_version():
"""Test that embedded compiler gym version matches VERSION file."""
with open(runfiles_path("VERSION")) as f:
version = f.read().strip()
assert version == compiler_gym.__version__
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/version_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:compiler_env_state."""
import json
from io import StringIO
from pathlib import Path
import pytest
import requests
from pydantic import ValidationError as PydanticValidationError
from compiler_gym import CompilerEnvState, CompilerEnvStateWriter
from compiler_gym.compiler_env_state import CompilerEnvStateReader
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_state_from_dict_empty():
with pytest.raises(PydanticValidationError):
CompilerEnvState(**{})
def test_state_invalid_walltime():
with pytest.raises(PydanticValidationError, match="Walltime cannot be negative"):
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=-1,
reward=1.5,
commandline="",
)
def test_state_to_json_from_dict():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=100,
reward=1.5,
commandline="-a -b -c",
)
state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
assert state_from_dict.walltime == 100
assert state_from_dict.reward == 1.5
assert state_from_dict.commandline == "-a -b -c"
def test_state_to_json_from_dict_no_reward():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
)
state_from_dict = CompilerEnvState(**json.loads(original_state.json()))
assert state_from_dict.benchmark == "benchmark://cbench-v0/foo"
assert state_from_dict.walltime == 100
assert state_from_dict.reward is None
assert state_from_dict.commandline == "-a -b -c"
def test_state_equality_different_types():
state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
assert not state == 5 # noqa testing __eq__
assert state != 5 # testing __ne__
def test_state_equality_same():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert not a != b # noqa testing __ne__
def test_state_equality_different_walltime():
"""Test that walltime is not compared."""
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=10, commandline="-a -b -c"
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert not a != b # noqa testing __ne__
def test_state_equality_one_sided_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=5, commandline="-a -b -c"
)
assert a == b # testing __eq__
assert b == a # testing __eq__
assert not a != b # noqa testing __ne__
assert not b != a # noqa testing __ne__
def test_state_equality_equal_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
assert a == b # testing __eq__
assert b == a # testing __eq__
assert not a != b # noqa testing __ne__
assert not b != a # noqa testing __ne__
def test_state_equality_unequal_reward():
a = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
b = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=3,
)
assert not a == b # noqa testing __eq__
    assert not b == a  # noqa testing __eq__
assert a != b # testing __ne__
assert b != a # testing __ne__
def test_compiler_env_state_writer():
buf = StringIO()
writer = CompilerEnvStateWriter(buf)
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=True,
)
assert buf.getvalue() == (
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
def test_compiler_env_state_writer_no_header():
buf = StringIO()
writer = CompilerEnvStateWriter(buf, header=False)
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=True,
)
assert buf.getvalue() == "benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
@pytest.mark.parametrize("flush", range(1))
def test_compiler_env_state_writer_with_statement(tmpwd: Path, flush: bool):
path = Path("results.csv")
assert not path.is_file() # Sanity check.
f = open(path, "w")
with CompilerEnvStateWriter(f) as writer:
writer.write_state(
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
),
flush=flush,
)
assert f.closed
with open(path) as f:
assert f.read() == (
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
def test_compiler_env_state_reader():
buf = StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_no_header():
buf = StringIO("benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n")
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_with_header():
buf = StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_with_header_out_of_order_columns():
buf = StringIO(
"commandline,reward,benchmark,walltime\n"
"-a -b -c,2.0,benchmark://cbench-v0/foo,5.0\n"
)
reader = CompilerEnvStateReader(buf)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_compiler_env_state_reader_empty_input():
buf = StringIO("")
reader = CompilerEnvStateReader(buf)
assert list(reader) == []
def test_compiler_env_state_reader_header_only():
buf = StringIO("benchmark,reward,walltime,commandline\n")
reader = CompilerEnvStateReader(buf)
assert list(reader) == []
def test_state_from_csv_invalid_format():
buf = StringIO("abcdef\n")
reader = CompilerEnvStateReader(buf)
with pytest.raises(
ValueError, match=r"Expected 4 columns in the first row of CSV: \['abcdef'\]"
):
next(iter(reader))
def test_state_serialize_deserialize_equality():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=100,
reward=1.5,
commandline="-a -b -c",
)
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(original_state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
assert state_from_csv.walltime == 100
assert state_from_csv.reward == 1.5
assert state_from_csv.commandline == "-a -b -c"
def test_state_serialize_deserialize_equality_no_reward():
original_state = CompilerEnvState(
benchmark="benchmark://cbench-v0/foo", walltime=100, commandline="-a -b -c"
)
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(original_state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.benchmark == "benchmark://cbench-v0/foo"
assert state_from_csv.walltime == 100
assert state_from_csv.reward is None
assert state_from_csv.commandline == "-a -b -c"
def test_read_paths_stdin(monkeypatch):
monkeypatch.setattr(
"sys.stdin",
StringIO(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
),
)
reader = CompilerEnvStateReader.read_paths(["-"])
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_read_paths_file(tmp_path):
file_dir = f"{tmp_path}/test.csv"
with open(file_dir, "w") as csv_file:
csv_file.write(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
)
reader = CompilerEnvStateReader.read_paths([file_dir])
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def test_read_paths_url(monkeypatch):
urls = ["https://compilergym.ai/benchmarktest.csv"]
class MockResponse:
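        # Minimal stand-in for requests.Response: only .text and .status_code are read here.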
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
def ok_mock_response(*args, **kwargs):
return MockResponse(
(
"benchmark,reward,walltime,commandline\n"
"benchmark://cbench-v0/foo,2.0,5.0,-a -b -c\n"
),
200,
)
monkeypatch.setattr(requests, "get", ok_mock_response)
reader = CompilerEnvStateReader.read_paths(urls)
assert list(reader) == [
CompilerEnvState(
benchmark="benchmark://cbench-v0/foo",
walltime=5,
commandline="-a -b -c",
reward=2,
)
]
def bad_mock_response(*args, **kwargs):
return MockResponse("", 404)
monkeypatch.setattr(requests, "get", bad_mock_response)
with pytest.raises(requests.exceptions.InvalidURL):
reader = CompilerEnvStateReader.read_paths(urls)
list(reader)
def test_read_paths_bad_inputs():
bad_dirs = [
"/fake/directory/file.csv",
"fake/directory/file.csv",
"https://www.compilergym.ai/benchmark",
"htts://www.compilergym.ai/benchmark.csv",
"htts://www.compilergym.ai/benchmark",
]
with pytest.raises(FileNotFoundError):
reader = CompilerEnvStateReader.read_paths(bad_dirs)
list(reader)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/compiler_env_state_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:random_search."""
import tempfile
from pathlib import Path
import gym
from compiler_gym.random_search import random_search, replay_actions_from_logs
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main
def make_env():
env = gym.make("llvm-autophase-ic-v0")
env.benchmark = "cbench-v1/dijkstra"
return env
def test_random_search_smoke_test():
with tempfile.TemporaryDirectory() as tmp:
outdir = Path(tmp)
set_command_line_flags(["argv0"])
env = random_search(
make_env=make_env,
outdir=outdir,
patience=50,
total_runtime=3,
nproc=1,
skip_done=False,
)
env.close()
assert (outdir / "random_search.json").is_file()
assert (outdir / "random_search_progress.csv").is_file()
assert (outdir / "random_search_best_actions.txt").is_file()
assert (outdir / "optimized.bc").is_file()
with make_env() as env:
replay_actions_from_logs(env, Path(outdir))
assert (outdir / "random_search_best_actions_progress.csv").is_file()
assert (outdir / "random_search_best_actions_commandline.txt").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/random_search_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from getpass import getuser
from typing import List, Optional
import pytest
from compiler_gym.util import debug_util as dbg
def main(extra_pytest_args: Optional[List[str]] = None, debug_level: int = 1):
"""The main entry point for the pytest runner.
An example file which uses this:
from compiler_gym.util.test_main import main
def test_foo():
assert 1 + 1 == 2
if __name__ == "__main__":
main()
In the above, the single test_foo test will be executed.
:param extra_pytest_args: A list of additional command line options to pass
to pytest.
:param debug_level: The debug level to use to run tests. Higher levels are
more verbose and may be useful for diagnosing test failures. Normally
CompilerGym executes with a debug level of 0.
"""
dbg.set_debug_level(debug_level)
# Keep test data isolated from user data.
os.environ[
"COMPILER_GYM_SITE_DATA"
] = f"/tmp/compiler_gym_{getuser()}/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = f"/tmp/compiler_gym_{getuser()}/tests/cache"
pytest_args = sys.argv + [
# Run pytest verbosely to print out test names to provide context in
# case of failures.
"-vv",
# Disable "Module already imported" warnings. See:
# https://docs.pytest.org/en/latest/how-to/usage.html#calling-pytest-from-python-code
"-W",
"ignore:Module already imported:pytest.PytestWarning",
# Disable noisy "Flaky tests passed" messages.
"--no-success-flaky-report",
]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
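    # For example (hypothetical values): TEST_TOTAL_SHARDS=4 and
    # TEST_SHARD_INDEX=1 yield the flags --shard-id=1 --num-shards=4, so this
    # process runs only its quarter of the collected tests.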
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
returncode = pytest.main(pytest_args)
# By default pytest will fail with an error if no tests are collected.
# Disable that behavior here (with a warning) since there are legitimate
# cases where we may want to run a test file with no tests in it. For
# example, when running on a continuous integration service where all the
# tests are marked with the @skip_on_ci decorator.
if returncode == pytest.ExitCode.NO_TESTS_COLLECTED.value:
print(
"WARNING: The test suite was empty. Is that intended?",
file=sys.stderr,
)
returncode = 0
sys.exit(returncode)
|
CompilerGym-development
|
tests/test_main.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym:validation_result."""
import json
import pytest
from compiler_gym import CompilerEnvState, ValidationError, ValidationResult
from tests.test_main import main
def test_validation_error_equality():
e1 = ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
e2 = ValidationError( # Same as e1
type="Syntax Error",
data={"data": [1, 2, 3]},
)
e3 = ValidationError( # Different "type"
type="Foobar",
data={"data": [1, 2, 3]},
)
e4 = ValidationError( # Different "data" dict
type="Syntax Error",
data={"data": [1, 2, 3, 4]},
)
assert e1 == e2
assert e1 != e3
assert e3 != e4
def test_validation_error_json():
"""Check that JSON serialize/de-serialize produces equivalent errors."""
error = ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
assert ValidationError(**json.loads(error.json())) == error
def test_validation_result_json():
result = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
assert ValidationResult(**json.loads(result.json())) == result
def test_validation_result_equality_different_states():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test/a",
commandline="test",
walltime=1,
),
walltime=3,
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test/b",
commandline="test",
walltime=1,
),
walltime=3,
)
assert a != b
def test_validation_result_equality_different_walltimes():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=10,
)
assert a == b
def test_validation_result_equality_different_errors_order():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
ValidationError(
type="Runtime Error",
data={"a": "b"},
),
],
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Runtime Error",
data={"a": "b"},
),
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
],
)
assert a == b
def test_validation_result_join_no_inputs():
with pytest.raises(ValueError, match=r"^No states to join$"):
ValidationResult.join([])
def test_validation_result_join_one_input():
result = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
joined_result = ValidationResult.join([result])
assert result == joined_result
def test_validation_result_join_two_inputs_different_errors():
a = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=1,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
)
],
)
b = ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=3,
errors=[
ValidationError(
type="Type Error",
data={"a": "b"},
)
],
)
c = ValidationResult.join([a, b])
assert c == ValidationResult(
state=CompilerEnvState(
benchmark="benchmark://example-compiler-v0/test",
commandline="test",
walltime=10,
),
walltime=3,
errors=[
ValidationError(
type="Syntax Error",
data={"data": [1, 2, 3]},
),
ValidationError(
type="Type Error",
data={"a": "b"},
),
],
)
# Test walltime, which is excluded from equality comparisons.
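    # join() sums the walltimes of its inputs: 3 + 3 == 6.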
assert c.walltime == 6
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/validation_result_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CommandlineWithTerminalAction, ConstrainedCommandline
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_commandline_with_terminal_action(env: LlvmEnv):
mem2reg_unwrapped_index = env.action_space["-mem2reg"]
env = CommandlineWithTerminalAction(env)
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
assert mem2reg_index == mem2reg_unwrapped_index
env.reset()
_, _, done, info = env.step(mem2reg_index)
assert not done, info
_, _, done, info = env.multistep([reg2mem_index, reg2mem_index])
assert not done, info
assert env.actions == [mem2reg_index, reg2mem_index, reg2mem_index]
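    # The wrapper appends a terminal action as the final entry in the flags
    # list; selecting it ends the episode.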
_, _, done, info = env.step(len(env.action_space.flags) - 1)
assert done
assert "terminal_action" in info
def test_commandline_with_terminal_action_fork(env: LlvmEnv):
env = CommandlineWithTerminalAction(env)
assert env.unwrapped.action_space != env.action_space # Sanity check.
with env.fork() as fkd:
assert fkd.action_space == env.action_space
_, _, done, _ = env.step(len(env.action_space.flags) - 1)
assert done
_, _, done, _ = fkd.step(len(env.action_space.flags) - 1)
assert done
def test_constrained_action_space(env: LlvmEnv):
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
env = ConstrainedCommandline(env=env, flags=["-mem2reg", "-reg2mem"])
assert env.action_space.n == 2
assert env.action_space.flags == ["-mem2reg", "-reg2mem"]
assert env.action(0) == mem2reg_index
assert env.action([0, 1]) == [mem2reg_index, reg2mem_index]
env.reset()
env.step(0)
env.multistep([1, 1])
assert env.actions == [0, 1, 1]
def test_constrained_action_space_fork(env: LlvmEnv):
mem2reg_index = env.action_space["-mem2reg"]
reg2mem_index = env.action_space["-reg2mem"]
env = ConstrainedCommandline(env=env, flags=["-mem2reg", "-reg2mem"])
fkd = env.fork()
try:
assert fkd.action_space.n == 2
assert fkd.action_space.flags == ["-mem2reg", "-reg2mem"]
assert fkd.action(0) == mem2reg_index
assert fkd.action([0, 1]) == [mem2reg_index, reg2mem_index]
fkd.reset()
fkd.step(0)
fkd.multistep([1, 1])
assert fkd.actions == [0, 1, 1]
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/commandline_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.wrappers.llvm."""
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.wrappers import RuntimePointEstimateReward
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_invalid_runtime_count(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=-10)
with pytest.raises(
ValueError, match="runtimes_per_observation_count must be >= 1. Received: -10"
):
env.reset()
def test_invalid_warmup_count(env: LlvmEnv):
env = RuntimePointEstimateReward(env, warmup_count=-10)
with pytest.raises(
ValueError,
match="warmup_runs_count_per_runtime_observation must be >= 0. Received: -10",
):
env.reset()
def test_reward_range(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=3)
assert env.reward_range == (-float("inf"), float("inf"))
def test_reward_range_not_runnable_benchmark(env: LlvmEnv):
env = RuntimePointEstimateReward(env, runtime_count=3)
with pytest.raises(
BenchmarkInitError, match=r"^Benchmark is not runnable: benchmark://npb-v0/1$"
):
env.reset(benchmark="benchmark://npb-v0/1")
@flaky # Runtime can fail
def test_fork(env: LlvmEnv):
env = RuntimePointEstimateReward(env)
with env.fork() as fkd:
assert fkd.reward_space_spec.name == "runtime"
@pytest.mark.parametrize("runtime_count", [1, 3, 5])
@pytest.mark.parametrize("warmup_count", [0, 1, 3])
@pytest.mark.parametrize("estimator", [np.median, min])
@flaky # Runtime can fail
def test_reward_values(env: LlvmEnv, runtime_count, warmup_count, estimator):
env = RuntimePointEstimateReward(
env, runtime_count=runtime_count, warmup_count=warmup_count, estimator=estimator
)
env.reset()
assert env.reward_space_spec.runtime_count == runtime_count
assert env.reward_space_spec.warmup_count == warmup_count
assert env.reward_space_spec.estimator == estimator
_, reward_a, done, info = env.step(env.action_space.sample())
assert not done, info
_, reward_b, done, info = env.step(env.action_space.sample())
assert not done, info
_, reward_c, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.episode_reward == reward_a + reward_b + reward_c
assert reward_a or reward_b or reward_c
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/llvm_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import Counter
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_Counter_reset(env: LlvmEnv):
with Counter(env) as env:
env.reset()
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 1,
"step": 0,
}
env.reset()
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 2,
"step": 0,
}
def test_Counter_step(env: LlvmEnv):
with Counter(env) as env:
env.reset()
env.step(0)
assert env.counters == {
"close": 0,
"fork": 0,
"reset": 1,
"step": 1,
}
def test_Counter_double_close(env: LlvmEnv):
with Counter(env) as env:
env.close()
env.close()
assert env.counters == {
"close": 2,
"fork": 0,
"reset": 0,
"step": 0,
}
# Implicit close in `with` statement.
assert env.counters == {
"close": 3,
"fork": 0,
"reset": 0,
"step": 0,
}
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/counter_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CycleOverBenchmarks, TimeLimit
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_wrapped_close(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.close()
assert env.service is None
def test_wrapped_fork_type(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
fkd = env.fork()
try:
assert isinstance(fkd, TimeLimit)
finally:
fkd.close()
def test_wrapped_step_multi_step(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
env.multistep([0, 0, 0])
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
assert env.actions == [0, 0, 0]
def test_wrapped_custom_step_args(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=5)
env.reset(benchmark="benchmark://cbench-v1/dijkstra")
(ic,), _, _, _ = env.step(0, observation_spaces=["IrInstructionCount"])
assert isinstance(ic, int)
def test_time_limit_reached(env: LlvmEnv):
env = TimeLimit(env, max_episode_steps=3)
env.reset()
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert done, info
assert info["TimeLimit.truncated"], info
_, _, done, info = env.step(0)
assert done, info
assert info["TimeLimit.truncated"], info
def test_time_limit_fork(env: LlvmEnv):
"""Check that the time limit state is copied on fork()."""
env = TimeLimit(env, max_episode_steps=3)
env.reset()
_, _, done, info = env.step(0) # 1st step
assert not done, info
fkd = env.fork()
try:
_, _, done, info = env.step(0) # 2nd step
assert not done, info
_, _, done, info = fkd.step(0) # 2nd step
assert not done, info
_, _, done, info = env.step(0) # 3rd step
assert done, info
_, _, done, info = fkd.step(0) # 3rd step
assert done, info
finally:
fkd.close()
def test_time_limit(env: LlvmEnv):
"""Check CycleOverBenchmarks does not break TimeLimit"""
env = TimeLimit(env, max_episode_steps=3)
env = CycleOverBenchmarks(
env,
benchmarks=[
"benchmark://cbench-v1/crc32",
],
)
env.reset()
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert not done, info
_, _, done, info = env.step(0)
assert done
assert info["TimeLimit.truncated"], info
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/time_limit_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import (
CycleOverBenchmarks,
CycleOverBenchmarksIterator,
IterateOverBenchmarks,
RandomOrderBenchmarks,
)
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_iterate_over_benchmarks(env: LlvmEnv):
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
with pytest.raises(StopIteration):
env.reset()
def test_iterate_over_benchmarks_fork(env: LlvmEnv):
"""Test that fork() copies over benchmark iterator state."""
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd = env.fork()
try:
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
finally:
fkd.close()
def test_iterate_over_benchmarks_fork_shared_iterator(env: LlvmEnv):
"""Test fork() using a single benchmark iterator shared between forks."""
env = IterateOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
fork_shares_iterator=True,
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd = env.fork()
try:
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
finally:
fkd.close()
def test_cycle_over_benchmarks(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_cycle_over_benchmarks_fork(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
fkd = env.fork()
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
try:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
finally:
fkd.close()
def test_cycle_over_benchmarks_fork_shared_iterator(env: LlvmEnv):
env = CycleOverBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/dijkstra",
],
fork_shares_iterator=True,
)
fkd = env.fork()
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
try:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/crc32"
finally:
fkd.close()
def test_cycle_over_benchmarks_iterator(env):
env = CycleOverBenchmarksIterator(
env,
make_benchmark_iterator=lambda: (
"benchmark://cbench-v1/dijkstra",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/adpcm",
),
)
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/adpcm"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/qsort"
with env.fork() as fkd:
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/dijkstra"
fkd.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
assert env.benchmark == "benchmark://cbench-v1/qsort"
env.reset()
assert fkd.benchmark == "benchmark://cbench-v1/qsort"
assert env.benchmark == "benchmark://cbench-v1/adpcm"
def test_random_order_benchmarks(env: LlvmEnv):
env = RandomOrderBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
def test_random_order_benchmarks_fork(env: LlvmEnv):
env = RandomOrderBenchmarks(
env=env,
benchmarks=[
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
],
)
env.reset()
assert env.benchmark in {
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/qsort",
}
fkd = env.fork()
try:
fkd.reset()
env.reset()
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/datasets_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/wrappers/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from pytest import warns
from compiler_gym.datasets import Datasets
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ActionWrapper, CompilerEnvWrapper
from compiler_gym.wrappers import ObservationWrapper as CoreObservationWrapper
from compiler_gym.wrappers import RewardWrapper as CoreRewardWrapper
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
class ObservationWrapper(CoreObservationWrapper):
def __init__(self, env):
super().__init__(env)
def convert_observation(self, observation):
return observation
class RewardWrapper(CoreRewardWrapper):
def __init__(self, env):
super().__init__(env)
def convert_reward(self, reward):
return reward
@pytest.fixture(
scope="module",
params=[
ActionWrapper,
CompilerEnvWrapper,
ObservationWrapper,
RewardWrapper,
],
)
def wrapper_type(request):
"""A test fixture that yields one of the CompilerGym wrapper types."""
return request.param
def test_wrapped_close(env: LlvmEnv, wrapper_type):
env = wrapper_type(env)
env.close()
assert env.service is None
def test_wrapped_properties(env: LlvmEnv, wrapper_type):
"""Test accessing the non-standard properties."""
with wrapper_type(env) as env:
assert env.actions == []
assert env.benchmark
assert isinstance(env.datasets, Datasets)
def test_wrapped_fork_type(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper."""
env = wrapper_type(env)
fkd = env.fork()
try:
assert isinstance(fkd, wrapper_type)
finally:
fkd.close()
def test_wrapped_fork_subtype(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper subtype."""
class MyWrapper(wrapper_type):
def __init__(self, env):
super().__init__(env)
env = MyWrapper(env)
fkd = env.fork()
try:
assert isinstance(fkd, MyWrapper)
finally:
fkd.close()
def test_wrapped_fork_subtype_custom_constructor(env: LlvmEnv, wrapper_type):
"""Test forking a wrapper with a custom constructor. This requires a custom
fork() implementation."""
class MyWrapper(wrapper_type):
def __init__(self, env, foo):
super().__init__(env)
self.foo = foo
def fork(self):
return MyWrapper(self.env.fork(), foo=self.foo)
env = MyWrapper(env, foo=1)
fkd = env.fork()
try:
assert isinstance(fkd, MyWrapper)
assert fkd.foo == 1
finally:
fkd.close()
def test_wrapped_step_multi_step(env: LlvmEnv):
"""Test passing a list of actions to step()."""
env = CompilerEnvWrapper(env)
env.reset()
env.multistep([0, 0, 0])
assert env.actions == [0, 0, 0]
def test_wrapped_step_custom_args(env: LlvmEnv, wrapper_type):
"""Test passing the custom CompilerGym step() keyword arguments."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
def action(self, action):
return action # pass thru
def convert_reward(self, reward):
return reward
env = MyWrapper(env)
env.reset()
(ir, ic), (icr, icroz), _, _ = env.multistep(
actions=[0, 0, 0],
observation_spaces=["Ir", "IrInstructionCount"],
reward_spaces=["IrInstructionCount", "IrInstructionCountOz"],
)
assert isinstance(ir, str)
assert isinstance(ic, int)
assert isinstance(icr, float)
assert isinstance(icroz, float)
assert env.unwrapped.observation.spaces["Ir"].space.contains(ir)
assert env.unwrapped.observation.spaces["IrInstructionCount"].space.contains(ic)
def test_wrapped_benchmark(env: LlvmEnv, wrapper_type):
"""Test that benchmark property has expected values."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
env.observation_space = "Ir"
env = MyWrapper(env)
ir_a = env.reset(benchmark="benchmark://cbench-v1/dijkstra")
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
ir_b = env.reset(benchmark="benchmark://cbench-v1/qsort")
assert env.benchmark == "benchmark://cbench-v1/qsort"
# Check that the observations for different benchmarks are different.
assert ir_a != ir_b
def test_wrapped_set_benchmark(env: LlvmEnv, wrapper_type):
"""Test that the benchmark attribute can be set on wrapped classes."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
env = MyWrapper(env)
# Set the benchmark attribute and check that it propagates.
env.benchmark = "benchmark://cbench-v1/dijkstra"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/dijkstra"
# Repeat again for a different benchmark.
with warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = "benchmark://cbench-v1/crc32"
env.reset()
assert env.benchmark == "benchmark://cbench-v1/crc32"
def test_wrapped_env_in_episode(env: LlvmEnv, wrapper_type):
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation
env = MyWrapper(env)
assert not env.in_episode
env.reset()
assert env.in_episode
def test_wrapped_env_changes_default_spaces(env: LlvmEnv, wrapper_type):
"""Test when an environment wrapper changes the default observation and reward spaces."""
class MyWrapper(wrapper_type):
def __init__(self, env: LlvmEnv):
super().__init__(env)
self.env.observation_space = "Autophase"
self.env.reward_space = "IrInstructionCount"
def convert_observation(self, observation):
return observation # pass thru
env = MyWrapper(env)
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
observation = env.reset()
assert env.observation_space.contains(observation)
def test_wrapped_env_change_spaces(env: LlvmEnv, wrapper_type):
"""Test changing the observation and reward spaces on a wrapped environment."""
class MyWrapper(wrapper_type):
def convert_observation(self, observation):
return observation # pass thru
def convert_reward(self, reward):
return reward # pass thru
env = MyWrapper(env)
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
assert env.reward_space.name == "IrInstructionCount"
def test_wrapped_action(mocker, env: LlvmEnv):
class MyWrapper(ActionWrapper):
def action(self, action):
return action - 1
def reverse_action(self, action):
return action + 1
env = MyWrapper(env)
mocker.spy(env, "action")
env.reset()
env.step(1)
env.step(2)
assert env.action.call_count == 2 # pylint: disable=no-member
assert env.actions == [0, 1]
def test_wrapped_observation(mocker, env: LlvmEnv):
"""Test using an ObservationWrapper that returns the length of the Ir string."""
class MyWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = "Ir"
def convert_observation(self, observation):
return len(observation)
env = MyWrapper(env)
assert env.reset() > 0
observation, _, _, _ = env.step(0)
assert observation > 0
def test_wrapped_observation_missing_definition(env: LlvmEnv):
with pytest.raises(TypeError):
env = CoreObservationWrapper(env)
def test_wrapped_reward(env: LlvmEnv):
class MyWrapper(RewardWrapper):
def convert_reward(self, reward):
return -5
env.reward_space = "IrInstructionCount"
env = MyWrapper(env)
env.reset()
_, reward, _, _ = env.step(0)
assert reward == -5
assert env.episode_reward == -5
_, reward, _, _ = env.step(0)
assert reward == -5
assert env.episode_reward == -10
def test_wrapped_env_close(env: LlvmEnv):
wrapped = CompilerEnvWrapper(env)
wrapped.reset()
assert wrapped.service is not None
wrapped.close()
assert wrapped.service is None
def test_wrapped_env_custom_close(env: LlvmEnv):
"""Test that a custom close() method is called on wrapped environments."""
class MyWrapper(CompilerEnvWrapper):
def __init__(self, env: LlvmEnv):
super().__init__(env)
self.custom_close = False
def close(self):
self.custom_close = True
self.env.close()
env = MyWrapper(env)
assert not env.custom_close
env.close()
assert env.custom_close
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/core_wrappers_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import CompilerEnvWrapper, SynchronousSqliteLogger
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_SynchronousSqliteLogger_creates_file(env: LlvmEnv, tmp_path):
db_path = tmp_path / "example.db"
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env = SynchronousSqliteLogger(env, db_path)
env.reset()
env.step(0)
env.flush()
assert db_path.is_file()
def test_SynchronousSqliteLogger_requires_llvm_env(tmp_path):
with pytest.raises(TypeError, match="Requires LlvmEnv base environment"):
SynchronousSqliteLogger(1, tmp_path / "example.db")
def test_SynchronousSqliteLogger_wrapped_env(env: LlvmEnv, tmp_path):
env = CompilerEnvWrapper(env)
env = SynchronousSqliteLogger(env, tmp_path / "example.db")
env.reset()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/sqlite_logger_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.wrappers.llvm."""
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ValidateBenchmarkAfterEveryStep
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_ValidateBenchmarkAfterEveryStep_valid(env: LlvmEnv):
env.reset()
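    # Stub out benchmark validation so that it reports no errors.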
type(env.benchmark).ivalidate = lambda *_: iter(())
env = ValidateBenchmarkAfterEveryStep(env, reward_penalty=-5)
_, reward, done, info = env.step(0)
assert reward != -5
assert not done
assert "error_details" not in info
@pytest.mark.parametrize("reward_penalty", [-5, 10])
def test_ValidateBenchmarkAfterEveryStep_invalid(env: LlvmEnv, reward_penalty):
env.reset()
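    # Stub out benchmark validation so that it reports a single error.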
type(env.benchmark).ivalidate = lambda *_: iter(["Oh no!"])
env = ValidateBenchmarkAfterEveryStep(env, reward_penalty=reward_penalty)
_, reward, done, info = env.step(0)
assert reward == reward_penalty
assert done
assert info["error_details"] == "Oh no!"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/validation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ForkOnStep
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_ForkOnStep_step(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
assert env.stack == []
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
assert env.stack[0].actions == []
env.step(1)
assert env.actions == [0, 1]
assert len(env.stack) == 2
assert env.stack[1].actions == [0]
assert env.stack[0].actions == []
def test_ForkOnStep_reset(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
env.reset()
assert env.actions == []
assert env.stack == []
def test_ForkOnStep_double_close(env: LlvmEnv):
with ForkOnStep(env) as env:
env.close()
env.close()
def test_ForkOnStep_undo(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
env.undo()
assert env.actions == []
assert not env.stack
# Undo of an empty stack:
env.undo()
assert env.actions == []
assert not env.stack
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/fork_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/loop_tool/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the loop_tool CompilerGym environment."""
import loop_tool_py as lt
import pytest
from flaky import flaky
import compiler_gym
from tests.test_main import main
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_basic(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "flops"
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
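        # Integer actions in the "simple" action space, per the commented tests
        # below: 0 = toggle_mode, 1 = up, 3 = toggle_thread.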
env.step(0)
env.step(1)
env.step(0)
env.step(1)
env.step(1)
env.step(0)
env.step(1)
env.step(0)
o = env.step(1)
print(o)
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_rand(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "flops"
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/128"
),
action_space="simple",
)
best = 0
for i in range(10):
a = env.action_space.sample()
o = env.step(a)
flops = o[0]
if flops > best:
best = flops
print(best)
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_induced_remainder(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action up
o = env.step(1)
expected = f"""
for a in 341 r 1 : L0 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a' in 3 : L1
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_thread_removal(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_thread
o = env.step(3)
expected = """
for a in 1024 : L0
for a' in 1 : L1
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_thread_addition(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action toggle_thread
o = env.step(3)
expected = f"""
for a in 1024 : L0 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a' in 1 : L1 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/loop_tool/actions_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.fork()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The number of actions to run before and after calling fork().
PRE_FORK_ACTIONS = 10
POST_FORK_ACTIONS = 10
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
"""This test generates a random trajectory and checks that fork() produces
an equivalent state. It then runs a second trajectory on the two
environments to check that behavior is consistent across them.
"""
env.observation_space = "Autophase"
env.reward_space = reward_space
env.reset()
print(f"Running fuzz test of environment {env.benchmark}")
# Take a few warmup steps to get an environment in a random state.
for _ in range(PRE_FORK_ACTIONS):
_, _, done, _ = env.step(env.action_space.sample())
if done: # Broken episode, restart.
break
else:
# Fork the environment and check that the states are equivalent.
fkd = env.fork()
try:
print(env.state) # For debugging in case of error.
assert env.state == fkd.state
# Check that environment states remain equal if identical
# subsequent steps are taken.
for _ in range(POST_FORK_ACTIONS):
action = env.action_space.sample()
observation_a, reward_a, done_a, _ = env.step(action)
observation_b, reward_b, done_b, _ = fkd.step(action)
print(env.state) # For debugging in case of error.
assert done_a == done_b
np.testing.assert_array_almost_equal(observation_a, observation_b)
if reward_a != reward_b:
pytest.fail(
f"Parent environment produced reward {reward_a}, fork produced reward {reward_b}"
)
if done_a:
break # Broken episode, we're done.
assert env.state == fkd.state
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_fork_env_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from time import time
import gym
import numpy as np
import pytest
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
FUZZ_TIME_SECONDS = 2
@pytest.mark.timeout(600)
def test_fuzz(observation_space: str, reward_space: str):
"""Run randomly selected actions on a benchmark until a minimum amount of time has elapsed."""
with gym.make(
"llvm-v0", reward_space=reward_space, observation_space=observation_space
) as env:
benchmark = env.datasets["generator://llvm-stress-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
env.reset(benchmark=benchmark)
# Take random steps until a predetermined amount of time has elapsed.
end_time = time() + FUZZ_TIME_SECONDS
while time() < end_time:
observation, reward, done, _ = env.step(env.action_space.sample())
if done:
# Default-value for observation is an array of zeros.
np.testing.assert_array_equal(
observation, np.zeros((AUTOPHASE_FEATURE_DIM,))
)
assert isinstance(reward, float)
env = gym.make(
"llvm-v0",
reward_space=reward_space,
benchmark=benchmark,
observation_space=observation_space,
)
env.reset()
else:
assert isinstance(observation, np.ndarray)
assert observation.shape == (AUTOPHASE_FEATURE_DIM,)
assert isinstance(reward, float)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_random_actions_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/fuzzing/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.action_space.to_string()."""
import os
import subprocess
from pathlib import Path
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.commands import Popen
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = [
"tests.pytest_plugins.llvm",
"tests.pytest_plugins.common",
]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, tmpwd: Path, llvm_opt: Path, llvm_diff: Path):
"""This test produces a random trajectory and then uses the commandline
generated with opt to check that the states are equivalent.
"""
del tmpwd
env.reset()
env.write_ir("input.ll")
assert Path("input.ll").is_file()
# In case of a failure, create a regression test by copying the body of this
# function and replacing the below line with the commandline printed below.
apply_random_trajectory(
env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE, timeout=30
)
commandline = env.action_space.to_string(env.actions)
print(env.state) # For debugging in case of failure.
# Write the post-trajectory state to file.
env.write_ir("env.ll")
assert Path("env.ll").is_file()
# Run the environment commandline using LLVM opt.
subprocess.check_call(
commandline, env={"PATH": str(llvm_opt.parent)}, shell=True, timeout=60
)
assert Path("output.ll").is_file()
os.rename("output.ll", "opt.ll")
with Popen(
[llvm_diff, "opt.ll", "env.ll"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
) as diff:
stdout, stderr = diff.communicate(timeout=300)
if diff.returncode:
pytest.fail(
f"Opt produced different output to CompilerGym "
f"(returncode: {diff.returncode}):\n{stdout}\n{stderr}"
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_commandline_opt_equivalence_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
"""This test produces a random trajectory, resets the environment, then
replays the trajectory and checks that it produces the same state.
"""
env.observation_space = "Autophase"
env.reward_space = reward_space
benchmark = env.datasets["generator://csmith-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
try:
env.reset(benchmark=benchmark)
except BenchmarkInitError:
return
trajectory = apply_random_trajectory(
env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE
)
print(env.state) # For debugging in case of failure.
env.reset(benchmark=benchmark)
for i, (action, observation, reward, done) in enumerate(trajectory, start=1):
print(f"Replaying step {i}: {env.action_space.flags[action]}")
replay_observation, replay_reward, replay_done, info = env.step(action)
assert done == replay_done, info
np.testing.assert_array_almost_equal(observation, replay_observation)
np.testing.assert_almost_equal(reward, replay_reward)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_trajectory_replay_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for action space determinism."""
import hashlib
import random
import pytest
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.llvm import BENCHMARK_NAMES
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
ACTION_REPETITION_COUNT = 20
def sha1(string: str):
sha1 = hashlib.sha1()
sha1.update(string.encode("utf-8"))
return sha1.hexdigest()
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv):
"""Run an action multiple times from the same starting state and check that
the generated LLVM-IR is the same.
Caveats of this test:
* The initial state is an unoptimized benchmark. If a pass depends
on other passes to take effect, it will not be tested.
* Non-determinism is tested by running the action 20 times. Extremely
unlikely non-determinism may not be detected.
"""
action = env.action_space.sample()
action_name = env.action_space.names[action]
benchmark = random.choice(BENCHMARK_NAMES)
env.observation_space = "Ir"
checksums = set()
for i in range(1, ACTION_REPETITION_COUNT + 1):
ir = env.reset(benchmark=benchmark)
checksum_before = sha1(ir)
ir, _, done, _ = env.step(action)
assert not done
checksums.add(sha1(ir))
if len(checksums) != 1:
pytest.fail(
f"Repeating the {action_name} action {i} times on "
f"{benchmark} produced different states"
)
# An action that has no effect is unlikely to be nondeterministic.
if list(checksums)[0] == checksum_before:
break
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_deterministic_action_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import random
import pytest
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.llvm import VALIDATABLE_CBENCH_URIS
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv):
"""This test generates a random trajectory and validates the semantics."""
benchmark = random.choice(VALIDATABLE_CBENCH_URIS)
num_actions = random.randint(*RANDOM_TRAJECTORY_LENGTH_RANGE)
print(benchmark)
while True:
env.reset(benchmark=benchmark)
for _ in range(num_actions):
_, _, done, _ = env.step(env.action_space.sample())
if done:
break # Broken trajectory, retry.
else:
print(f"Validating state {env.state}")
result = env.validate()
assert result.okay(), result
# Stop the test.
break
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_cbench_validate_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test LLVM backend using llvm-stress."""
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 10)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, observation_space: str, reward_space: str):
"""This test produces a random trajectory using a program generated using
llvm-stress.
"""
benchmark = env.datasets["generator://llvm-stress-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
env.observation_space = observation_space
env.reward_space = reward_space
try:
env.reset(benchmark=benchmark)
except BenchmarkInitError:
return # Benchmark is invalid.
apply_random_trajectory(
env,
random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE,
timeout=10,
)
print(env.state) # For debugging in case of failure.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_stress_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:capture_output."""
import sys
from compiler_gym.util.capture_output import capture_output
from tests.test_main import main
def test_capture_print_statements():
with capture_output() as out:
print("Hello")
print("World!", file=sys.stderr)
assert out.stdout == "Hello\n"
assert out.stderr == "World!\n"
def test_nested_capture():
with capture_output() as outer:
with capture_output() as inner:
print("Hello")
print("World!")
assert inner.stdout == "Hello\n"
assert outer.stdout == "World!\n"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/capture_output_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:timer."""
import logging
import os
from compiler_gym.util import debug_util as dbg
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_get_debug_level_environment_variable(temporary_environ):
del temporary_environ
os.environ.clear()
os.environ["COMPILER_GYM_DEBUG"] = "0"
assert dbg.get_debug_level() == 0
os.environ["COMPILER_GYM_DEBUG"] = "1"
assert dbg.get_debug_level() == 1
def test_get_and_set_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(0)
assert dbg.get_debug_level() == 0
dbg.set_debug_level(1)
assert dbg.get_debug_level() == 1
def test_negative_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(-1)
assert dbg.get_debug_level() == 0
def test_out_of_range_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(15)
assert dbg.get_debug_level() == 15
def test_get_logging_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(0)
assert dbg.get_logging_level() == logging.ERROR
dbg.set_debug_level(1)
assert dbg.get_logging_level() == logging.WARNING
dbg.set_debug_level(2)
assert dbg.get_logging_level() == logging.INFO
dbg.set_debug_level(3)
assert dbg.get_logging_level() == logging.DEBUG
dbg.set_debug_level(4)
assert dbg.get_logging_level() == logging.DEBUG
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/debug_util_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:download."""
import pytest
from compiler_gym.errors import DownloadFailed, TooManyRequests
from compiler_gym.util import download
from compiler_gym.util.runfiles_path import cache_path
from tests.test_main import main
@pytest.mark.parametrize("max_retries", [1, 2, 3, 5, 10])
def test_download_timeout_retry_loop(mocker, max_retries: int):
"""Check that download attempts are repeated with sleep() on error."""
def patched_download(*args):
raise TooManyRequests
mocker.patch.object(download, "sleep")
mocker.patch.object(download, "_do_download_attempt", patched_download)
mocker.spy(download, "_do_download_attempt")
with pytest.raises(TooManyRequests):
download.download(urls="example", max_retries=max_retries)
assert download._do_download_attempt.call_count == max_retries
assert download.sleep.call_count == max_retries
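# Sketch of the assumed backoff schedule: with a 10 second initial wait and a
# growth factor of 1.5, the k-th retry sleeps 10 * 1.5 ** (k - 1) seconds.
# Only the final sleep() call is asserted below; for example, max_retries=3
# gives 10 * 1.5 ** 2 = 22.5 seconds.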
starting_wait_time = 10 # The initial wait time in seconds.
download.sleep.assert_called_with(starting_wait_time * 1.5 ** (max_retries - 1))
@pytest.mark.parametrize("max_retries", [1, 2, 3, 5, 10])
def test_download_failed_retry_loop(mocker, max_retries: int):
"""Check that download attempts are repeated without sleep() on error."""
def patched_download(*args):
raise DownloadFailed
mocker.patch.object(download, "sleep")
mocker.patch.object(download, "_do_download_attempt", patched_download)
mocker.spy(download, "_do_download_attempt")
with pytest.raises(DownloadFailed):
download.download(urls="example", max_retries=max_retries)
assert download._do_download_attempt.call_count == max_retries
assert download.sleep.call_count == 0
def test_download_cache_hit(mocker):
"""Check that download is not repeated on cache hit."""
data = b"Hello, world"
data_checksum = "4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f"
cached_path = cache_path(f"downloads/{data_checksum}")
# Tidy up from a previous test, if applicable.
if cached_path.is_file():
cached_path.unlink()
def patched_download(*args):
return data
mocker.patch.object(download, "_get_url_data", patched_download)
mocker.spy(download, "_get_url_data")
assert (
download.download(
"example",
sha256="4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f",
)
== data
)
download._get_url_data.assert_called_once_with("example")
assert cached_path.is_file()
# Cache hit.
assert (
download.download(
"example",
sha256="4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f",
)
== data
)
assert download._get_url_data.call_count == 1
def test_download_mismatched_checksum(mocker):
"""Check that error is raised when checksum does not match expected."""
def patched_download(*args):
return b"Hello, world"
mocker.patch.object(download, "_get_url_data", patched_download)
with pytest.raises(DownloadFailed, match="Checksum of download does not match"):
download.download("example", sha256="123")
def test_download_no_urls():
with pytest.raises(ValueError, match="No URLs to download"):
download.download(urls=[])
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/download_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/util/shell_format.py"""
from compiler_gym.util import shell_format as fmt
from tests.test_main import main
def test_indent():
assert fmt.indent("abc") == " abc"
assert fmt.indent("abc", n=2) == " abc"
assert fmt.indent("abc\ndef") == " abc\n def"
def test_join_cmd():
assert fmt.join_cmd(["a", "b", "c"]) == "a b c"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/shell_format_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/util/locks.py"""
from datetime import datetime
from pathlib import Path
from threading import Thread
from flaky import flaky
from compiler_gym.util.runfiles_path import create_user_logs_dir
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
@flaky  # Guard against the unlikely event that the timestamp changes mid-test.
def test_create_user_logs_dir(temporary_environ, tmpdir):
tmpdir = Path(tmpdir)
temporary_environ["COMPILER_GYM_LOGS"] = str(tmpdir)
dir = create_user_logs_dir("foo")
now = datetime.now()
assert dir.parent.parent == tmpdir / "foo"
year, month, day = dir.parent.name.split("-")
assert int(year) == now.year
assert int(month) == now.month
assert int(day) == now.day
hour, minute, second = dir.name.split("-")
assert int(hour) == now.hour
assert int(minute) == now.minute
assert int(second) == now.second
def test_create_user_logs_dir_multithreaded(temporary_environ, tmpdir):
tmpdir = Path(tmpdir)
temporary_environ["COMPILER_GYM_LOGS"] = str(tmpdir)
class MakeDir(Thread):
def __init__(self):
super().__init__()
self.dir = None
def run(self):
self.dir = create_user_logs_dir("foo")
def join(self):
super().join()
return self.dir
threads = [MakeDir() for _ in range(5)]
for t in threads:
t.start()
dirs = [t.join() for t in threads]
# Every directory should be unique.
print(dirs)
assert len(set(dirs)) == len(dirs)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/runfiles_path_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.util.executor."""
import sys
from typing import Iterable
import pytest
from compiler_gym.util.executor import Executor
def submitit_installed():
"""Determine if submitit library is available."""
try:
import submitit # noqa
return True
except ImportError:
return False
def executor_types() -> Iterable[str]:
"""Yield the types of executor."""
yield "local"
yield "debug"
if submitit_installed():
yield "slurm"
@pytest.fixture(scope="module", params=list(executor_types()))
def executor_type(request) -> str:
"""Test fixture which yields an executor type."""
return request.param
def _hello_fn():
return "Hello, world"
@pytest.mark.xfail(
sys.platform == "darwin",
reason="'ResourceWarning: unclosed <socket.socket ...>' when type == local",
)
def test_no_args_call(tmpdir, executor_type: str):
with Executor(type=executor_type, cpus=1).get_executor(logs_dir=tmpdir) as executor:
job = executor.submit(_hello_fn)
assert job.result() == "Hello, world"
def _add_fn(a, b, *args, **kwargs):
return a + b + sum(args) + kwargs["c"]
def test_call_with_args(tmpdir, executor_type: str):
with Executor(type=executor_type, cpus=1).get_executor(logs_dir=tmpdir) as executor:
job = executor.submit(_add_fn, 1, 1, 1, 1, c=1, d=None)
assert job.result() == 5
|
CompilerGym-development
|
tests/util/executor_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:timer."""
from time import sleep
from compiler_gym.util import timer
from tests.test_main import main
def test_humanize_duration_seconds():
assert timer.humanize_duration(5) == "5.000s"
assert timer.humanize_duration(500.111111) == "500.1s"
def test_humanize_duration_ms():
assert timer.humanize_duration(0.0055) == "5.5ms"
assert timer.humanize_duration(0.5) == "500.0ms"
assert timer.humanize_duration(0.51) == "510.0ms"
assert timer.humanize_duration(0.9999) == "999.9ms"
def test_humanize_duration_us():
assert timer.humanize_duration(0.0005) == "500.0us"
assert timer.humanize_duration(0.0000119) == "11.9us"
def test_humanize_duration_ns():
assert timer.humanize_duration(0.0000005) == "500.0ns"
assert timer.humanize_duration(0.0000000019) == "1.9ns"
def test_humanize_duration_negative_seconds():
assert timer.humanize_duration(-1.5) == "-1.500s"
def test_humanize_duration_hms():
assert timer.humanize_duration_hms(0.05) == "0:00:00"
assert timer.humanize_duration_hms(0.999) == "0:00:00"
assert timer.humanize_duration_hms(5) == "0:00:05"
assert timer.humanize_duration_hms(500.111111) == "0:08:20"
assert timer.humanize_duration_hms(4210.4) == "1:10:10"
assert timer.humanize_duration_hms(36000) == "10:00:00"
def test_timer_elapsed_before_reset():
t = timer.Timer()
assert t.time == 0
sleep(0.1)
assert t.time == 0
def test_timer_elapsed_remains_constant():
with timer.Timer() as t:
sleep(0.1)
elapsed_a = t.time
assert elapsed_a > 0
sleep(0.1)
elapsed_b = t.time
assert elapsed_b == elapsed_a
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/timer_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:parallelization."""
from compiler_gym.util import parallelization
from tests.test_main import main
def test_thread_safe_tee():
a, b = parallelization.thread_safe_tee(range(100))
assert next(a) == 0
assert next(b) == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/parallelization_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:truncate."""
from compiler_gym.util.truncate import truncate, truncate_lines
from tests.test_main import main
def test_truncate_no_truncation():
assert truncate("abc") == "abc"
assert truncate("abcdef\nabcdef", max_line_len=7, max_lines=2) == "abcdef\nabcdef"
def test_truncate_single_line():
assert truncate("abcdefghijklmnop", max_line_len=5) == "ab..."
def test_truncate_dual_lines():
assert (
truncate("abcdefghijklmnop\nbcdefghijklmnop", max_line_len=5, max_lines=3)
== "ab...\nbc..."
)
def test_truncate_final_line():
assert truncate("abc\ndef\n123", max_line_len=5, max_lines=2) == "abc\nde..."
assert truncate("abc\ndef\n123", max_line_len=10, max_lines=2) == "abc\ndef..."
def test_truncate_lines_no_truncation():
assert truncate_lines(["abc"]) == "abc"
assert (
truncate_lines(["abcdef", "abcdef"], max_line_len=7, max_lines=2)
== "abcdef\nabcdef"
)
def test_truncate_lines_single_line():
assert truncate_lines(["abcdefghijklmnop"], max_line_len=5) == "ab..."
def test_truncate_lines_dual_lines():
assert (
truncate_lines(
["abcdefghijklmnop", "bcdefghijklmnop"], max_line_len=5, max_lines=3
)
== "ab...\nbc..."
)
def test_truncate_lines_dual_lines_generator():
def gen():
yield "abcdefghijklmnop"
yield "bcdefghijklmnop"
assert truncate_lines(gen(), max_line_len=5, max_lines=3) == "ab...\nbc..."
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/truncate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/util/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.util.commands."""
import subprocess
import pytest
from compiler_gym.util.commands import Popen, communicate
from tests.test_main import main
def test_communicate_timeout():
with pytest.raises(subprocess.TimeoutExpired):
with subprocess.Popen(["sleep", "60"]) as process:
communicate(process, timeout=1)
assert process.poll() is not None # Process is dead.
def test_popen():
with Popen(["echo"]) as process:
communicate(process, timeout=60)
assert process.poll() is not None # Process is dead.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/commands_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:filesystem."""
from pathlib import Path
import pytest
from compiler_gym.util import filesystem
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_atomic_file_write_path(tmpwd: Path):
out = Path("a").resolve()
assert not out.is_file()
with filesystem.atomic_file_write(out) as tmp_out:
assert tmp_out != out
assert tmp_out.parent == out.parent
# Write to the temporary file as normal.
with open(tmp_out, "w") as f:
f.write("Hello!")
with open(out) as f:
assert f.read() == "Hello!"
assert not tmp_out.is_file()
def test_atomic_file_write_binary_io(tmpwd: Path):
out = Path("a").resolve()
with filesystem.atomic_file_write(out, fileobj=True) as f:
f.write("Hello!".encode("utf-8"))
with open(out) as f:
assert f.read() == "Hello!"
def test_atomic_file_write_text_io(tmpwd: Path):
out = Path("a").resolve()
with filesystem.atomic_file_write(out, fileobj=True, mode="w") as f:
f.write("Hello!")
with open(out) as f:
assert f.read() == "Hello!"
@pytest.mark.parametrize(
"path",
[
"/",
"/dev/null",
Path("/"),
Path("/dev/null"),
],
)
def test_not_is_in_memory(path):
assert not filesystem.is_in_memory(path)
@pytest.mark.parametrize(
"path",
[
"/dev/shm",
"/dev/shm/foo",
Path("/dev/shm"),
Path("/dev/shm/foo"),
],
)
def test_is_in_memory(path):
assert filesystem.is_in_memory(path)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/filesystem_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:temporary_working_directory."""
import os
import tempfile
from pathlib import Path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.test_main import main
def test_temporary_working_directory_tempdir():
with temporary_working_directory() as cwdir:
# Suffix test rather than equality test because on macOS temporary
# directories can have a /private prefix.
assert os.getcwd().endswith(str(cwdir))
assert cwdir.is_dir()
assert not list(cwdir.iterdir())
(cwdir / "test").touch()
assert (cwdir / "test").is_file()
# Out of scope, the directory is removed.
assert not cwdir.is_dir()
def test_temporary_working_directory():
with tempfile.TemporaryDirectory() as d:
path = Path(d)
with temporary_working_directory(path) as cwdir:
assert path == cwdir
# Suffix test rather than equality test because on macOS temporary
# directories can have a /private prefix.
assert os.getcwd().endswith(str(path))
assert cwdir.is_dir()
assert not list(cwdir.iterdir())
(cwdir / "test").touch()
assert (cwdir / "test").is_file()
# Out of scope, the directory is preserved.
assert path.is_dir()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/temporary_working_directory_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:statistics."""
from pytest import approx
from compiler_gym.util.statistics import geometric_mean
from tests.test_main import main
def test_geometric_mean_empty_list():
assert geometric_mean([]) == 0
def test_geometric_mean_zero_value():
assert geometric_mean([0, 1, 2]) == 0
def test_geometric_mean_negative():
assert geometric_mean([-1, 1, 2]) == 0
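# Worked example for the test below: geometric_mean([1, 2, 3]) should return
# (1 * 2 * 3) ** (1 / 3) = 6 ** (1 / 3) ≈ 1.8171205928321.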
def test_geometric_mean_123():
assert geometric_mean([1, 2, 3]) == approx(1.8171205928321)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/statistics_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:minimize_trajectory."""
import logging
import sys
from typing import List
import pytest
from compiler_gym.util import minimize_trajectory as mt
from compiler_gym.util.gym_type_hints import ActionType
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Verbose logging for tests.
logging.basicConfig(level=logging.DEBUG)
class MockActionSpace:
"""A mock action space for use by MockEnv."""
def __init__(self, actions):
self.flags = {a: str(a) for a in set(actions)}
class MockValidationResult:
"""A mock validation result for use by MockEnv."""
def __init__(self, okay):
self._okay = okay
def okay(self):
return self._okay
class MockEnv:
"""A mock environment for testing trajectory minimization."""
def __init__(self, actions: List[ActionType], validate=lambda env: True):
self.original_trajectory = actions
self.actions = actions.copy()
self.validate = lambda: MockValidationResult(validate(self))
self.benchmark = "benchmark"
self.action_space = MockActionSpace(set(actions))
def reset(self, benchmark):
self.actions = []
assert benchmark == self.benchmark
def multistep(self, actions):
for action in actions:
assert action in self.original_trajectory
self.actions += actions
return None, None, False, {}
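# Minimal usage sketch of the mock (mirrors how the tests below drive it):
#   env = MockEnv(actions=[0, 1, 2])
#   env.reset("benchmark")    # clears env.actions
#   env.multistep([0, 1])     # replays a prefix; env.actions is now [0, 1]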
def make_hypothesis(val: int):
"""Create a hypothesis that checks if `val` is in actions."""
def hypothesis(env):
print("hypothesis?()", env.actions, val in env.actions, file=sys.stderr)
return val in env.actions
return hypothesis
@pytest.mark.parametrize("n", range(10))
def test_bisect_explicit_hypothesis(n: int):
"""Test that bisection chops off the tail."""
env = MockEnv(actions=list(range(10)))
list(mt.bisect_trajectory(env, make_hypothesis(n)))
assert env.actions == list(range(n + 1))
@pytest.mark.parametrize("n", range(10))
def test_bisect_implicit_hypothesis(n: int):
"""Test bisection again but using the implicit hypothesis that
env.validate() fails.
"""
env = MockEnv(
actions=list(range(10)), validate=lambda env: not make_hypothesis(n)(env)
)
list(mt.bisect_trajectory(env))
assert env.actions == list(range(n + 1))
@pytest.mark.parametrize("n", range(10))
def test_reverse_bisect(n: int):
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
list(mt.bisect_trajectory(env, make_hypothesis(n), reverse=True))
assert env.actions == list(range(n, 10))
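# Sketch of the contract checked above: bisect_trajectory searches for the
# shortest prefix of the trajectory that still satisfies the hypothesis,
# while reverse=True instead searches for the shortest suffix.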
def test_minimize_trajectory_iteratively():
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
minimized = [0, 3, 4, 5, 8, 9]
def hypothesis(env):
return all(x in env.actions for x in minimized)
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == minimized
def test_minimize_trajectory_iteratively_no_effect():
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
minimized = list(range(10))
def hypothesis(env):
return env.actions == minimized
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == minimized
def test_random_minimization():
"""Test that random minimization reduces trajectory."""
env = MockEnv(actions=list(range(10)))
minimized = [0, 1, 4]
def hypothesis(env):
return all(x in env.actions for x in minimized)
list(mt.random_minimization(env, hypothesis))
assert len(env.actions) <= 10
assert len(env.actions) >= len(minimized)
assert all(a in list(range(10)) for a in env.actions)
def test_random_minimization_no_effect():
"""Test random minimization when there's no improvement to be had."""
env = MockEnv(actions=list(range(10)))
minimized = list(range(10))
def hypothesis(env):
return env.actions == minimized
list(mt.random_minimization(env, hypothesis))
assert env.actions == minimized
def test_minimize_trajectory_iteratively_llvm_crc32(env):
"""Test trajectory minimization on a real environment."""
env.reset(benchmark="cbench-v1/crc32")
env.multistep(
[
env.action_space["-mem2reg"],
env.action_space["-gvn"],
env.action_space["-reg2mem"],
]
)
def hypothesis(env):
return (
env.action_space["-mem2reg"] in env.actions
and env.action_space["-reg2mem"] in env.actions
)
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == [
env.action_space["-mem2reg"],
env.action_space["-reg2mem"],
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/minimize_trajectory_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import compiler_gym.util.permutation as permutation
from tests.test_main import main
def test_permutation_number_mapping():
original_permutation = np.array([4, 3, 1, 5, 2, 6, 0], dtype=int)
permutation_number = permutation.convert_permutation_to_number(original_permutation)
mapped_permutation = permutation.convert_number_to_permutation(
n=permutation_number, permutation_size=len(original_permutation)
)
assert np.array_equal(original_permutation, mapped_permutation)
original_permutation2 = np.array([2, 0, 5, 1, 4, 6, 3], dtype=int)
permutation_number2 = permutation.convert_permutation_to_number(
original_permutation2
)
mapped_permutation2 = permutation.convert_number_to_permutation(
n=permutation_number2, permutation_size=len(original_permutation2)
)
np.testing.assert_array_equal(original_permutation2, mapped_permutation2)
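# Note (assumption): convert_permutation_to_number is expected to map each
# permutation of a given size to a unique integer (a Lehmer-code style
# encoding would suffice) and convert_number_to_permutation to invert it.
# The assertions above only exercise the round trip, not a specific encoding.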
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/permutation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:manual_env."""
import re
import sys
from difflib import unified_diff
from io import StringIO
from random import seed
import pytest
from absl import app, flags
from compiler_gym.bin.manual_env import main
from compiler_gym.util.capture_output import capture_output
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
def io_check(input, output, rnd_seed=100):
"""Run the shell with the given input and check the output matches the
output regex"""
seed(rnd_seed)
old_stdin = sys.stdin
try:
with capture_output() as out:
try:
sys.stdin = StringIO(input)
main(["argv0", "--env=llvm-v0"])
except SystemExit:
pass # Expected behaviour is to call sys.exit().
print(out.stdout)
pattern = (
r"""Initialized environment in [0-9.mu]*s
Welcome to the CompilerGym Shell!
---------------------------------
Type help or \? for more information.
The 'tutorial' command will give a step by step guide.
"""
+ output
+ r"""
compiler_gym:[a-zA-Z0-9/-]+> Exiting
"""
)
# Strip the ANSI escape sequences used for formatting from the output.
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
stdout = ansi_escape.sub("", out.stdout)
# Strip trailing whitespace from output.
stdout = "\n".join(n.rstrip() for n in stdout.split("\n"))
if not re.match(pattern, stdout):
# Create a diff between the expected regex and the actual output.
# Diffing against a regex produces many false positives, since any
# character groups or other expressions will differ, but it can
# still be helpful for tracking down the important differences.
diff = unified_diff(
pattern.split("\n"),
stdout.split("\n"),
fromfile="Expected output regex",
tofile="Actual output",
)
pytest.fail("\n".join(diff))
finally:
sys.stdin = old_stdin
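# Usage sketch (hypothetical arguments): io_check("list_actions", r".*-adce.*")
# feeds the command to the manual_env shell and fails the test unless the
# captured, ANSI-stripped output matches the expected regex.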
def test_list_datasets():
FLAGS.unparse_flags()
io_check(
"""list_datasets""", r"""compiler_gym:cbench-v1/qsort> .*cbench-v[0-9]+.*"""
)
def test_list_benchmarks():
FLAGS.unparse_flags()
io_check(
"""list_benchmarks""",
r"""compiler_gym:cbench-v1/qsort> .*cbench-v[0-9]+/adpcm.*""",
)
def test_list_actions():
FLAGS.unparse_flags()
io_check(
"""list_actions""", r"""compiler_gym:cbench-v1/qsort> .*-adce.* -strip.*"""
)
def test_list_rewards():
FLAGS.unparse_flags()
io_check(
"""list_rewards""",
r"""compiler_gym:cbench-v1/qsort> .*IrInstructionCount.* TextSizeOz.*""",
)
def test_list_observations():
FLAGS.unparse_flags()
io_check(
"""list_observations""",
r"""compiler_gym:cbench-v1/qsort> Autophase, .*, TextSizeOz""",
)
def test_set_benchmark():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s""",
)
def test_actions_stack_back_stack():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce -adce
stack
back
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Action -adce
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
3 | -adce | False | False | - | 0
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0
compiler_gym:cbench-v[0-9]+/adpcm> Undid -adce in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0""",
)
def test_reward():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
action -mem2reg
reward
reward IrInstructionCountNorm
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Reward: 287.000000
Actions -mem2reg in [0-9.mu]*s with reward 287.0.
compiler_gym:cbench-v[0-9]+/adpcm> 0.000000
Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 0.506173
Reward IrInstructionCountNorm in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -mem2reg | True | False | 287 | 287
0 | <init> | False | False | 0 | 0
""",
)
def test_observation():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_observation IrInstructionCount
action -mem2reg
observation
observation IrInstructionCountOz
""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Observation IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Observation: 280
Actions -mem2reg in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> 280
Observation IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 209
Observation IrInstructionCountOz in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 209
Observation IrInstructionCountOz in [0-9.mu]*s""",
)
def test_try_all_actions():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
try_all_actions""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action: -add-discriminators Reward: 0.000000
Action: -adce Reward: 1.000000
(.|\n)*
Got actions in [0-9.mu]*s
Action | Effect | Done | Reward
---------------------------------+----------+--------+---------
-mem2reg | True | False | 181
-sroa | True | False | 181
-newgvn | True | False | 74
-gvn | True | False | 72
(.|\n)*
-structurizecfg | True | False | -25
-bounds-checking | True | False | -60""",
)
def test_simplify_stack():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
action -mem2reg -adce -adce
simplify_stack
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Reward: 287.000000
Action -adce
Reward: 2.000000
Action -adce
Reward: 0.000000
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 289.0.
compiler_gym:cbench-v[0-9]+/adpcm>
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | 2 | 289
1 | -mem2reg | True | False | 287 | 287
0 | <init> | False | False | 0 | 0""",
)
def test_simplify_stack_no_reward():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce -adce
simplify_stack
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Action -adce
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm>
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0""",
)
def test_hill_climb(monkeypatch):
FLAGS.unparse_flags()
i = 0
def incr():
nonlocal i
i += 1
return i
monkeypatch.setattr("random.randrange", lambda _: incr())
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
hill_climb 2
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Step: 1 Action: -adce Reward: 1.000000 Accept: True
Step: 2 Action: -aggressive-instcombine Reward: 0.000000 Accept: False
Hill climb complete in [0-9.mu]*s. Accepted 1 of 2 steps for total reward of 1.0.
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -adce | True | False | 1 | 1
0 | <init> | False | False | 0 | 0""",
)
def test_greedy():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
greedy
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action: -add-discriminators Reward: 0.000000
Action: -adce Reward: 1.000000
(.|\n)*
Action: -mem2reg Reward: 287.000000
(.|\n)*
Action: -mergereturn Reward: -1.000000
Step: 1 Selected action: -mem2reg Reward: 287.000000
Greedy 1 steps in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -mem2reg | True | False | 181 | 181
0 | <init> | False | False | 0 | 0""",
)
def test_actions_string():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce
commandline""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Actions -mem2reg -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> \$ opt -mem2reg -adce input.bc -o output.bc""",
)
def test_reset():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce
reset
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Actions -mem2reg -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> Reset in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
0 | <init> | False | False | 0 | 0""",
)
def test_unrecognized_flags():
FLAGS.unparse_flags()
with pytest.raises(app.UsageError) as ctx:
main(["argv0", "unknown-option"])
assert str(ctx.value) == "Unknown command line arguments: ['unknown-option']"
def test_missing_required_flag():
FLAGS.unparse_flags()
with pytest.raises(app.UsageError) as ctx:
main(["argv0"])
assert str(ctx.value) == "--env must be set"
def test_ls_env():
FLAGS.unparse_flags()
with capture_output() as out:
try:
main(["argv0", "--ls_env"])
except SystemExit:
pass # Expected behaviour is to call sys.exit().
assert "llvm-" in out.stdout
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
tests/bin/manual_env_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:validate."""
import tempfile
from io import StringIO
from pathlib import Path
from typing import List
import pytest
from compiler_gym.bin.validate import main
from compiler_gym.util.capture_output import capture_output
from tests.pytest_plugins.common import set_command_line_flags, skip_on_ci
from tests.test_main import main as _test_main
def test_okay_llvm_result(monkeypatch):
stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt input.bc -o output.bc,0.3
""".strip()
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert "✅ cbench-v1/crc32 " in out.stdout
assert not out.stderr
def test_okay_llvm_result_file_input():
with tempfile.TemporaryDirectory() as d:
path = Path(d) / "test.csv"
with open(str(path), "w") as f:
f.write(
"""
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt input.bc -o output.bc,0.3
""".strip()
)
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
with capture_output() as out:
main(["argv0", str(path)])
assert "✅ cbench-v1/crc32 " in out.stdout
assert not out.stderr
def test_no_input(monkeypatch):
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(""))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert "No inputs to validate" in out.stderr
def test_invalid_reward_llvm_result(monkeypatch):
stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0.5,opt input.bc -o output.bc,0.3
""".strip()
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert (
"❌ cbench-v1/crc32 Expected reward 0.5 but received reward 0.0\n"
in out.stdout
)
assert not out.stderr
def test_invalid_csv_format(monkeypatch):
stdin = "invalid\ncsv\nformat"
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert "Expected 4 columns in the first row of CSV" in out.stderr
@skip_on_ci
def test_multiple_valid_inputs(monkeypatch):
stdin = """
benchmark,reward,walltime,commandline
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
""".strip()
set_command_line_flags(["argv0", "--env=llvm-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert not out.stderr
assert out.stdout.count("✅") == 3 # Every benchmark passed.
@skip_on_ci
@pytest.mark.parametrize(
"benchmarks",
[
[
"benchmark://cbench-v1/gsm",
"benchmark://cbench-v1/lame",
"benchmark://cbench-v1/stringsearch",
"benchmark://cbench-v1/ghostscript",
],
[
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/sha",
"benchmark://cbench-v1/ispell",
"benchmark://cbench-v1/blowfish",
],
[
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/tiffdither",
"benchmark://cbench-v1/bzip2",
"benchmark://cbench-v1/stringsearch2",
],
[
"benchmark://cbench-v1/bitcount",
"benchmark://cbench-v1/jpeg-d",
"benchmark://cbench-v1/jpeg-c",
"benchmark://cbench-v1/dijkstra",
],
[
"benchmark://cbench-v1/rijndael",
"benchmark://cbench-v1/patricia",
"benchmark://cbench-v1/tiff2rgba",
"benchmark://cbench-v1/crc32",
],
[
"benchmark://cbench-v1/tiff2bw",
"benchmark://cbench-v1/tiffmedian",
"benchmark://cbench-v1/susan",
],
],
)
def test_validate_cbench_null_options(monkeypatch, benchmarks: List[str]):
stdin = "\n".join(
[
"benchmark,reward,walltime,commandline",
]
+ [f"{b},,0,opt input.bc -o output.bc" for b in benchmarks]
)
set_command_line_flags(["argv0", "--env=llvm-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert not out.stderr
assert out.stdout.count("✅") == len(benchmarks) # Every benchmark passed.
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
tests/bin/validate_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/bin/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:service."""
import sys
import gym
import pytest
from absl import flags
import compiler_gym
from compiler_gym.bin.service import print_service_capabilities
from compiler_gym.errors import EnvironmentNotSupported
from tests.test_main import main
@pytest.mark.parametrize("env_name", compiler_gym.COMPILER_GYM_ENVS)
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_print_service_capabilities_smoke_test(env_name: str):
flags.FLAGS(["argv0"])
try:
with gym.make(env_name) as env:
print_service_capabilities(env)
except EnvironmentNotSupported:
pass # Environment not supported on this test platform.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/bin/service_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.datasets.uri."""
from compiler_gym.datasets import BenchmarkUri
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_from_string_1():
uri = BenchmarkUri.from_string("benchmark://test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_2():
uri = BenchmarkUri.from_string("test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_3():
uri = BenchmarkUri.from_string("benchmark://test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_4():
uri = BenchmarkUri.from_string(
"generator://csmith-v0/this path has whitespace/in/it"
)
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == "/this path has whitespace/in/it"
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator://csmith-v0/this path has whitespace/in/it"
def test_from_string_5():
uri = BenchmarkUri.from_string("generator://csmith-v0/0")
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == "/0"
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator://csmith-v0/0"
def test_from_string_6():
uri = BenchmarkUri.from_string("generator://csmith-v0?a=b&c=d#foo")
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == ""
assert uri.params == {"a": ["b"], "c": ["d"]}
assert uri.fragment == "foo"
assert str(uri) == "generator://csmith-v0?a=b&c=d#foo"
def test_from_string_7():
uri = BenchmarkUri.from_string("")
assert uri.scheme == "benchmark"
assert uri.dataset == ""
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark:"
def test_from_string_8():
uri = BenchmarkUri.from_string("generator:")
assert uri.scheme == "generator"
assert uri.dataset == ""
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator:"
def test_canonicalize_1():
assert BenchmarkUri.canonicalize("test-v0") == "benchmark://test-v0"
def test_startswith():
uri = BenchmarkUri.from_string("benchmark://test-v0/foo")
assert not uri.startswith("!!!")
assert uri.startswith("b")
assert uri.startswith("benchmark://test-v0/fo")
def test_endswith():
uri = BenchmarkUri.from_string("benchmark://test-v0/foo")
assert not uri.endswith("!!!")
assert uri.endswith("o")
assert uri.endswith("mark://test-v0/foo")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/uri_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets:files_dataset_test."""
import tempfile
from pathlib import Path
import numpy as np
import pytest
from compiler_gym.datasets import FilesDataset
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
@pytest.fixture(scope="function")
def empty_dataset() -> FilesDataset:
with tempfile.TemporaryDirectory() as d:
yield FilesDataset(
name="benchmark://test-v0",
description="",
license="MIT",
dataset_root=Path(d) / "files",
site_data_base=Path(d) / "site_data",
)
@pytest.fixture(scope="function", params=["", "memoized-ids"])
def populated_dataset(request) -> FilesDataset:
with tempfile.TemporaryDirectory() as d:
df = Path(d) / "files"
(df / "a").mkdir(parents=True)
(df / "b").mkdir()
with open(df / "e.txt", "w") as f:
f.write("e")
(df / "f.txt").touch()
(df / "g.jpg").touch()
(df / "a" / "a.txt").touch()
(df / "a" / "b.txt").touch()
(df / "b" / "a.txt").touch()
(df / "b" / "b.txt").touch()
(df / "b" / "c.txt").touch()
(df / "b" / "d.jpg").touch()
yield FilesDataset(
name="benchmark://test-v0",
description="",
license="MIT",
dataset_root=Path(d) / "files",
site_data_base=Path(d) / "site_data",
memoize_uris=request.param == "memoized-ids",
)
def test_dataset_is_installed(empty_dataset: FilesDataset):
assert empty_dataset.installed
def test_empty_dataset(empty_dataset: FilesDataset):
assert empty_dataset.size == 0
assert list(empty_dataset.benchmark_uris()) == []
assert list(empty_dataset.benchmarks()) == []
def test_populated_dataset(populated_dataset: FilesDataset):
for _ in range(2):
assert list(populated_dataset.benchmark_uris()) == [
"benchmark://test-v0/e.txt",
"benchmark://test-v0/f.txt",
"benchmark://test-v0/g.jpg",
"benchmark://test-v0/a/a.txt",
"benchmark://test-v0/a/b.txt",
"benchmark://test-v0/b/a.txt",
"benchmark://test-v0/b/b.txt",
"benchmark://test-v0/b/c.txt",
"benchmark://test-v0/b/d.jpg",
]
assert populated_dataset.size == 9
def test_populated_dataset_benchmark_lookup(populated_dataset: FilesDataset):
bm = populated_dataset.benchmark("benchmark://test-v0/e.txt")
assert bm.uri == "benchmark://test-v0/e.txt"
assert bm.proto.uri == "benchmark://test-v0/e.txt"
assert bm.proto.program.contents.decode("utf-8") == "e"
def test_populated_dataset_first_file(populated_dataset: FilesDataset):
bm = next(populated_dataset.benchmarks())
assert bm.uri == "benchmark://test-v0/e.txt"
assert bm.proto.uri == "benchmark://test-v0/e.txt"
assert bm.proto.program.contents.decode("utf-8") == "e"
def test_populated_dataset_benchmark_lookup_not_found(populated_dataset: FilesDataset):
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://test-v0/not/a/file"
):
populated_dataset.benchmark("benchmark://test-v0/not/a/file")
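# Setting benchmark_file_suffix filters the listing to matching files and
# strips the suffix from the reported URIs.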
def test_populated_dataset_with_file_extension_filter(populated_dataset: FilesDataset):
populated_dataset.benchmark_file_suffix = ".jpg"
assert list(populated_dataset.benchmark_uris()) == [
"benchmark://test-v0/g",
"benchmark://test-v0/b/d",
]
assert populated_dataset.size == 2
def test_populated_dataset_random_benchmark(populated_dataset: FilesDataset):
num_benchmarks = 3
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
populated_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/files_dataset_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets."""
from pathlib import Path
import numpy as np
import pytest
from compiler_gym.datasets.datasets import Datasets, round_robin_iterables
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
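# Lightweight stand-ins for Dataset and Benchmark so that the Datasets
# collection can be tested without touching the filesystem.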
class MockDataset:
"""A mock Dataset class."""
def __init__(self, name):
self.name = name
self.installed = False
self.deprecated = False
self.benchmark_values = []
self.sort_order = 0
def install(self):
self.installed = True
def uninstall(self):
self.installed = False
def benchmark_uris(self):
return (b.uri for b in self.benchmark_values)
def benchmarks(self):
yield from self.benchmark_values
def benchmark_from_parsed_uri(self, uri: BenchmarkUri):
for b in self.benchmark_values:
if b.uri == str(uri):
return b
raise KeyError(str(uri))
def random_benchmark(self, random_state=None):
return random_state.choice(self.benchmark_values)
def __repr__(self):
return str(self.name)
class MockBenchmark:
"""A mock Benchmark class."""
def __init__(self, uri):
self.uri = uri
def __repr__(self):
return str(self.uri)
def test_enumerate_datasets_empty():
datasets = Datasets([])
assert list(datasets) == []
def test_enumerate_datasets():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
datasets = Datasets((da, db))
assert list(datasets) == [da, db]
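# A lower sort_order moves a dataset ahead of the default name ordering.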
def test_enumerate_datasets_with_custom_sort_order():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
db.sort_order = -1
datasets = Datasets((da, db))
assert list(datasets) == [db, da]
def test_enumerate_deprecated_datasets():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
datasets = Datasets((da, db))
db.deprecated = True
assert list(datasets) == [da]
assert list(datasets.datasets(with_deprecated=True)) == [da, db]
def test_enumerate_datasets_deprecated_at_construction_time():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
db.deprecated = True
datasets = Datasets((da, db))
assert list(datasets) == [da]
assert list(datasets.datasets(with_deprecated=True)) == [da, db]
def test_datasets_add_dataset():
datasets = Datasets([])
da = MockDataset("benchmark://foo-v0")
datasets["benchmark://foo-v0"] = da
assert list(datasets) == [da]
def test_datasets_add_deprecated_dataset():
datasets = Datasets([])
da = MockDataset("benchmark://a")
da.deprecated = True
datasets["benchmark://foo-v0"] = da
assert list(datasets) == []
def test_datasets_remove():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
del datasets["benchmark://foo-v0"]
assert list(datasets) == []
def test_datasets_get_item():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert datasets.dataset("benchmark://foo-v0") == da
assert datasets["benchmark://foo-v0"] == da
def test_datasets_contains():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert "benchmark://foo-v0" in datasets
def test_datasets_get_item_default_scheme():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert datasets.dataset("foo-v0") == da
assert datasets["foo-v0"] == da
def test_datasets_get_item_lookup_miss():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
with pytest.raises(LookupError, match=r"^Dataset not found: benchmark://bar-v0$"):
datasets.dataset("benchmark://bar-v0")
with pytest.raises(LookupError, match=r"^Dataset not found: benchmark://bar-v0$"):
_ = datasets["benchmark://bar-v0"]
def test_datasets_contains_lookup_miss():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert "benchmark://bar-v0" not in datasets
def test_benchmark_lookup_by_uri():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
da.benchmark_values.append(ba)
datasets = Datasets([da, db])
assert datasets.benchmark("benchmark://foo-v0/abc") == ba
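# round_robin_iterables() interleaves its input iterators, with shorter
# iterators dropping out as they are exhausted.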
def test_round_robin():
iters = iter(
[
iter([0, 1, 2, 3, 4, 5]),
iter(["a", "b", "c"]),
iter([0.5, 1.0]),
]
)
assert list(round_robin_iterables(iters)) == [
0,
"a",
0.5,
1,
"b",
1.0,
2,
"c",
3,
4,
5,
]
def test_benchmark_uris_order():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
bb = MockBenchmark(uri="benchmark://foo-v0/123")
bc = MockBenchmark(uri="benchmark://bar-v0/abc")
bd = MockBenchmark(uri="benchmark://bar-v0/123")
da.benchmark_values.append(ba)
da.benchmark_values.append(bb)
db.benchmark_values.append(bc)
db.benchmark_values.append(bd)
datasets = Datasets([da, db])
assert list(datasets.benchmark_uris()) == [b.uri for b in datasets.benchmarks()]
# Datasets are ordered by name, so bar-v0 before foo-v0.
assert list(datasets.benchmark_uris()) == [
"benchmark://bar-v0/abc",
"benchmark://foo-v0/abc",
"benchmark://bar-v0/123",
"benchmark://foo-v0/123",
]
def test_benchmarks_iter_deprecated():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
db.deprecated = True
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
bb = MockBenchmark(uri="benchmark://foo-v0/123")
bc = MockBenchmark(uri="benchmark://bar-v0/abc")
bd = MockBenchmark(uri="benchmark://bar-v0/123")
da.benchmark_values.append(ba)
da.benchmark_values.append(bb)
db.benchmark_values.append(bc)
db.benchmark_values.append(bd)
datasets = Datasets([da, db])
# Iterate over the benchmarks. The deprecated dataset is not included.
assert list(datasets.benchmark_uris()) == [b.uri for b in datasets.benchmarks()]
assert list(datasets.benchmark_uris()) == [
"benchmark://foo-v0/abc",
"benchmark://foo-v0/123",
]
# Repeat the above, but include the deprecated datasets.
assert list(datasets.benchmark_uris(with_deprecated=True)) == [
b.uri for b in datasets.benchmarks(with_deprecated=True)
]
assert list(datasets.benchmark_uris(with_deprecated=True)) == [
"benchmark://bar-v0/abc",
"benchmark://foo-v0/abc",
"benchmark://bar-v0/123",
"benchmark://foo-v0/123",
]
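# random_benchmark() should delegate to the dataset's own sampler exactly once
# per call, whether or not dataset sizes are used as selection weights.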
@pytest.mark.parametrize("weighted", [False, True])
def test_random_benchmark(mocker, weighted: bool):
da = MockDataset("benchmark://foo-v0")
da.size = 10
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
da.benchmark_values.append(ba)
datasets = Datasets([da])
mocker.spy(da, "random_benchmark")
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
datasets.random_benchmark(rng, weighted=weighted)
for _ in range(num_benchmarks)
)
}
assert da.random_benchmark.call_count == num_benchmarks
assert len(random_benchmarks) == 1
assert next(iter(random_benchmarks)) == "benchmark://foo-v0/abc"
def test_dataset_proto_scheme(tmpdir):
"""Test the proto:// scheme handler."""
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
proto = BenchmarkProto(uri="hello world")
with open(tmpdir / "file.pb", "wb") as f:
f.write(proto.SerializeToString())
benchmark = datasets.benchmark(f"proto://{tmpdir}/file.pb")
assert benchmark.proto.uri == "hello world"
assert benchmark.uri == "benchmark://hello world"
def test_dataset_proto_scheme_file_not_found(tmpdir):
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
with pytest.raises(FileNotFoundError):
datasets.benchmark(f"proto://{tmpdir}/not_a_file")
def test_dataset_file_scheme(tmpdir):
"""Test the file:// scheme handler."""
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
with open(tmpdir / "file.dat", "w") as f:
f.write("hello, world")
benchmark = datasets.benchmark(f"file://{tmpdir}/file.dat")
assert benchmark.proto.uri == f"file://{tmpdir}/file.dat"
assert benchmark.proto.program.contents == b"hello, world"
assert benchmark.uri == f"file://{tmpdir}/file.dat"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/datasets_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets."""
from pathlib import Path
import pytest
from compiler_gym.datasets.dataset import Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
# pylint: disable=abstract-method
def test_dataset_properties():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.name == "benchmark://test-v0"
assert dataset.scheme == "benchmark"
assert dataset.description == "A test dataset"
assert dataset.license == "MIT"
def test_dataset_optional_properties():
"""Test the default values of optional dataset properties."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.references == {} # Default value.
assert not dataset.deprecated
assert dataset.sort_order == 0
assert dataset.validatable == "No"
def test_dataset_default_version():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.name == "benchmark://test"
assert dataset.scheme == "benchmark"
assert dataset.version == 0
def test_dataset_optional_properties_explicit_values():
"""Test the non-default values of optional dataset properties."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
references={"GitHub": "https://github.com/facebookresearch/CompilerGym"},
deprecated="Deprecation message",
sort_order=10,
validatable="Yes",
)
assert dataset.references == {
"GitHub": "https://github.com/facebookresearch/CompilerGym"
}
assert dataset.deprecated
assert dataset.sort_order == 10
assert dataset.validatable == "Yes"
def test_dataset_inferred_properties():
"""Test the values of inferred dataset properties."""
dataset = Dataset(
name="benchmark://test-v2",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.scheme == "benchmark"
assert dataset.version == 2
def test_dataset_properties_read_only(tmpwd: Path):
"""Test that dataset properties are read-only."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
with pytest.raises(AttributeError):
dataset.name = "benchmark://test-v1"
with pytest.raises(AttributeError):
dataset.description = "A test dataset"
with pytest.raises(AttributeError):
dataset.license = "MIT"
with pytest.raises(AttributeError):
dataset.site_data_path = tmpwd
def test_dataset_site_data_directory(tmpwd: Path):
"""Test the path generated for site data."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
# Use endswith() since tmpwd on macOS may have a '/private' prefix.
assert str(dataset.site_data_path).endswith(
str(tmpwd / "test" / "benchmark" / "test-v0")
)
assert not dataset.site_data_path.is_dir() # Dir is not created until needed.
def test_dataset_deprecation_message(tmpwd: Path):
"""Test that a deprecation warning is emitted on install()."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
deprecated="The cat sat on the mat",
)
with pytest.warns(DeprecationWarning, match="The cat sat on the mat"):
dataset.install()
def test_dataset_equality_and_sorting():
"""Test comparison operators between datasets."""
a = Dataset(
name="benchmark://a-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
a2 = Dataset(
name="benchmark://a-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
b = Dataset(
name="benchmark://b-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert a == a2
assert a != b
assert a < b
assert a <= b
assert b > a
assert b >= a
# String comparisons
assert a == "benchmark://a-v0"
assert a != "benchmark://b-v0"
assert a < "benchmark://b-v0"
# Sorting
assert sorted([a2, b, a]) == [
"benchmark://a-v0",
"benchmark://a-v0",
"benchmark://b-v0",
]
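# A minimal concrete Dataset: benchmarks are held in a dict keyed by URI so
# that lookup, iteration, and size can be exercised without real files.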
class DatasetForTesting(Dataset):
"""A dataset to use for testing."""
def __init__(self, benchmarks=None):
super().__init__(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
self._benchmarks = benchmarks or {
"benchmark://test-v0/a": 1,
"benchmark://test-v0/b": 2,
"benchmark://test-v0/c": 3,
}
def benchmark_uris(self):
return sorted(self._benchmarks)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri):
return self._benchmarks[str(uri)]
@property
def size(self):
return len(self._benchmarks)
def test_dataset_size():
dataset = DatasetForTesting()
assert dataset.size == 3
assert len(dataset) == 3
def test_benchmarks_lookup_by_uri():
dataset = DatasetForTesting()
assert dataset.benchmark("benchmark://test-v0/b") == 2
assert dataset["benchmark://test-v0/b"] == 2
def test_benchmarks_iter():
dataset = DatasetForTesting()
assert list(dataset.benchmarks()) == [1, 2, 3]
assert list(dataset) == [1, 2, 3]
def test_with_site_data():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.has_site_data
def test_without_site_data():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
)
assert not dataset.has_site_data
with pytest.raises(
ValueError, match=r"^Dataset has no site data path: benchmark://test-v0$"
):
dataset.site_data_path # noqa
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/dataset_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets:benchmark."""
from pathlib import Path
import pytest
from compiler_gym.datasets import Benchmark, BenchmarkSource
from compiler_gym.errors import ValidationError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_benchmark_attribute_outside_init():
"""Test that new attributes can be added to Benchmark."""
benchmark = Benchmark(None)
benchmark.foobar = 123 # pylint: disable=attribute-defined-outside-init
assert benchmark.foobar == 123
def test_benchmark_subclass_attribute_outside_init():
"""Test that new attributes can be added to Benchmark subclass."""
class TestBenchmark(Benchmark):
pass
benchmark = TestBenchmark(None)
benchmark.foobar = 123 # pylint: disable=attribute-defined-outside-init
assert benchmark.foobar == 123
def test_benchmark_properties():
"""Test benchmark properties."""
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
assert benchmark.uri == "benchmark://example-compiler-v0/foobar"
assert benchmark.proto == BenchmarkProto(
uri="benchmark://example-compiler-v0/foobar"
)
def test_benchmark_immutable():
"""Test that benchmark properties are immutable."""
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
with pytest.raises(AttributeError):
benchmark.uri = 123
with pytest.raises(AttributeError):
benchmark.proto = 123
def test_add_validation_callbacks_values():
"""Test methods for adding and checking custom validation callbacks."""
def a(env):
pass
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
assert benchmark.validation_callbacks() == []
assert not benchmark.is_validatable()
benchmark.add_validation_callback(a)
assert benchmark.validation_callbacks() == [a]
assert benchmark.is_validatable()
benchmark.add_validation_callback(a)
assert benchmark.validation_callbacks() == [a, a]
def test_add_validation_callbacks_call_count():
"""Test that custom validation callbacks are called on validate()."""
a_call_count = 0
b_call_count = 0
def a(env):
nonlocal a_call_count
a_call_count += 1
def b(env):
nonlocal b_call_count
b_call_count += 1
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == []
assert a_call_count == 1
assert b_call_count == 0
benchmark.add_validation_callback(b)
errors = benchmark.validate(env=None)
assert errors == []
assert a_call_count == 2
assert b_call_count == 1
def test_validation_callback_error():
"""Test error propagation from custom validation callback."""
def a(env):
yield ValidationError(type="Compilation Error")
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == [
ValidationError(type="Compilation Error"),
ValidationError(type="Runtime Error"),
]
def test_validation_callback_error_iter():
"""Test error propagation from custom validation callback using iterable."""
def a(env):
yield ValidationError(type="Compilation Error")
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.ivalidate(env=None)
    assert next(errors) == ValidationError(type="Compilation Error")
    assert next(errors) == ValidationError(type="Runtime Error")
def test_validation_callback_flaky():
"""Test error propagation on callback which *may* fail."""
flaky = False
def a(env):
nonlocal flaky
del env
if flaky:
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == []
flaky = True
errors = benchmark.validate(env=None)
assert errors == [
ValidationError(type="Runtime Error"),
]
def test_eq_benchmarks():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
assert a == b
def test_eq_strings():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = "benchmark://example-compiler-v0/foo"
assert a == b
def test_ne_benchmarks():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/bar"))
assert a != b
def test_ne_strings():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = "benchmark://example-compiler-v0/bar"
assert a != b
def test_benchmark_sources(tmpwd: Path):
a = Benchmark(
BenchmarkProto(uri="benchmark://example-compiler-v0/foo"),
sources=[("example.py", "Hello, world!".encode("utf-8"))],
)
a.add_source(BenchmarkSource(filename="foo.py", contents="Hi".encode("utf-8")))
assert list(a.sources) == [
BenchmarkSource("example.py", "Hello, world!".encode("utf-8")),
BenchmarkSource(filename="foo.py", contents="Hi".encode("utf-8")),
]
a.write_sources_to_directory("benchmark_sources")
with open(tmpwd / "benchmark_sources" / "example.py") as f:
assert f.read() == "Hello, world!"
with open(tmpwd / "benchmark_sources" / "foo.py") as f:
assert f.read() == "Hi"
def test_benchmark_from_file(tmpwd: Path):
path = tmpwd / "foo.txt"
with open(path, "w") as f:
f.write("Hello, world!")
benchmark = Benchmark.from_file("benchmark://example-compiler-v0/foo", path)
assert benchmark.proto.program.contents.decode("utf-8") == "Hello, world!"
def test_benchmark_from_file_not_found(tmpwd: Path):
path = tmpwd / "foo.txt"
with pytest.raises(FileNotFoundError, match=str(path)):
Benchmark.from_file("benchmark://example-compiler-v0/foo", path)
def test_benchmark_equality_and_sorting():
    """Test comparison operators between benchmarks."""
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/a"))
a2 = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/a"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/b"))
assert a == a2
assert a != b
assert a < b
assert a <= b
assert b > a
assert b >= a
# String comparisons
assert a == "benchmark://example-compiler-v0/a"
assert a != "benchmark://example-compiler-v0/b"
assert a < "benchmark://example-compiler-v0/b"
# Sorting
assert sorted([a2, b, a]) == [
"benchmark://example-compiler-v0/a",
"benchmark://example-compiler-v0/a",
"benchmark://example-compiler-v0/b",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/benchmark_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:scalar."""
from compiler_gym.spaces import Commandline, CommandlineFlag
from tests.test_main import main
def test_sample():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.sample() in {0, 1, 2}
def test_contains():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.contains(0)
assert space.contains(1)
assert space.contains(2)
assert not space.contains(-11)
assert not space.contains(1.5)
assert not space.contains(4)
def test_to_and_from_string():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.to_string([0, 1, 2]) == "-a -b -c"
assert space.from_string(space.to_string([0, 1, 2])) == [0, 1, 2]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/commandline_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete
from compiler_gym.spaces import Dict
from tests.test_main import main
def test_equal():
assert Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict") == Dict(
{"a": Discrete(2), "b": Discrete(3)}, name="test_dict"
)
def test_not_equal():
dict_space = Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2), "c": Discrete(3)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict_2")
assert dict_space != "not_a_dict"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/dict_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:sequence."""
from copy import deepcopy
import pytest
from compiler_gym.spaces import Scalar, Sequence, SpaceSequence
from tests.test_main import main
def test_sample():
space = Sequence(name="test", size_range=(0, None), dtype=int)
with pytest.raises(NotImplementedError):
space.sample()
def test_str_contains():
space = Sequence(name="test", size_range=(0, None), dtype=str)
assert space.contains("Hello, world!")
assert space.contains("")
assert not space.contains([1, 2, 3])
def test_str_contains_too_long():
space = Sequence(name="test", size_range=(0, 4), dtype=str)
assert not space.contains("Hello, world!")
assert space.contains("")
assert not space.contains([1, 2, 3])
def test_str_contains_too_short():
space = Sequence(name="test", size_range=(3, None), dtype=str)
assert space.contains("Hello, world!")
assert not space.contains("")
assert not space.contains([1, 2, 3])
def test_int_contains():
space = Sequence(name="test", size_range=(5, 5), dtype=int)
assert not space.contains(list(range(4)))
assert space.contains(list(range(5)))
assert not space.contains(list(range(6)))
def test_contains_with_float_scalar_range():
space = Sequence(
name="test",
size_range=(3, 3),
dtype=float,
scalar_range=Scalar(name="test", min=0, max=1, dtype=float),
)
assert space.contains([0.0, 0.0, 0.0])
assert space.contains([0.1, 1.0, 0.5])
assert not space.contains([0.0, 0.0, -1.0]) # out of bounds
assert not space.contains([0.0, 0, 0.1]) # wrong dtype
assert not space.contains([0.0, 0]) # wrong shape
def test_bytes_contains():
space = Sequence(name="test", size_range=(0, None), dtype=bytes)
assert space.contains(b"Hello, world!")
assert space.contains(b"")
assert not space.contains("Hello, world!")
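# SpaceSequence validates each element against the wrapped space and enforces
# the size range.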
def test_space_sequence_contains():
subspace = Scalar(name="subspace", min=0, max=1, dtype=float)
space_seq = SpaceSequence(name="seq", space=subspace, size_range=(0, 2))
assert space_seq.contains([0.5, 0.6])
assert not space_seq.contains(["not-a-number"])
assert not space_seq.contains([2.0])
assert not space_seq.contains([0.1, 0.2, 0.3])
def test_equal():
seq = Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq == deepcopy(seq)
def test_not_equal():
seq = Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq2",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[0, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 3],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=float,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt2",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[0, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 5],
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/sequence_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete
from compiler_gym.spaces import Tuple
from tests.test_main import main
def test_equal():
assert Tuple([Discrete(2), Discrete(3)], name="test_tuple") == Tuple(
[Discrete(2), Discrete(3)], name="test_tuple"
)
def test_not_equal():
tuple_space = Tuple([Discrete(2), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(3), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2), Discrete(3)], name="test_tuple_2")
assert tuple_space != "not_a_tuple"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/tuple_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/spaces/action_space.py."""
from compiler_gym.spaces import ActionSpace, Discrete, NamedDiscrete
from tests.test_main import main
class MockActionSpace:
name = "mock"
foo = 1
def sample(self):
return 1
def seed(self, s):
pass
def contains(self, x):
pass
def __repr__(self) -> str:
return self.name
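# ActionSpace proxies attribute access and method calls to the wrapped space;
# mocker.spy() is used below to count the forwarded calls.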
def test_action_space_forward(mocker):
a = MockActionSpace()
ma = ActionSpace(a)
assert ma.name == "mock"
assert ma.foo == 1
mocker.spy(a, "sample")
assert ma.sample() == 1
assert a.sample.call_count == 1
mocker.spy(a, "seed")
ma.seed(10)
assert a.seed.call_count == 1
mocker.spy(a, "contains")
    10 in ma  # Membership check forwards to the wrapped space's contains().
assert a.contains.call_count == 1
def test_action_space_comparison():
a = MockActionSpace()
b = ActionSpace(a)
c = MockActionSpace()
assert b == a
assert b.wrapped == a
assert b != c
def test_action_space_default_string_conversion():
"""Test that to_string() and from_string() are forward to subclasses."""
a = Discrete(name="a", n=3)
ma = ActionSpace(a)
assert ma.to_string([0, 1, 0]) == "0,1,0"
assert ma.from_string("0,1,0") == [0, 1, 0]
def test_action_space_forward_string_conversion():
"""Test that to_string() and from_string() are forward to subclasses."""
a = NamedDiscrete(name="a", items=["a", "b", "c"])
ma = ActionSpace(a)
assert ma.to_string([0, 1, 2, 0]) == "a b c a"
assert ma.from_string("a b c a") == [0, 1, 2, 0]
def test_action_space_str():
ma = ActionSpace(MockActionSpace())
assert str(ma) == "ActionSpace(mock)"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/action_space_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/spaces/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:named_discrete."""
import pytest
from compiler_gym.spaces import NamedDiscrete
from tests.test_main import main
def test_empty_space():
with pytest.raises(ValueError, match="No values for discrete space"):
NamedDiscrete([], name="test")
def test_invalid_name_lookup():
space = NamedDiscrete(["foo"], name="test")
with pytest.raises(ValueError):
_ = space["bar"]
def test_space_size():
space = NamedDiscrete(["a", "b", "c"], name="test")
assert space.n == 3
def test_name_lookup():
space = NamedDiscrete(["a", "b", "c"], name="test")
assert space["a"] == 0
assert space["b"] == 1
assert space["c"] == 2
def test_repr():
space = NamedDiscrete(["foo", "bar"], name="test")
assert str(space) == "NamedDiscrete([foo, bar])"
def test_to_string():
space = NamedDiscrete(["foo", "bar"], name="test")
assert space.to_string(0) == "foo"
assert space.to_string([0]) == "foo"
assert space.to_string([0, 0, 1]) == "foo foo bar"
def test_equal():
assert NamedDiscrete(["a", "b"], name="test_named_discrete") == NamedDiscrete(
["a", "b"], name="test_named_discrete"
)
def test_not_equal():
named_discrete = NamedDiscrete(["a", "b"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "bb"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "b", "c"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "b"], name="test_named_discrete_2")
assert named_discrete != "not_a_named_discrete"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/named_discrete_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.spaces.Reward."""
from copy import deepcopy
from compiler_gym.spaces import Reward
from tests.test_main import main
def test_equal():
reward = Reward(
name="test_reward",
observation_spaces=["a", "b"],
default_value=5,
min=-10,
max=10,
default_negates_returns=True,
success_threshold=3,
deterministic=False,
platform_dependent=True,
)
assert reward == deepcopy(reward)
assert reward == "test_reward"
def test_not_equal():
reward = Reward(
name="test_reward",
observation_spaces=["a", "b"],
default_value=5,
min=-10,
max=10,
default_negates_returns=True,
success_threshold=3,
deterministic=False,
platform_dependent=True,
)
reward2 = deepcopy(reward)
reward2.name = "test_reward_2"
assert reward != reward2
assert reward != "test_reward_2"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:scalar."""
from copy import copy, deepcopy
from compiler_gym.spaces import Scalar
from tests.test_main import main
def test_sample():
space = Scalar(name="test", min=-10, max=10, dtype=int)
x = space.sample()
assert isinstance(x, int)
assert -10 <= x <= 10
def test_int_contains():
space = Scalar(name="test", min=-10, max=10, dtype=int)
assert space.contains(-10)
assert not space.contains(-11)
assert not space.contains(0.5)
def test_int_contains_no_upper_bound():
space = Scalar(name="test", min=0, max=None, dtype=int)
assert space.contains(0)
assert not space.contains(-1)
assert space.contains(1000)
def test_equality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=int)
assert space_a == space_b
def test_dtype_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=float)
assert space_a != space_b
def test_upper_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=5, dtype=int)
assert space_a != space_b
def test_lower_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=None, max=None, dtype=int)
assert space_a != space_b
def test_equal():
assert Scalar(name="test_scalar", min=-10, max=10, dtype=int) == Scalar(
name="test_scalar", min=-10, max=10, dtype=int
)
def test_not_equal():
scalar = Scalar(name="test_scalar", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar_2", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-5, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=5, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=10, dtype=float)
    assert scalar != "not_a_scalar"
def test_deepcopy_regression_test():
"""Test to reproduce github.com/facebookresearch/CompilerGym/issues/768."""
x = Scalar(name="foo")
copy(x)
deepcopy(x)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/scalar_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.spaces import Box
from tests.test_main import main
def test_equal():
assert Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int) == Box(
low=0, high=1, name="test_box", shape=[1, 2], dtype=int
)
assert Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int) == Box(
low=0, high=1, name="test_box", shape=[1, 2], dtype=float
)
def test_not_equal():
box = Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=1, name="test_box_2", shape=[1, 2], dtype=int)
assert box != Box(low=-1, high=1, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=2, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=1, name="test_box", shape=[1, 3], dtype=int)
assert box != "not_a_box"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/box_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.spaces import Discrete
from tests.test_main import main
def test_equal():
assert Discrete(2, name="test_discrete") == Discrete(2, name="test_discrete")
def test_not_equal():
discrete = Discrete(2, name="test_discrete")
assert discrete != Discrete(3, name="test_discrete")
assert discrete != Discrete(2, name="test_discrete_2")
assert discrete != "not_a_discrete"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/discrete_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from compiler_gym.spaces import Permutation, Scalar
from tests.test_main import main
def test_invalid_scalar_range_dtype():
with pytest.raises(
TypeError, match="Permutation space can have integral scalar range only."
):
Permutation(name="", scalar_range=Scalar(name="", min=0, max=2, dtype=float))
def test_equal():
assert Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
) == Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
)
def test_not_equal():
permutation = Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
)
assert permutation != Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=1, dtype=int)
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/permutation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from enum import Enum
from io import StringIO
from pathlib import Path
from typing import List
import gym
import pytest
from flaky import flaky
import compiler_gym
from compiler_gym.compiler_env_state import (
CompilerEnvStateReader,
CompilerEnvStateWriter,
)
from compiler_gym.envs import CompilerEnv, llvm
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.errors import ServiceError
from compiler_gym.service.connection import CompilerGymServiceConnection
from tests.pytest_plugins import llvm as llvm_plugin
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
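# The env fixture runs each test twice: once with a gym-managed environment
# and once against an explicitly constructed service connection.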
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> CompilerEnv:
"""Create an LLVM environment."""
if request.param == "local":
with gym.make("llvm-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_version(env: LlvmEnv):
assert env.version == compiler_gym.__version__
def test_compiler_version(env: LlvmEnv):
assert env.compiler_version.startswith("10.0.0")
def test_action_space_names(env: LlvmEnv, action_names: List[str]):
assert set(env.action_space.names) == set(action_names)
def test_action_spaces_names(env: LlvmEnv):
assert {a.name for a in env.action_spaces} == {"PassesAll"}
def test_all_flags_are_unique(env: LlvmEnv):
assert sorted(env.action_space.flags) == sorted(set(env.action_space.flags))
@pytest.mark.parametrize("benchmark_name", llvm_plugin.BENCHMARK_NAMES)
def test_benchmark_names(env: LlvmEnv, benchmark_name: str):
"""Check that all benchmark names can be found in the datasets."""
assert env.datasets.benchmark(benchmark_name)
@pytest.mark.parametrize("always_send_benchmark_on_reset", [False, True])
def test_double_reset(env: LlvmEnv, always_send_benchmark_on_reset: bool):
env.service.opts.always_send_benchmark_on_reset = always_send_benchmark_on_reset
env.reset(benchmark="cbench-v1/crc32")
env.reset(benchmark="cbench-v1/crc32")
assert env.in_episode
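# If the backend service dies mid-episode, step() should end the episode and
# fall back to the reward space's configured default value.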
@flaky
def test_connection_dies_default_reward(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.reward_space.default_negates_returns = False
env.reward_space.default_value = 2.5
env.episode_reward = 10
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
_, reward, done, _ = env.step(0)
assert done
assert reward == 2.5
@flaky
def test_connection_dies_default_reward_negated(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.reward_space.default_negates_returns = True
env.reward_space.default_value = 2.5
env.episode_reward = 10
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
observation, reward, done, _ = env.step(0)
assert done
assert reward == -7.5 # negates reward.
def test_state_serialize_deserialize_equality(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
env.episode_reward = 10
state = env.state
assert state.reward == 10
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.reward == 10
assert state == state_from_csv
def test_apply_state(env: LlvmEnv):
"""Test that apply() on a clean environment produces same state."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
with gym.make("llvm-v0", reward_space="IrInstructionCount") as other:
other.apply(env.state)
assert other.state == env.state
def test_set_observation_space_from_spec(env: LlvmEnv):
env.observation_space = env.observation.spaces["Autophase"]
obs = env.observation_space
env.observation_space = "Autophase"
assert env.observation_space == obs
def test_set_reward_space_from_spec(env: LlvmEnv):
env.reward_space = env.reward.spaces["IrInstructionCount"]
reward = env.reward_space
env.reward_space = "IrInstructionCount"
assert env.reward_space == reward
def test_same_reward_after_reset(env: LlvmEnv):
"""Check that running the same action after calling reset() produces
same reward.
"""
env.reward_space = "IrInstructionCount"
env.benchmark = "cbench-v1/dijkstra"
action = env.action_space.flags.index("-instcombine")
env.reset()
_, reward_a, _, _ = env.step(action)
assert reward_a, "Sanity check that action produces a reward"
env.reset()
_, reward_b, _, _ = env.step(action)
assert reward_a == reward_b
def test_write_bitcode(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
env.write_bitcode("file.bc")
assert Path("file.bc").is_file()
def test_write_ir(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
    env.write_ir("file.ll")
assert Path("file.ll").is_file()
def test_ir_sha1(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
before = env.ir_sha1
_, _, done, info = env.step(env.action_space.flags.index("-mem2reg"))
assert not done, info
assert not info["action_had_no_effect"], "sanity check failed, action had no effect"
after = env.ir_sha1
assert before != after
def test_generate_enum_declarations(env: LlvmEnv):
assert issubclass(llvm.observation_spaces, Enum)
assert issubclass(llvm.reward_spaces, Enum)
def test_step_multiple_actions_list(env: LlvmEnv):
"""Pass a list of actions to step()."""
env.reset(benchmark="cbench-v1/crc32")
actions = [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
_, _, done, _ = env.multistep(actions)
assert not done
assert env.actions == actions
def test_step_multiple_actions_generator(env: LlvmEnv):
"""Pass an iterable of actions to step()."""
env.reset(benchmark="cbench-v1/crc32")
actions = (
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
)
_, _, done, _ = env.multistep(actions)
assert not done
assert env.actions == [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM benchmark handling."""
import pytest
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.util.runfiles_path import runfiles_path
from tests.pytest_plugins.common import bazel_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
INVALID_IR_PATH = runfiles_path("tests/llvm/invalid_ir.ll")
@bazel_only # invalid_ir.ll not installed
def test_reset_invalid_ir(env: LlvmEnv):
"""Test that setting the $CXX to an invalid binary raises an error."""
benchmark = llvm.make_benchmark(INVALID_IR_PATH)
with pytest.raises(BenchmarkInitError, match="Failed to compute .text size cost"):
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/invalid_ir_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM benchmark handling."""
import re
import subprocess
import tempfile
from pathlib import Path
import gym
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import LlvmEnv, llvm
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from compiler_gym.third_party import llvm as llvm_paths
from compiler_gym.util.runfiles_path import runfiles_path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.pytest_plugins.common import bazel_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The path of an IR file that assembles but does not compile.
INVALID_IR_PATH = runfiles_path("tests/llvm/invalid_ir.ll")
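# A prebuilt cbench bitcode used for constructing custom benchmarks, together
# with its known IR instruction count.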
EXAMPLE_BITCODE_FILE = runfiles_path(
"compiler_gym/third_party/cbench/cbench-v1/crc32.bc"
)
EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT = 242
def test_reset_invalid_benchmark(env: LlvmEnv):
invalid_benchmark = "an invalid benchmark"
with pytest.raises(
LookupError, match=f"Dataset not found: benchmark://{invalid_benchmark}"
):
env.reset(benchmark=invalid_benchmark)
def test_invalid_benchmark_data(env: LlvmEnv):
benchmark = Benchmark.from_file_contents(
"benchmark://new", "Invalid bitcode".encode("utf-8")
)
with pytest.raises(
BenchmarkInitError, match='Failed to parse LLVM bitcode: "benchmark://new"'
):
env.reset(benchmark=benchmark)
def test_invalid_benchmark_missing_file(env: LlvmEnv):
benchmark = Benchmark(
BenchmarkProto(
uri="benchmark://new",
)
)
with pytest.raises(ValueError, match="No program set"):
env.reset(benchmark=benchmark)
def test_benchmark_path_empty_file(env: LlvmEnv):
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
(tmpdir / "test.bc").touch()
benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
env.reset(benchmark=benchmark)
def test_invalid_benchmark_path_contents(env: LlvmEnv):
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
with open(str(tmpdir / "test.bc"), "w") as f:
f.write("Invalid bitcode")
benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
env.reset(benchmark=benchmark)
def test_benchmark_path_invalid_scheme(env: LlvmEnv):
benchmark = Benchmark(
BenchmarkProto(
uri="benchmark://new", program=File(uri="invalid_scheme://test")
),
)
with pytest.raises(
ValueError,
match=(
"Invalid benchmark data URI. "
'Only the file:/// scheme is supported: "invalid_scheme://test"'
),
):
env.reset(benchmark=benchmark)
def test_custom_benchmark(env: LlvmEnv):
benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
env.reset(benchmark=benchmark)
assert env.benchmark == "benchmark://new"
def test_custom_benchmark_constructor():
benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
with gym.make("llvm-v0", benchmark=benchmark) as env:
env.reset()
assert env.benchmark == "benchmark://new"
def test_make_benchmark_single_bitcode(env: LlvmEnv):
benchmark = llvm.make_benchmark(EXAMPLE_BITCODE_FILE)
assert benchmark == f"benchmark://file-v0{EXAMPLE_BITCODE_FILE}"
assert benchmark.uri.scheme == "benchmark"
assert benchmark.uri.dataset == "file-v0"
with open(EXAMPLE_BITCODE_FILE, "rb") as f:
contents = f.read()
assert benchmark.proto.program.contents == contents
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
assert env.observation["IrInstructionCount"] == EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT
@bazel_only
def test_make_benchmark_single_ll():
"""Test passing a single .ll file into make_benchmark()."""
benchmark = llvm.make_benchmark(INVALID_IR_PATH)
assert str(benchmark.uri).startswith("benchmark://user-v0/")
assert benchmark.uri.scheme == "benchmark"
assert benchmark.uri.dataset == "user-v0"
def test_make_benchmark_single_clang_job(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "input.c"
with open(str(source), "w") as f:
f.write("int A() { return 0; }")
benchmark = llvm.make_benchmark(str(source))
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
def test_make_benchmark_split_clang_job(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source_1 = Path(d) / "a.c"
source_2 = Path(d) / "b.c"
with open(str(source_1), "w") as f:
f.write("int B() { return A(); }")
with open(str(source_2), "w") as f:
f.write("int A() { return 0; }")
benchmark = llvm.make_benchmark(
[
str(source_1),
str(source_2),
]
)
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @B\(\)", env.observation["Ir"])
def test_make_benchmark_single_clang_invocation_multiple_inputs():
with tempfile.TemporaryDirectory() as d:
source_1 = Path(d) / "a.c"
source_2 = Path(d) / "b.c"
with open(str(source_1), "w") as f:
f.write("int B() { return A(); }")
with open(str(source_2), "w") as f:
f.write("int A() { return 0; }")
# cannot specify -o when generating multiple output files
with pytest.raises(OSError):
llvm.make_benchmark(llvm.ClangInvocation([str(source_1), str(source_2)]))
def test_make_benchmark_undefined_symbol(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return A(); }")
benchmark = llvm.make_benchmark(source)
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"declare (dso_local )?i32 @A\(\.\.\.\)", env.observation["Ir"])
def test_make_benchmark_missing_file():
with tempfile.TemporaryDirectory() as d:
with pytest.raises(FileNotFoundError):
llvm.make_benchmark(Path(d) / "a.c")
with pytest.raises(FileNotFoundError):
llvm.make_benchmark(str(Path(d) / "a.c"))
def test_make_benchmark_unrecognized_file_type():
with tempfile.TemporaryDirectory() as d:
path = Path(d) / "foo.txt"
path.touch()
with pytest.raises(ValueError, match=r"Unrecognized file type"):
llvm.make_benchmark(path)
def test_make_benchmark_clang_job_standard_libraries(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "input.cc"
with open(str(source), "w") as f:
f.write('#include <stdio.h>\nint A() { printf(""); return 0; }')
benchmark = llvm.make_benchmark(str(source))
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @_Z1Av\(\)", env.observation["Ir"])
assert re.search(r"declare (dso_local )?i32 @printf", env.observation["Ir"])
def test_make_benchmark_invalid_clang_job():
with pytest.raises(OSError, match="Compilation job failed with returncode"):
llvm.make_benchmark(llvm.ClangInvocation(["-invalid-arg"]))
def test_custom_benchmark_is_added_on_service_restart(env: LlvmEnv):
# When the service is restarted, the environment still uses the same custom
# benchmark.
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return 0; }")
benchmark = llvm.make_benchmark(source)
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
# Kill the service so that the next call to reset() starts a new one.
env.close()
assert env.service is None
env.reset()
assert env.benchmark == benchmark.uri
def test_two_custom_benchmarks_reset(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return 0; }")
benchmark1 = llvm.make_benchmark(source)
benchmark2 = llvm.make_benchmark(source)
assert benchmark1.uri != benchmark2.uri
env.reset(benchmark=benchmark1)
assert env.benchmark == benchmark1.uri
env.reset()
assert env.benchmark == benchmark1.uri
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = benchmark2
env.reset()
assert env.benchmark == benchmark2.uri
def test_failing_build_cmd(env: LlvmEnv, tmpdir):
"""Test that reset() raises an error if build command fails."""
(Path(tmpdir) / "program.c").touch()
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN", "-invalid-cc-argument"]
)
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
with pytest.raises(
BenchmarkInitError,
match=r"clang: error: unknown argument: '-invalid-cc-argument'",
):
env.reset(benchmark=benchmark)
def test_make_benchmark_from_command_line_empty_input(env: LlvmEnv):
with pytest.raises(ValueError, match="Input command line is empty"):
env.make_benchmark_from_command_line("")
with pytest.raises(ValueError, match="Input command line is empty"):
env.make_benchmark_from_command_line([])
@pytest.mark.parametrize("cmd", ["gcc", ["gcc"]])
def test_make_benchmark_from_command_line_insufficient_args(env: LlvmEnv, cmd):
with pytest.raises(ValueError, match="Input command line 'gcc' is too short"):
env.make_benchmark_from_command_line(cmd)
@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line_build_cmd(env: LlvmEnv, cmd):
with temporary_working_directory() as cwd:
with open("in.c", "w") as f:
f.write("int main() { return 0; }")
bm = env.make_benchmark_from_command_line(cmd, system_includes=False)
assert bm.proto.dynamic_config.build_cmd.argument[:4] == [
str(llvm_paths.clang_path()),
"-xir",
"$IN",
"-o",
]
assert bm.proto.dynamic_config.build_cmd.argument[-1].endswith(f"{cwd}/foo")
@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line(env: LlvmEnv, cmd):
with temporary_working_directory() as cwd:
with open("in.c", "w") as f:
f.write("int main() { return 0; }")
bm = env.make_benchmark_from_command_line(cmd)
assert not (cwd / "foo").is_file()
env.reset(benchmark=bm)
assert "main()" in env.ir
assert (cwd / "foo").is_file()
(cwd / "foo").unlink()
bm.compile(env)
assert (cwd / "foo").is_file()
def test_make_benchmark_from_command_line_no_system_includes(env: LlvmEnv):
with temporary_working_directory():
with open("in.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main() { return 0; }
"""
)
with pytest.raises(BenchmarkInitError, match="stdio.h"):
env.make_benchmark_from_command_line("gcc in.c", system_includes=False)
def test_make_benchmark_from_command_line_system_includes(env: LlvmEnv):
with temporary_working_directory():
with open("in.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main() { return 0; }
"""
)
env.make_benchmark_from_command_line("gcc in.c")
def test_make_benchmark_from_command_line_stdin(env: LlvmEnv):
with pytest.raises(ValueError, match="Input command line reads from stdin"):
env.make_benchmark_from_command_line(["gcc", "-xc", "-"])
@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_multiple_input_sources(
env: LlvmEnv, retcode: int
):
"""Test that command lines with multiple source files are linked together."""
with temporary_working_directory() as cwd:
with open("a.c", "w") as f:
f.write("int main() { return B(); }")
with open("b.c", "w") as f:
f.write(f"int B() {{ return {retcode}; }}")
bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.c", "-o", "foo"])
assert not (cwd / "foo").is_file()
env.reset(benchmark=bm)
assert "main()" in env.ir
bm.compile(env)
assert (cwd / "foo").is_file()
p = subprocess.Popen(["./foo"])
p.communicate(timeout=60)
assert p.returncode == retcode
@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_mixed_source_and_object_files(
env: LlvmEnv, retcode: int
):
"""Test a command line that contains both source files and precompiled
object files. The object files should be filtered from compilation but
used for the final link.
"""
with temporary_working_directory():
with open("a.c", "w") as f:
f.write(
"""
#include "b.h"
int A() {
return B();
}
int main() {
return A();
}
"""
)
with open("b.c", "w") as f:
f.write(f"int B() {{ return {retcode}; }}")
with open("b.h", "w") as f:
f.write("int B();")
# Compile b.c to object file:
subprocess.check_call([str(llvm_paths.clang_path()), "b.c", "-c"], timeout=60)
assert (Path("b.o")).is_file()
bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.o", "-o", "foo"])
env.reset(benchmark=bm)
bm.compile(env)
assert Path("foo").is_file()
p = subprocess.Popen(["./foo"])
p.communicate(timeout=60)
assert p.returncode == retcode
def test_make_benchmark_from_command_line_only_object_files(env: LlvmEnv):
with temporary_working_directory():
with open("a.c", "w") as f:
f.write("int A() { return 5; }")
# Compile b.c to object file:
subprocess.check_call([str(llvm_paths.clang_path()), "a.c", "-c"], timeout=60)
assert (Path("a.o")).is_file()
with pytest.raises(
ValueError, match="Input command line has no source file inputs"
):
env.make_benchmark_from_command_line(["gcc", "a.o", "-c"])
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/custom_benchmarks_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests for LlvmEnv.validate()."""
from io import StringIO
import pytest
from compiler_gym import CompilerEnvStateReader
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The maximum number of times to call validate() on a state to check for an
# error.
VALIDATION_FLAKINESS = 3
# A list of CSV states that should pass validation, to be used as regression
# tests.
REGRESSION_TEST_STATES = """\
benchmark://cbench-v1/rijndael,,,opt -gvn -loop-unroll -instcombine -gvn -loop-unroll -instcombine input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -gvn -loop-unroll -mem2reg -loop-rotate -gvn -loop-unroll -mem2reg -loop-rotate input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -gvn-hoist input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -jump-threading -sink -partial-inliner -mem2reg -inline -jump-threading -sink -partial-inliner -mem2reg -inline input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -mem2reg -indvars -loop-unroll -simplifycfg -mem2reg -indvars -loop-unroll -simplifycfg input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -mem2reg -instcombine -early-cse-memssa -loop-unroll input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -reg2mem -licm -reg2mem -licm -reg2mem -licm input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -sroa -simplifycfg -partial-inliner input.bc -o output.bc
"""
REGRESSION_TEST_STATES = list(CompilerEnvStateReader(StringIO(REGRESSION_TEST_STATES)))
REGRESSION_TEST_STATE_NAMES = [
f"{s.benchmark},{s.commandline}" for s in REGRESSION_TEST_STATES
]
# A list of CSV states that are known to fail validation.
KNOWN_BAD_STATES = """\
benchmark://cbench-v1/susan,0.40581008446378297,6.591785192489624,opt -mem2reg -reg2mem -gvn -reg2mem -gvn -newgvn input.bc -o output.bc
"""
KNOWN_BAD_STATES = list(CompilerEnvStateReader(StringIO(KNOWN_BAD_STATES)))
KNOWN_BAD_STATE_NAMES = [f"{s.benchmark},{s.commandline}" for s in KNOWN_BAD_STATES]
#
# NOTE(github.com/facebookresearch/CompilerGym/issues/103): The following
# regression tests are deprecated after -structurizecfg was deactivated:
#
# benchmark://cbench-v1/tiff2bw,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiff2rgba,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiffdither,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiffmedian,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cBench-v0/susan,-0.5352209944751382,1.849454402923584,opt -structurizecfg -loop-extract -mergereturn -structurizecfg -loop-extract -mergereturn input.bc -o output.bc
# benchmark://cBench-v0/susan,0.9802486187845304,1.7552905082702637,opt -mem2reg -simplifycfg -lcssa -break-crit-edges -newgvn -mem2reg -simplifycfg -lcssa -break-crit-edges -newgvn input.bc -o output.bc
@skip_on_ci
@pytest.mark.parametrize("state", KNOWN_BAD_STATES, ids=KNOWN_BAD_STATE_NAMES)
def test_validate_known_bad_trajectory(env: LlvmEnv, state):
env.apply(state)
for _ in range(VALIDATION_FLAKINESS):
result = env.validate()
if result.okay():
pytest.fail("Validation succeeded on state where it should have failed")
@skip_on_ci
@pytest.mark.parametrize(
"state", REGRESSION_TEST_STATES, ids=REGRESSION_TEST_STATE_NAMES
)
def test_validate_known_good_trajectory(env: LlvmEnv, state):
env.apply(state)
for _ in range(VALIDATION_FLAKINESS):
result = env.validate()
if not result.okay():
pytest.fail(f"Validation failed: {result}\n{result.dict()}")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/validation_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import gym
import numpy as np
import pytest
import compiler_gym # noqa Register environments.
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.errors import ServiceError
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> ClientServiceCompilerEnv:
# Redefine fixture to test both gym.make(...) and unmanaged service
# connections.
if request.param == "local":
with gym.make("llvm-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_env_dies_reset(env: ClientServiceCompilerEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
# Check that the environment doesn't fall over.
observation, reward, done, info = env.step(0)
assert done, info["error_details"]
assert not env.in_episode
# Check that default values are returned.
np.testing.assert_array_equal(observation, np.zeros(AUTOPHASE_FEATURE_DIM))
assert reward == 0
# Reset the environment and check that it works.
env.reset(benchmark="cbench-v1/crc32")
assert env.in_episode
observation, reward, done, info = env.step(0)
assert not done, info["error_details"]
assert observation is not None
assert reward is not None
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/service_connection_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LlvmEnv.episode_reward."""
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_episode_reward_init_zero(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
assert env.episode_reward == 0
_, reward, _, _ = env.step(env.action_space["-mem2reg"])
assert reward > 0
assert env.episode_reward == reward
env.reset()
assert env.episode_reward == 0
def test_episode_reward_with_non_default_reward_space(env: LlvmEnv):
"""Test that episode_reward is not updated when custom rewards passed to
step()."""
env.reward_space = "IrInstructionCountOz"
env.reset("cbench-v1/crc32")
assert env.episode_reward == 0
_, rewards, _, _ = env.step(
env.action_space["-mem2reg"],
reward_spaces=["IrInstructionCount"],
)
assert rewards[0] > 0
assert env.episode_reward == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/episode_reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import os
import sys
from typing import Any, Dict, List, NamedTuple
import gym
import networkx as nx
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.spaces import Box
from compiler_gym.spaces import Dict as DictSpace
from compiler_gym.spaces import Scalar, Sequence
from tests.pytest_plugins.common import ci_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_default_observation_space(env: LlvmEnv):
env.observation_space = "Autophase"
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
env.observation_space = None
assert env.observation_space is None
assert env.observation_space_spec is None
invalid = "invalid value"
with pytest.raises(LookupError, match=f"Observation space not found: {invalid}"):
env.observation_space = invalid
def test_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
assert set(env.observation.spaces.keys()) == {
"Autophase",
"AutophaseDict",
"Bitcode",
"BitcodeFile",
"Buildtime",
"CpuInfo",
"Inst2vec",
"Inst2vecEmbeddingIndices",
"Inst2vecPreprocessedText",
"InstCount",
"InstCountDict",
"InstCountNorm",
"InstCountNormDict",
"Ir",
"IrInstructionCount",
"IrInstructionCountO0",
"IrInstructionCountO3",
"IrInstructionCountOz",
"IrSha1",
"IsBuildable",
"IsRunnable",
"LexedIr",
"LexedIrTuple",
"ObjectTextSizeBytes",
"ObjectTextSizeO0",
"ObjectTextSizeO3",
"ObjectTextSizeOz",
"Programl",
"ProgramlJson",
"Runtime",
"TextSizeBytes",
"TextSizeO0",
"TextSizeO3",
"TextSizeOz",
}
def test_ir_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Ir"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, np.iinfo(np.int64).max)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_ir_sha1_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrSha1"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (40, 40)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert len(value) == 40
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_bitcode_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Bitcode"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == np.int8
assert space.space.size_range == (0, np.iinfo(np.int64).max)
assert space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.int8
assert space.space.contains(value)
def test_bitcode_file_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "BitcodeFile"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, 4096)
assert not space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
try:
assert isinstance(value, str)
assert os.path.isfile(value)
assert space.space.contains(value)
finally:
os.unlink(value)
@pytest.mark.parametrize(
"benchmark_uri", ["cbench-v1/crc32", "cbench-v1/qsort", "cbench-v1/gsm"]
)
def test_bitcode_file_equivalence(env: LlvmEnv, benchmark_uri: str):
"""Test that LLVM produces the same bitcode as a file and as a byte array."""
env.reset(benchmark=benchmark_uri)
bitcode = env.observation.Bitcode()
bitcode_file = env.observation.BitcodeFile()
try:
with open(bitcode_file, "rb") as f:
bitcode_from_file = f.read()
assert bitcode.tobytes() == bitcode_from_file
finally:
os.unlink(bitcode_file)
# The Autophase feature vector for benchmark://cbench-v1/crc32 in its initial
# state.
AUTOPHASE_CBENCH_CRC32 = [
0,
0,
16,
12,
2,
16,
8,
2,
4,
8,
0,
0,
0,
29,
0,
24,
9,
2,
32,
44,
41,
14,
36,
16,
13,
0,
5,
26,
3,
5,
24,
20,
24,
33,
5,
10,
3,
51,
0,
1,
0,
5,
0,
0,
0,
42,
0,
1,
8,
5,
29,
242,
157,
15,
0,
103,
]
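# 56 features, matching the shape of the Autophase observation space asserted
# in the tests below.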
def test_autophase_observation_space_reset(env: LlvmEnv):
"""Test that the intial observation is returned on env.reset()."""
env.observation_space = "Autophase"
observation = env.reset("cbench-v1/crc32")
print(observation.tolist()) # For debugging on error.
np.testing.assert_array_equal(observation, AUTOPHASE_CBENCH_CRC32)
def test_instcount_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.int64
assert space.space.shape == (70,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
expected_values = [
242,
29,
15,
5,
24,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
3,
1,
8,
26,
51,
42,
5,
0,
0,
0,
1,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
20,
0,
0,
0,
10,
0,
0,
33,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
np.testing.assert_array_equal(value, expected_values)
assert value.dtype == np.int64
    # The first value is the total instruction count; it should equal the sum
    # of the per-opcode counts (features 3 onwards).
assert sum(value[3:]) == value[0]
def test_instcount_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 70
def test_instcount_norm_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNorm"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.float32
assert space.space.shape == (69,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (69,)
assert value.dtype == np.float32
# Assert that the normalized instruction counts sum to 1. Note that the
# first two features (#blocks and #funcs) must be excluded.
    assert sum(value[2:]) == pytest.approx(1.0)
def test_instcount_norm_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNormDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 69
def test_autophase_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Autophase"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.shape == (56,)
assert space.deterministic
assert not space.platform_dependent
np.testing.assert_array_equal(value, AUTOPHASE_CBENCH_CRC32)
assert space.space.contains(value)
def test_autophase_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "AutophaseDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 56
assert space.deterministic
assert not space.platform_dependent
def test_lexed_ir_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "LexedIr"
space = env.observation.spaces[key]
print(type(space.space))
print(space.space)
assert isinstance(space.space, Sequence)
value: Dict[str, np.array] = env.observation[key]
print(value) # For debugging in case of error
assert len(value) == 4
assert space.deterministic
assert not space.platform_dependent
def test_lexed_ir_tuple_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "LexedIrTuple"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[NamedTuple] = env.observation[key]
print(value) # For debugging in case of error
assert space.deterministic
assert not space.platform_dependent
def test_programl_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Programl"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: nx.MultiDiGraph = env.observation[key]
assert isinstance(graph, nx.MultiDiGraph)
assert graph.number_of_nodes() == 512
assert graph.number_of_edges() == 907
assert graph.nodes[0] == {
"block": 0,
"function": 0,
"text": "[external]",
"type": 0,
}
assert space.deterministic
assert not space.platform_dependent
def test_programl_json_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "ProgramlJson"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: Dict[str, Any] = env.observation[key]
assert isinstance(graph, dict)
def test_cpuinfo_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "CpuInfo"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, Any] = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, dict)
# Test each expected key, removing it as we go.
assert isinstance(value.pop("name"), str)
assert isinstance(value.pop("cores_count"), int)
assert isinstance(value.pop("l1i_cache_size"), int)
assert isinstance(value.pop("l1i_cache_count"), int)
assert isinstance(value.pop("l1d_cache_size"), int)
assert isinstance(value.pop("l1d_cache_count"), int)
assert isinstance(value.pop("l2_cache_size"), int)
assert isinstance(value.pop("l2_cache_count"), int)
assert isinstance(value.pop("l3_cache_size"), int)
assert isinstance(value.pop("l3_cache_count"), int)
assert isinstance(value.pop("l4_cache_size"), int)
assert isinstance(value.pop("l4_cache_count"), int)
# Anything left in the JSON dictionary now is an unexpected key.
assert not value
invalid = "invalid value"
with pytest.raises(KeyError) as ctx:
_ = env.observation[invalid]
assert str(ctx.value) == f"'{invalid}'"
assert space.deterministic
assert space.platform_dependent
@pytest.fixture
def cbench_crc32_inst2vec_embedding_indices() -> List[int]:
"""The expected inst2vec embedding indices for cbench-v1/crc32."""
# The linux/macOS builds of clang produce slightly different bitcodes.
if sys.platform.lower().startswith("linux"):
return [
8564,
8564,
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
8564,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
8564,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
8564,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
8564,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
8564,
204,
8564,
8564,
8564,
364,
364,
216,
8564,
8564,
8564,
8564,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
elif sys.platform.lower().startswith("darwin"):
return [
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
5666,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
5665,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
5665,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
5665,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
5666,
204,
8564,
5391,
8564,
364,
364,
216,
8564,
5665,
8564,
5665,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
else:
raise NotImplementedError(f"Unknown platform: {sys.platform}")
def test_inst2vec_preprocessed_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecPreprocessedText"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[str] = env.observation[key]
assert isinstance(value, list)
for item, idx in zip(value, cbench_crc32_inst2vec_embedding_indices):
assert isinstance(item, str)
unk = env.inst2vec.vocab["!UNK"]
indices = [env.inst2vec.vocab.get(item, unk) for item in value]
print(indices) # For debugging in case of error.
assert indices == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_embedding_indices_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecEmbeddingIndices"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[int] = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, list)
for item in value:
assert isinstance(item, int)
assert value == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vec"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.float32
height, width = value.shape
assert width == len(env.inst2vec.embeddings[0])
assert height == len(cbench_crc32_inst2vec_embedding_indices)
    # Check that every row matches the embedding for the corresponding index.
np.testing.assert_array_almost_equal(
value.tolist(),
[
env.inst2vec.embeddings[idx]
for idx in cbench_crc32_inst2vec_embedding_indices
],
)
assert space.deterministic
assert not space.platform_dependent
def test_ir_instruction_count_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrInstructionCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 164
key = "IrInstructionCountOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 114
def test_object_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [1171, 3825, 3289], "linux": [1183, 3961, 3286]}
actual_code_sizes = []
key = "ObjectTextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
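    # No optimization passes have been applied yet, so ObjectTextSizeBytes
    # equals the -O0 size and is recorded as the first expected entry.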
actual_code_sizes.append(value)
key = "ObjectTextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
key = "ObjectTextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
actual_code_sizes.append(value)
key = "ObjectTextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
actual_code_sizes.append(value)
# For debugging in case of error:
print("Expected code sizes:", crc32_code_sizes[sys.platform])
print("Actual code sizes:", actual_code_sizes)
assert crc32_code_sizes[sys.platform] == actual_code_sizes
def test_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "TextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
key = "TextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
# NOTE(cummins): The exact values here depend on the system toolchain and
# libraries, so only run this test on the GitHub CI runner environment where we
# can hardcode the values. If this test starts to fail, it may be because the CI
# runner environment has changed.
@ci_only
def test_text_size_observation_space_values(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [16384, 16384, 16384], "linux": [2850, 5652, 4980]}
# For debugging in case of error.
print(env.observation["TextSizeO0"])
print(env.observation["TextSizeO3"])
print(env.observation["TextSizeOz"])
assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0]
assert env.observation.TextSizeO3() == crc32_code_sizes[sys.platform][1]
assert env.observation.TextSizeOz() == crc32_code_sizes[sys.platform][2]
@flaky # Runtimes can timeout
def test_runtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert env.runtime_observation_count == 1
assert value.shape == (1,)
assert not space.deterministic
assert space.platform_dependent
assert space.space.contains(value)
    for runtime in value:
        assert runtime > 0
@flaky # Runtimes can timeout
def test_runtime_observation_space_different_observation_count(env: LlvmEnv):
"""Test setting a custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
env.runtime_observation_count = 3
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.reset()
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.runtime_observation_count = 5
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (5,)
@flaky # Runtimes can timeout
def test_runtime_observation_space_invalid_observation_count(env: LlvmEnv):
"""Test setting an invalid custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
val = env.runtime_observation_count
with pytest.raises(
ValueError, match="runtimes_per_observation_count must be >= 1. Received: -5"
):
env.runtime_observation_count = -5
assert env.runtime_observation_count == val # unchanged
def test_runtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert env.observation[key] is None
@flaky # Build can timeout
def test_buildtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert value.shape == (1,)
assert space.space.contains(value)
assert value[0] >= 0
def test_buildtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
assert env.observation[key] is None
def test_is_runnable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_runnable_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_is_buildable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_buildable_observation_space_not_buildable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_add_derived_space(env: LlvmEnv):
env.reset()
env.observation.add_derived_space(
id="IrLen",
base_id="Ir",
space=Box(name="IrLen", low=0, high=float("inf"), shape=(1,), dtype=int),
translate=lambda base: [15],
)
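    # The translate function ignores the base "Ir" observation and always
    # returns [15], so the derived value is a known constant.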
value = env.observation["IrLen"]
assert isinstance(value, list)
assert value == [15]
# Repeat the above test using the generated bound method.
value = env.observation.IrLen()
assert isinstance(value, list)
assert value == [15]
def test_derived_space_constructor():
"""Test that derived observation space can be specified at construction
time.
"""
with gym.make("llvm-v0") as env:
env.observation_space = "AutophaseDict"
a = env.reset()
with gym.make("llvm-v0", observation_space="AutophaseDict") as env:
b = env.reset()
assert a == b
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/observation_spaces_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for splitting and merging benchmarks."""
import random
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm import llvm_benchmark as llvm
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.validation_result import ValidationResult
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.timeout(600)
def test_cbench_split_merge_build(env: LlvmEnv, validatable_cbench_uri: str):
"""Split and then merge a cBench program, checking that the merged program
passes semantics validation.
"""
env.reset(benchmark=validatable_cbench_uri, reward_space="IrInstructionCount")
initial_instruction_count = env.observation.IrInstructionCount()
split = llvm.split_benchmark_by_function(env.benchmark)
merged = llvm.merge_benchmarks(split)
# Copy over the dynamic configuration to enable runtime semantics
# validation.
merged.proto.dynamic_config.MergeFrom(env.benchmark.proto.dynamic_config)
for cb in env.benchmark.validation_callbacks():
merged.add_validation_callback(cb)
env.reset(benchmark=merged)
assert env.observation.IrInstructionCount() == initial_instruction_count
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
def test_cbench_split_globalopt_merge_safe_unsafe_actions(
env: LlvmEnv, action_name: str
):
"""A test which shows that stripping symbols before split+merge causes
invalid results.
"""
safe = action_name not in {"-strip", "-strip-nondebug"}
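    # Only the symbol-stripping passes are expected to break the split +
    # globalopt + merge round trip; all other actions should preserve the
    # instruction count.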
env.reset(benchmark="benchmark://cbench-v1/sha")
env.step(env.action_space[action_name])
ic = env.observation.IrInstructionCount()
uri = f"benchmark://test-v0/{random.randrange(16**4):04x}"
split = llvm.split_benchmark_by_function(
Benchmark.from_file_contents(uri=uri, data=env.observation.Bitcode().tobytes())
)
def run_globalopt_on_benchmark(benchmark):
env.reset(benchmark=benchmark)
env.step(env.action_space["-globalopt"])
return Benchmark.from_file_contents(
uri=benchmark, data=env.observation.Bitcode().tobytes()
)
split = [run_globalopt_on_benchmark(s) for s in split]
merged = llvm.merge_benchmarks(split)
env.reset(benchmark=merged)
if safe:
assert env.observation.IrInstructionCount() == ic
else:
assert env.observation.IrInstructionCount() != ic
@pytest.mark.parametrize("action_name", ["-strip", "-strip-nondebug"])
def test_cbench_strip_unsafe_for_split(env: LlvmEnv, action_name: str):
"""Sanity check for test_cbench_split_globalopt_merge_safe_unsafe_actions()
above. Run the two strip actions and show that they are safe to use if you
don't split+merge.
"""
env.reset(benchmark="benchmark://cbench-v1/sha")
env.step(env.action_space[action_name])
uri = f"benchmark://test-v0/{random.randrange(16**4):04x}"
split = llvm.split_benchmark_by_function(
Benchmark.from_file_contents(uri=uri, data=env.observation.Bitcode().tobytes())
)
merged = llvm.merge_benchmarks(split)
# Copy over the dynamic config to compile the binary:
merged.proto.dynamic_config.MergeFrom(env.benchmark.proto.dynamic_config)
with pytest.raises(BenchmarkInitError):
env.reset(benchmark=merged)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/split_merge_integration_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from threading import Thread
from typing import List
import gym
from flaky import flaky
from compiler_gym import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from tests.test_main import main
class ThreadedWorker(Thread):
"""Create an environment and run through a set of actions in a background thread."""
def __init__(self, env_name: str, benchmark: str, actions: List[ActionType]):
super().__init__()
self.done = False
self.env_name = env_name
self.benchmark = benchmark
self.actions = actions
assert actions
def run(self) -> None:
with gym.make(self.env_name, benchmark=self.benchmark) as env:
env.reset()
for action in self.actions:
self.observation, self.reward, done, self.info = env.step(action)
assert not done, self.info["error_details"]
self.done = True
class ThreadedWorkerWithEnv(Thread):
"""Create an environment and run through a set of actions in a background thread."""
def __init__(self, env: CompilerEnv, actions: List[ActionType]):
super().__init__()
self.done = False
self.env = env
self.actions = actions
assert actions
def run(self) -> None:
for action in self.actions:
self.observation, self.reward, done, self.info = self.env.step(action)
assert not done, self.info["error_details"]
self.done = True
@flaky # Timeout may be exceeded if the environment is slow to start.
def test_running_environment_in_background_thread():
"""Test launching and running an LLVM environment in a background thread."""
thread = ThreadedWorker(
env_name="llvm-autophase-ic-v0",
benchmark="cbench-v1/crc32",
actions=[0, 0, 0],
)
thread.start()
thread.join(timeout=10)
assert thread.done
assert thread.observation is not None
assert isinstance(thread.reward, float)
assert thread.info
@flaky # Timeout may be exceeded if the environment is slow to start.
def test_moving_environment_to_background_thread():
"""Test running an LLVM environment from a background thread. The environment
is made in the main thread and used in the background thread.
"""
with gym.make("llvm-autophase-ic-v0") as env:
env.reset(benchmark="cbench-v1/crc32")
thread = ThreadedWorkerWithEnv(env=env, actions=[0, 0, 0])
thread.start()
thread.join(timeout=10)
assert thread.done
assert thread.observation is not None
assert isinstance(thread.reward, float)
assert thread.info
assert env.in_episode
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/threading_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the LLVM environment action space."""
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_to_and_from_string_no_actions(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.action_space.to_string(env.actions) == "opt input.bc -o output.bc"
assert env.action_space.from_string(env.action_space.to_string(env.actions)) == []
def test_to_and_from_string(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
env.step(env.action_space.flags.index("-reg2mem"))
assert (
env.action_space.to_string(env.actions)
== "opt -mem2reg -reg2mem input.bc -o output.bc"
)
assert env.action_space.from_string(env.action_space.to_string(env.actions)) == [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/action_space_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import multiprocessing as mp
import sys
from typing import List
import gym
import pytest
from flaky import flaky
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.gym_type_hints import ActionType
from tests.pytest_plugins.common import macos_only
from tests.test_main import main
def process_worker(
env_name: str, benchmark: str, actions: List[ActionType], queue: mp.Queue
):
assert actions
with gym.make(env_name) as env:
env.reset(benchmark=benchmark)
for action in actions:
observation, reward, done, info = env.step(action)
assert not done
queue.put((observation, reward, done, info))
def process_worker_with_env(env: LlvmEnv, actions: List[ActionType], queue: mp.Queue):
assert actions
for action in actions:
observation, reward, done, info = env.step(action)
assert not done
queue.put((env, observation, reward, done, info))
@flaky # Test contains timeouts.
def test_running_environment_in_background_process():
"""Test launching and running an LLVM environment in a background process."""
queue = mp.Queue(maxsize=3)
process = mp.Process(
target=process_worker,
args=("llvm-autophase-ic-v0", "cbench-v1/crc32", [0, 0, 0], queue),
)
process.start()
try:
process.join(timeout=60)
result = queue.get(timeout=60)
observation, reward, done, info = result
assert not done
assert observation is not None
assert isinstance(reward, float)
assert info
finally:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
process.join(timeout=60)
@macos_only
@pytest.mark.skipif(sys.version_info < (3, 8, 0), reason="Py >= 3.8 only")
def test_moving_environment_to_background_process_macos():
"""Test moving an LLVM environment to a background process."""
queue = mp.Queue(maxsize=3)
with gym.make("llvm-autophase-ic-v0") as env:
env.reset(benchmark="cbench-v1/crc32")
process = mp.Process(
target=process_worker_with_env, args=(env, [0, 0, 0], queue)
)
# Moving an environment to a background process is not supported because
# we are using a subprocess.Popen() to manage the service binary, which
# doesn't support pickling.
with pytest.raises(TypeError):
process.start()
def test_port_collision_test():
"""Test that attempting to connect to a port that is already in use succeeds."""
with gym.make("llvm-autophase-ic-v0") as env_a:
env_a.reset(benchmark="cbench-v1/crc32")
with LlvmEnv(service=env_a.service.connection.url) as env_b:
env_b.reset(benchmark="cbench-v1/crc32")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/multiprocessing_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests that module and source IDs are stripped in the LLVM modules."""
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm", "tests.pytest_plugins.common"]
def test_no_module_id_builtin_benchmark(env: LlvmEnv):
"""Test that the module and source IDs are stripped in shipped benchmark."""
env.reset("cbench-v1/crc32")
ir = env.ir
print(ir) # For debugging in case of error.
assert "; ModuleID = '-'\n" in ir
assert '\nsource_filename = "-"\n' in ir
def test_no_module_id_custom_benchmark(env: LlvmEnv):
"""Test that the module and source IDs are stripped in custom benchmark."""
with open("source.c", "w") as f:
f.write("int A() {return 0;}")
benchmark = env.make_benchmark("source.c")
env.reset(benchmark=benchmark)
ir = env.ir
print(ir) # For debugging in case of error.
assert "; ModuleID = '-'\n" in ir
assert '\nsource_filename = "-"\n' in ir
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/module_id_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the compute_observation() function."""
from pathlib import Path
import networkx.algorithms.isomorphism
import pytest
from compiler_gym.envs.llvm import LlvmEnv, compute_observation
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_invalid_observation_space_name(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
space = env.observation.spaces["Ir"]
space.id = "NotARealName"
with pytest.raises(
ValueError, match="Invalid observation space name: NOT_A_REAL_NAME"
):
compute_observation(space, tmpdir / "ir.bc")
def test_missing_file(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset()
with pytest.raises(FileNotFoundError, match=str(tmpdir / "ir.bc")):
compute_observation(env.observation.spaces["Ir"], tmpdir / "ir.bc")
def test_timeout_expired(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset(benchmark="cbench-v1/jpeg-c") # larger benchmark
env.write_bitcode(tmpdir / "ir.bc")
space = env.observation.spaces["Programl"]
with pytest.raises(
TimeoutError, match="Failed to compute Programl observation in 0.1 seconds"
):
compute_observation(space, tmpdir / "ir.bc", timeout=0.1)
@pytest.mark.parametrize(
"observation_space", ["Ir", "IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_equivalence(env: LlvmEnv, tmpdir, observation_space: str):
"""Test that compute_observation() produces the same result as the environment."""
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
observation = compute_observation(
env.observation.spaces[observation_space], tmpdir / "ir.bc"
)
assert observation == env.observation[observation_space]
def test_observation_programl_equivalence(env: LlvmEnv, tmpdir):
"""Test that compute_observation() produces the same result as the environment."""
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
G = compute_observation(env.observation.spaces["Programl"], tmpdir / "ir.bc")
    assert networkx.algorithms.isomorphism.is_isomorphic(
        G, env.observation.Programl()
    )
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/compute_observation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from compiler_gym.envs import CompilerEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_autophase_crc32_feature_vector(env: CompilerEnv):
env.reset(benchmark="cbench-v1/crc32")
print(env.benchmark) # For debugging in case of error.
features = env.observation["AutophaseDict"]
print(features) # For debugging on failure.
assert features == {
"BBNumArgsHi": 0,
"BBNumArgsLo": 0,
"onePred": 16,
"onePredOneSuc": 12,
"onePredTwoSuc": 2,
"oneSuccessor": 16,
"twoPred": 8,
"twoPredOneSuc": 2,
"twoEach": 4,
"twoSuccessor": 8,
"morePreds": 0,
"BB03Phi": 0,
"BBHiPhi": 0,
"BBNoPhi": 29,
"BeginPhi": 0,
"BranchCount": 24,
"returnInt": 9,
"CriticalCount": 2,
"NumEdges": 32,
"const32Bit": 44,
"const64Bit": 41,
"numConstZeroes": 14,
"numConstOnes": 36,
"UncondBranches": 16,
"binaryConstArg": 13,
"NumAShrInst": 0,
"NumAddInst": 5,
"NumAllocaInst": 26,
"NumAndInst": 3,
"BlockMid": 5,
"BlockLow": 24,
"NumBitCastInst": 20,
"NumBrInst": 24,
"NumCallInst": 33,
"NumGetElementPtrInst": 5,
"NumICmpInst": 10,
"NumLShrInst": 3,
"NumLoadInst": 51,
"NumMulInst": 0,
"NumOrInst": 1,
"NumPHIInst": 0,
"NumRetInst": 5,
"NumSExtInst": 0,
"NumSelectInst": 0,
"NumShlInst": 0,
"NumStoreInst": 42,
"NumSubInst": 0,
"NumTruncInst": 1,
"NumXorInst": 8,
"NumZExtInst": 5,
"TotalBlocks": 29,
"TotalInsts": 242,
"TotalMemInst": 157,
"TotalFuncs": 15,
"ArgsPhi": 0,
"testUnary": 103,
}
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/autophase_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import numpy as np
import pytest
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Instruction counts for cbench-v1/crc32 benchmark that are used for testing
# reward signals.
CRC32_INSTRUCTION_COUNT = 242
CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM = 249
CRC32_INSTRUCTION_COUNT_O3 = 164
CRC32_INSTRUCTION_COUNT_OZ = 114
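# The IrInstructionCountO3/Oz rewards scale the per-step change in instruction
# count by the total reduction that -O3/-Oz achieves over the unoptimized
# module, as exercised in test_instruction_count_reward() below.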
def test_instruction_count_reward(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.observation.IrInstructionCount() == CRC32_INSTRUCTION_COUNT
action = env.action_space.flags.index("-reg2mem")
env.step(action)
assert env.observation.IrInstructionCount() == CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM
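    # -reg2mem increases the instruction count (242 -> 249), so the raw
    # instruction-count reward for this step is negative.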
ic_diff = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM
assert env.reward.IrInstructionCount() == ic_diff
assert env.reward.IrInstructionCountNorm() == ic_diff / CRC32_INSTRUCTION_COUNT
o3_improvement = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_O3
assert env.reward.IrInstructionCountO3() == ic_diff / o3_improvement
oz_improvement = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_OZ
assert env.reward.IrInstructionCountOz() == ic_diff / oz_improvement
def test_reward_space(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
assert env.reward_space.name == "IrInstructionCount"
env.reward_space = None
assert env.reward_space is None
invalid = "invalid value"
with pytest.raises(LookupError) as ctx:
env.reward_space = invalid
assert str(ctx.value) == f"Reward space not found: {invalid}"
def test_invalid_reward_space_name(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
invalid = "invalid value"
with pytest.raises(KeyError) as ctx:
_ = env.reward[invalid]
assert str(ctx.value) == f"'{invalid}'"
def test_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert set(env.reward.spaces.keys()) == {
"IrInstructionCount",
"IrInstructionCountNorm",
"IrInstructionCountO3",
"IrInstructionCountOz",
"ObjectTextSizeBytes",
"ObjectTextSizeNorm",
"ObjectTextSizeO3",
"ObjectTextSizeOz",
"TextSizeBytes",
"TextSizeNorm",
"TextSizeO3",
"TextSizeOz",
}
def test_instruction_count_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "IrInstructionCount"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCount"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountNorm"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountO3"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountOz"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
def test_object_text_size_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "ObjectTextSizeBytes"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeBytes"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeNorm"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeO3"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeOz"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
def test_text_size_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "TextSizeBytes"
space = env.reward.spaces[key]
assert str(space) == "TextSizeBytes"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeNorm"
space = env.reward.spaces[key]
assert str(space) == "TextSizeNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeO3"
space = env.reward.spaces[key]
assert str(space) == "TextSizeO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeOz"
space = env.reward.spaces[key]
assert str(space) == "TextSizeOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5


if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/reward_spaces_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/llvm/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import tempfile
from pathlib import Path

import gym
import pytest

from compiler_gym import CompilerEnvState
from compiler_gym.envs import LlvmEnv
from tests.test_main import main

pytest_plugins = ["tests.pytest_plugins.llvm"]


def test_validate_state_no_reward():
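    """Validating a state when neither the state nor the environment has a reward."""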
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0") as env:
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert str(result) == "✅ cbench-v1/crc32"


def test_validate_state_with_reward():
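    """Validating a state whose reward matches the environment's reward."""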
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=0,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert result.okay()
assert result.reward_validated
assert not result.reward_validation_failed
assert str(result) == "✅ cbench-v1/crc32 0.0000"


def test_validate_state_invalid_reward():
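    """Validating a state whose reward does not match the environment's reward."""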
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert not result.okay()
assert result.reward_validated
assert result.reward_validation_failed
assert (
str(result) == "❌ cbench-v1/crc32 Expected reward 1.0 but received reward 0.0"
)


def test_validate_state_without_state_reward():
"""Validating state when state has no reward value."""
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert not result.reward_validation_failed


def test_validate_state_without_env_reward():
"""Validating state when environment has no reward space."""
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=0,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0") as env:
with pytest.warns(
UserWarning,
match=(
"Validating state with reward, "
"but environment has no reward space set"
),
):
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert not result.reward_validation_failed


def test_no_validation_callback_for_custom_benchmark(env: LlvmEnv):
"""Test that a custom benchmark has no validation callback."""
with tempfile.TemporaryDirectory() as d:
p = Path(d) / "example.c"
with open(p, "w") as f:
print("int main() {return 0;}", file=f)
benchmark = env.make_benchmark(p)
env.reset(benchmark=benchmark)
assert not env.benchmark.is_validatable()


if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/validate_test.py
|