import sys
from pathlib import Path
project_root = Path(__file__).absolute().parent.parent.parent
sys.path.insert(0, str(project_root))

import os
import time

import numpy as np
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchvision.models as models

from . import logger as log
# from . import resnet as models
from . import utils
from cnn.mobilenet_imagenet import MobileNet
from cnn.mobilenet_imagenet import Butterfly1x1Conv
from cnn.shufflenet_imagenet import ShuffleNet

try:
    from apex.parallel import DistributedDataParallel as DDP
    from apex.fp16_utils import *
    from apex import amp
except ImportError:
    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")


class ModelAndLoss(nn.Module):
    def __init__(self, arch, loss, pretrained_weights=None, cuda=True, fp16=False, width=1.0,
                 n_struct_layers=0, struct='D', softmax_struct='D', sm_pooling=1, groups=8, shuffle='P'):
        super(ModelAndLoss, self).__init__()
        self.arch = arch
        print("=> creating model '{}'".format(arch))
        # model = models.build_resnet(arch[0], arch[1])
        if arch == 'mobilenetv1':
            model = MobileNet(width_mult=width, structure=[struct] * n_struct_layers,
                              softmax_structure=softmax_struct, sm_pooling=sm_pooling)
            # if args.distilled_param_path:
            #     model.load_state_dict(model.mixed_model_state_dict(args.full_model_path,
            #                                                        args.distilled_param_path))
        elif arch == 'shufflenetv1':
            model = ShuffleNet(width_mult=width, groups=groups, shuffle=shuffle)
        else:
            model = models.__dict__[arch]()
        if pretrained_weights is not None:
            print("=> using pre-trained model from a file '{}'".format(arch))
            model.load_state_dict(pretrained_weights)

        if cuda:
            model = model.cuda()
        if fp16:
            model = network_to_half(model)

        # define loss function (criterion) and optimizer
        criterion = loss()
        if cuda:
            criterion = criterion.cuda()

        self.model = model
        self.loss = criterion

    def forward(self, data, target):
        output = self.model(data)
        if hasattr(self, '_teacher_model'):
            with torch.no_grad():
                teacher_output = self._teacher_model(data)
            loss = self.loss(output, teacher_output, target)
        else:
            loss = self.loss(output, target)
        return loss, output

    def distributed(self):
        self.model = DDP(self.model)

    def load_model_state(self, state):
        if state is not None:
            self.model.load_state_dict(state)


def get_optimizer(parameters, fp16, lr, momentum, structured_momentum, weight_decay,
                  nesterov=False, state=None, static_loss_scale=1., dynamic_loss_scale=False,
                  bn_weight_decay=False):
    if bn_weight_decay:
        print(" ! Weight decay applied to BN parameters ")
        structured_params = [v for n, v in parameters if getattr(v, '_is_structured', False)]
        no_wd_params = [v for n, v in parameters if getattr(v, '_no_wd', False)]
        unstructured_params = [v for n, v in parameters
                               if not getattr(v, '_is_structured', False) and not getattr(v, '_no_wd', False)]
    else:
        print(" ! Weight decay NOT applied to BN parameters ")
        structured_params = [v for n, v in parameters if getattr(v, '_is_structured', False)]
        no_wd_params = [v for n, v in parameters if getattr(v, '_no_wd', False) or 'bn' in n]
        unstructured_params = [v for n, v in parameters
                               if not getattr(v, '_is_structured', False) and not getattr(v, '_no_wd', False)
                               and 'bn' not in n]
    params_dict = [{'params': structured_params, 'weight_decay': 0.0, 'momentum': structured_momentum},
                   {'params': no_wd_params, 'weight_decay': 0.0},
                   {'params': unstructured_params}]
    optimizer = torch.optim.SGD(params_dict, lr, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov)
    if fp16:
        optimizer = FP16_Optimizer(optimizer, static_loss_scale=static_loss_scale,
                                   dynamic_loss_scale=dynamic_loss_scale, verbose=False)
    if state is not None:
        optimizer.load_state_dict(state)
    return optimizer


def lr_policy(lr_fn, logger=None):
    if logger is not None:
        logger.register_metric('lr', log.IterationMeter(), log_level=1)

    def _alr(optimizer, iteration, epoch):
        lr = lr_fn(iteration, epoch)
        if logger is not None:
            logger.log_metric('lr', lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    return _alr


def lr_step_policy(base_lr, steps, decay_factor, warmup_length, epoch_length, logger=None):
    def _lr_fn(iteration, epoch):
        if epoch < warmup_length:
            # lr = base_lr * (epoch + 1) / warmup_length
            lr = base_lr * (1 + iteration + epoch * epoch_length) / (warmup_length * epoch_length)
        else:
            lr = base_lr
            for s in steps:
                if epoch >= s:
                    lr *= decay_factor
        return lr

    return lr_policy(_lr_fn, logger=logger)


def lr_linear_policy(base_lr, warmup_length, epochs, epoch_length, logger=None):
    def _lr_fn(iteration, epoch):
        if epoch < warmup_length:
            # lr = base_lr * (epoch + 1) / warmup_length
            lr = base_lr * (1 + iteration + epoch * epoch_length) / (warmup_length * epoch_length)
        else:
            e = epoch - warmup_length
            es = epochs - warmup_length
            lr = base_lr * (1 - (e / es))
        return lr

    return lr_policy(_lr_fn, logger=logger)


def lr_cosine_policy(base_lr, warmup_length, epochs, epoch_length, logger=None):
    def _lr_fn(iteration, epoch):
        if epoch < warmup_length:
            # lr = base_lr * (epoch + 1) / warmup_length
            lr = base_lr * (1 + iteration + epoch * epoch_length) / (warmup_length * epoch_length)
        else:
            e = epoch - warmup_length
            es = epochs - warmup_length
            lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
        return lr

    return lr_policy(_lr_fn, logger=logger)


def lr_exponential_policy(base_lr, warmup_length, epochs, final_multiplier=0.001, logger=None):
    es = epochs - warmup_length
    epoch_decay = np.power(2, np.log2(final_multiplier) / es)

    def _lr_fn(iteration, epoch):
        if epoch < warmup_length:
            lr = base_lr * (epoch + 1) / warmup_length
        else:
            e = epoch - warmup_length
            lr = base_lr * (epoch_decay ** e)
        return lr

    return lr_policy(_lr_fn, logger=logger)


def get_train_step(model_and_loss, optimizer, fp16, use_amp=False, batch_size_multiplier=1):
    def _step(input, target, optimizer_step=True, sync_metrics=False):
        loss, output = model_and_loss(input, target)
        # prec1, prec5 = torch.zeros(1), torch.zeros(1)
        prec1, prec5 = utils.accuracy(output.data, target, topk=(1, 5))
        reduced_loss = loss.data
        # For best performance, it doesn't make sense to sync these metrics every
        # iteration, since they incur an allreduce and some host<->device syncs.
        if sync_metrics and torch.distributed.is_initialized():
            reduced_loss = utils.reduce_tensor(loss.data)
            prec1 = utils.reduce_tensor(prec1)
            prec5 = utils.reduce_tensor(prec5)

        if fp16:
            optimizer.backward(loss)
        elif use_amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        if optimizer_step:
            opt = optimizer.optimizer if isinstance(optimizer, FP16_Optimizer) else optimizer
            for param_group in opt.param_groups:
                for param in param_group['params']:
                    param.grad /= batch_size_multiplier
            optimizer.step()
            optimizer.zero_grad()

        torch.cuda.synchronize()
        return reduced_loss, prec1, prec5

    return _step


def train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, print_freq,
          use_amp=False, prof=-1, batch_size_multiplier=1, register_metrics=True):
    if register_metrics and logger is not None:
        logger.register_metric('train.top1', log.AverageMeter(), log_level=0)
        logger.register_metric('train.top5', log.AverageMeter(), log_level=0)
        logger.register_metric('train.loss', log.AverageMeter(), log_level=0)
        logger.register_metric('train.total_time', log.AverageMeter(), log_level=0)
        logger.register_metric('train.compute_ips', log.AverageMeter(), log_level=1)
        logger.register_metric('train.total_ips', log.AverageMeter(), log_level=0)
        logger.register_metric('train.data_time', log.AverageMeter(), log_level=1)
        logger.register_metric('train.compute_time', log.AverageMeter(), log_level=1)

    step = get_train_step(model_and_loss, optimizer, fp16, use_amp=use_amp,
                          batch_size_multiplier=batch_size_multiplier)

    model_and_loss.train()
    end = time.time()
    optimizer.zero_grad()

    data_iter = enumerate(train_loader)
    if logger is not None:
        data_iter = logger.iteration_generator_wrapper(data_iter)

    for i, (input, target) in data_iter:
        bs = input.size(0)
        lr_scheduler(optimizer, i, epoch)
        data_time = time.time() - end

        if prof > 0 and i >= prof:
            break

        optimizer_step = ((i + 1) % batch_size_multiplier) == 0
        loss, prec1, prec5 = step(input, target, optimizer_step=optimizer_step,
                                  sync_metrics=i % print_freq == 0)

        it_time = time.time() - end

        if logger is not None:
            logger.log_metric('train.top1', to_python_float(prec1))
            logger.log_metric('train.top5', to_python_float(prec5))
            logger.log_metric('train.loss', to_python_float(loss))
            logger.log_metric('train.total_time', it_time)
            logger.log_metric('train.compute_ips', calc_ips(bs, it_time - data_time))
            logger.log_metric('train.total_ips', calc_ips(bs, it_time))
            logger.log_metric('train.data_time', data_time)
            logger.log_metric('train.compute_time', it_time - data_time)

        end = time.time()


def get_val_step(model_and_loss):
    def _step(input, target):
        with torch.no_grad():
            loss, output = model_and_loss(input, target)
            prec1, prec5 = utils.accuracy(output.data, target, topk=(1, 5))
            if torch.distributed.is_initialized():
                reduced_loss = utils.reduce_tensor(loss.data)
                prec1 = utils.reduce_tensor(prec1)
                prec5 = utils.reduce_tensor(prec5)
            else:
                reduced_loss = loss.data

        torch.cuda.synchronize()
        return reduced_loss, prec1, prec5

    return _step


def validate(val_loader, model_and_loss, fp16, logger, epoch, prof=-1, register_metrics=True):
    if register_metrics and logger is not None:
        logger.register_metric('val.loss', log.AverageMeter(), log_level=0)
        logger.register_metric('val.top1', log.AverageMeter(), log_level=0)
        logger.register_metric('val.top5', log.AverageMeter(), log_level=0)
        logger.register_metric('val.compute_ips', log.AverageMeter(), log_level=1)
        logger.register_metric('val.total_ips', log.AverageMeter(), log_level=1)
        logger.register_metric('val.data_time', log.AverageMeter(), log_level=1)
        logger.register_metric('val.compute_time', log.AverageMeter(), log_level=1)

    step = get_val_step(model_and_loss)
    top1 = log.AverageMeter()

    # switch to evaluate mode
    model_and_loss.eval()
    end = time.time()

    data_iter = enumerate(val_loader)
    if logger is not None:
        data_iter = logger.iteration_generator_wrapper(data_iter, val=True)

    for i, (input, target) in data_iter:
        bs = input.size(0)
        data_time = time.time() - end
        if prof > 0 and i > prof:
            break

        loss, prec1, prec5 = step(input, target)
        it_time = time.time() - end

        top1.record(to_python_float(prec1), bs)
        if logger is not None:
            logger.log_metric('val.top1', to_python_float(prec1))
            logger.log_metric('val.top5', to_python_float(prec5))
            logger.log_metric('val.loss', to_python_float(loss))
            logger.log_metric('val.compute_ips', calc_ips(bs, it_time - data_time))
            logger.log_metric('val.total_ips', calc_ips(bs, it_time))
            logger.log_metric('val.data_time', data_time)
            logger.log_metric('val.compute_time', it_time - data_time)

        end = time.time()

    return top1.get_val()


# Train loop {{{
def calc_ips(batch_size, time):
    world_size = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
    return world_size * batch_size / time


def train_loop(model_and_loss, optimizer, lr_scheduler, train_loader, val_loader, epochs, fp16, logger,
               should_backup_checkpoint, print_freq, use_amp=False, batch_size_multiplier=1,
               best_prec1=0, start_epoch=0, prof=-1, skip_training=False, skip_validation=False,
               save_checkpoints=True, checkpoint_dir='./'):
    prec1 = -1

    epoch_iter = range(start_epoch, epochs)
    if logger is not None:
        epoch_iter = logger.epoch_generator_wrapper(epoch_iter)
    for epoch in epoch_iter:
        if not skip_training:
            train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, print_freq,
                  use_amp=use_amp, prof=prof, register_metrics=epoch == start_epoch,
                  batch_size_multiplier=batch_size_multiplier)

        if not skip_validation:
            prec1 = validate(val_loader, model_and_loss, fp16, logger, epoch, prof=prof,
                             register_metrics=epoch == start_epoch)

        if save_checkpoints and (not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0):
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            if should_backup_checkpoint(epoch):
                backup_filename = 'checkpoint-{}.pth.tar'.format(epoch + 1)
            else:
                backup_filename = None
            utils.save_checkpoint({
                'epoch': epoch + 1,
                'arch': model_and_loss.arch,
                'state_dict': model_and_loss.model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best, checkpoint_dir=checkpoint_dir, backup_filename=backup_filename)
# }}}


def get_input_cov(model, train_loader, layer_names, max_batches=None):
    model = model.float()

    # Forward pre-hook to capture intermediate inputs and accumulate running statistics.
    def hook(module, input):
        x, = input
        b, c, h, w = x.shape
        x = x.permute(0, 2, 3, 1).reshape(b * h * w, c)
        if not hasattr(module, '_count'):
            module._count = 1
        else:
            module._count += 1
        # First moment E[X], averaged over batches.
        current_mean = x.mean(dim=0)
        if not hasattr(module, '_mean'):
            module._mean = current_mean
        else:
            module._mean += (current_mean - module._mean) / module._count
        # Covariance (actually the 2nd moment) E[X^T X], averaged over batches.
        current_cov = (x.t() @ x) / x.shape[0]
        if not hasattr(module, '_cov'):
            module._cov = current_cov
        else:
            module._cov += (current_cov - module._cov) / module._count

    module_dict = dict(model.named_modules())
    hook_handles = [module_dict[layer_name].register_forward_pre_hook(hook) for layer_name in layer_names]
    model.eval()
    for batch_idx, (input, _) in enumerate(train_loader):
        with torch.no_grad():
            model(input)
        if max_batches is not None and batch_idx >= max_batches:
            # DALI iterator doesn't support resetting before finishing the epoch
            # if hasattr(train_loader, 'dalipipeline'):
            #     train_loader.dalipipeline.reset()
            break
    for h in hook_handles:
        h.remove()
    # mean = {layer_name + '.mean': module_dict[layer_name]._mean for layer_name in layer_names}
    cov = {layer_name: module_dict[layer_name]._cov for layer_name in layer_names}
    if torch.distributed.is_initialized():
        cov = {layer_name: utils.reduce_tensor(c.data) for layer_name, c in cov.items()}
    return cov


def butterfly_projection_cov(teacher_module, input_cov, butterfly_structure='odo_1',
                             n_Adam_steps=20000, n_LBFGS_steps=50):
    teacher_module = teacher_module.float()
    input_cov = input_cov.float()
    try:
        in_channels = teacher_module.in_channels
        out_channels = teacher_module.out_channels
    except AttributeError:
        raise ValueError("Only convolutional layers currently supported.")
    parts = butterfly_structure.split('_')
    param = parts[0]
    nblocks = 0 if len(parts) <= 1 else int(parts[1])
    residual = False if len(parts) <= 2 else (parts[2] == 'res')
    student_module = Butterfly1x1Conv(in_channels, out_channels, bias=False, tied_weight=False,
                                      ortho_init=True, param=param, nblocks=nblocks)
    student_module = student_module.to(input_cov.device)
    with torch.no_grad():
        try:  # torch.symeig sometimes fails to converge on CUDA
            Sigma, U = torch.symeig(input_cov, eigenvectors=True)
        except RuntimeError:  # Move to CPU and use numpy's function
            Sigma, U = np.linalg.eigh(input_cov.cpu().numpy())
            Sigma = torch.tensor(Sigma, dtype=input_cov.dtype, device=input_cov.device)
            U = torch.tensor(U, dtype=input_cov.dtype, device=input_cov.device)
        Sigma = Sigma.clamp(0)  # avoid small negative eigenvalues
        input = torch.diag(Sigma.sqrt()) @ U.t()
        input = input.reshape(in_channels, in_channels, 1, 1)  # to be compatible with conv2d
        target = teacher_module(input)
        # Normalize input so that the output has MSE 1.0
        input /= (target ** 2).mean().sqrt()
        target = teacher_module(input)

    def loss_fn():
        output = student_module(input)
        if residual:
            if output.shape[1] == 2 * input.shape[1]:
                b, c, h, w = input.shape
                output = (output.reshape(b, 2, c, h, w) + input.reshape(b, 1, c, h, w)).reshape(b, 2 * c, h, w)
            else:
                output = output + input
        return F.mse_loss(output, target)

    optimizer = optim.Adam(student_module.parameters())
    student_module.train()
    for _ in range(n_Adam_steps):
        optimizer.zero_grad()
        loss = loss_fn()
        loss.backward()
        optimizer.step()

    optimizer = optim.LBFGS(filter(lambda p: p.requires_grad, student_module.parameters()),
                            tolerance_grad=1e-7,  # Pytorch 1.2 sets this too high, see https://github.com/pytorch/pytorch/pull/25240
                            line_search_fn='strong_wolfe')

    def closure():
        optimizer.zero_grad()
        loss = loss_fn()
        loss.backward()
        return loss

    for _ in range(n_LBFGS_steps):
        loss = optimizer.step(closure)

    if torch.distributed.is_initialized():
        # Get the model from the process with the lowest loss.
        # Losses could be different due to different initializations of student_module.
        all_losses = [torch.empty_like(loss) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(all_losses, loss)
        best_rank = min(range(len(all_losses)), key=lambda i: all_losses[i])
        loss = all_losses[best_rank]
        for p in student_module.parameters():
            torch.distributed.broadcast(p, best_rank)
    return student_module, loss.item()
# Source: butterfly-master / cnn/imagenet/training.py
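A minimal wiring sketch of how the pieces above compose. This is not code from the repo: `model_and_loss`, `train_loader`, and `val_loader` are assumed to be built elsewhere, and every hyperparameter value is a placeholder, not a repo default.

# Hypothetical usage sketch; only the function signatures come from the file above.
params = list(model_and_loss.model.named_parameters())
optimizer = get_optimizer(params, fp16=False, lr=0.1, momentum=0.9,
                          structured_momentum=0.9, weight_decay=1e-4)
lr_scheduler = lr_cosine_policy(base_lr=0.1, warmup_length=5, epochs=90,
                                epoch_length=len(train_loader))
train_loop(model_and_loss, optimizer, lr_scheduler, train_loader, val_loader,
           epochs=90, fp16=False, logger=None,
           should_backup_checkpoint=lambda epoch: False, print_freq=10)

Note that `lr_cosine_policy` returns a callable `(optimizer, iteration, epoch)`, which `train` invokes once per iteration; warmup is therefore linear per-iteration, not per-epoch.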
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import unittest

import torch

from butterfly.permutation_multiply import permutation_mult_torch, permutation_mult
from butterfly.permutation_multiply import permutation_mult_single_factor_torch, permutation_mult_single


class PermutationMultTest(unittest.TestCase):

    def setUp(self):
        self.rtol = 1e-3
        self.atol = 1e-5

    def test_permutation_cpu(self):
        batch_size = 10
        n = 4096
        m = int(math.log2(n))
        prob = torch.rand(m - 1, 3, requires_grad=True)
        for complex in [False, True]:
            for increasing_stride in [False, True]:
                input = torch.randn((batch_size, n) + (() if not complex else (2, )), requires_grad=True)
                output = permutation_mult(prob, input)
                output_torch = permutation_mult_torch(prob, input)
                self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                (complex, (output - output_torch).abs().max().item()))
                grad = torch.randn_like(output_torch)
                d_prob, d_input = torch.autograd.grad(output, (prob, input), grad, retain_graph=True)
                d_prob_torch, d_input_torch = torch.autograd.grad(output_torch, (prob, input), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                (complex, (d_input - d_input_torch).abs().max().item()))
                # print((d_prob - d_prob_torch) / d_prob_torch)
                self.assertTrue(torch.allclose(d_prob, d_prob_torch, rtol=self.rtol, atol=self.atol),
                                (complex, ((d_prob - d_prob_torch) / d_prob_torch).abs().max().item()))

    @unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
    def test_permutation_cuda(self):
        batch_size = 10
        n = 4096
        m = int(math.log2(n))
        prob = torch.rand(m - 1, 3, device='cuda', requires_grad=True)
        for complex in [False, True]:
            for increasing_stride in [False, True]:
                input = torch.randn((batch_size, n) + (() if not complex else (2, )), device=prob.device, requires_grad=True)
                output = permutation_mult(prob, input)
                output_torch = permutation_mult_torch(prob, input)
                self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                (complex, (output - output_torch).abs().max().item()))
                grad = torch.randn_like(output_torch)
                d_prob, d_input = torch.autograd.grad(output, (prob, input), grad, retain_graph=True)
                d_prob_torch, d_input_torch = torch.autograd.grad(output_torch, (prob, input), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                (complex, (d_input - d_input_torch).abs().max().item()))
                # print((d_prob - d_prob_torch) / d_prob_torch)
                self.assertTrue(torch.allclose(d_prob, d_prob_torch, rtol=self.rtol, atol=self.atol),
                                (complex, ((d_prob - d_prob_torch) / d_prob_torch).abs().max().item()))

    def test_permutation_single_cpu(self):
        batch_size = 10
        n = 4096
        m = int(math.log2(n))
        prob = torch.rand(3, requires_grad=True)
        for complex in [False, True]:
            input = torch.randn((batch_size, n) + (() if not complex else (2, )), requires_grad=True)
            output = permutation_mult_single(prob, input)
            output_torch = permutation_mult_single_factor_torch(prob, input)
            self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                            (complex, (output - output_torch).abs().max().item()))
            grad = torch.randn_like(output_torch)
            d_prob, d_input = torch.autograd.grad(output, (prob, input), grad, retain_graph=True)
            d_prob_torch, d_input_torch = torch.autograd.grad(output_torch, (prob, input), grad, retain_graph=True)
            self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                            (complex, (d_input - d_input_torch).abs().max().item()))
            # print((d_prob - d_prob_torch) / d_prob_torch)
            self.assertTrue(torch.allclose(d_prob, d_prob_torch, rtol=self.rtol, atol=self.atol),
                            (complex, ((d_prob - d_prob_torch) / d_prob_torch).abs().max().item()))

    @unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
    def test_permutation_single_cuda(self):
        batch_size = 10
        n = 4096
        m = int(math.log2(n))
        prob = torch.rand(3, device='cuda', requires_grad=True)
        for complex in [False, True]:
            input = torch.randn((batch_size, n) + (() if not complex else (2, )), device=prob.device, requires_grad=True)
            output = permutation_mult_single(prob, input)
            output_torch = permutation_mult_single_factor_torch(prob, input)
            self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                            (complex, (output - output_torch).abs().max().item()))
            grad = torch.randn_like(output_torch)
            d_prob, d_input = torch.autograd.grad(output, (prob, input), grad, retain_graph=True)
            d_prob_torch, d_input_torch = torch.autograd.grad(output_torch, (prob, input), grad, retain_graph=True)
            self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                            (complex, (d_input - d_input_torch).abs().max().item()))
            # print((d_prob - d_prob_torch) / d_prob_torch)
            self.assertTrue(torch.allclose(d_prob, d_prob_torch, rtol=self.rtol, atol=self.atol),
                            (complex, ((d_prob - d_prob_torch) / d_prob_torch).abs().max().item()))


if __name__ == "__main__":
    unittest.main()
# Source: butterfly-master / tests_old/test_permutation_multiply.py
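The shape contract these tests exercise, restated as a standalone sketch. The semantics of `prob` (one 3-vector of permutation probabilities per factor) belong to the repo's probabilistic permutation; it is treated here as an opaque parameter.

# Shape sketch inferred directly from the test above.
import math
import torch
from butterfly.permutation_multiply import permutation_mult

n = 16                                           # must be a power of 2
m = int(math.log2(n))
prob = torch.rand(m - 1, 3, requires_grad=True)  # one (3,)-vector per factor, as in the test
x = torch.randn(4, n)                            # (batch, n); append a trailing 2 for the complex case
y = permutation_mult(prob, x)                    # output has the same shape as x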
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import unittest

import numpy as np
import torch

from butterfly import Butterfly
from butterfly.butterfly import ButterflyBmm
from butterfly.butterfly_multiply import butterfly_ortho_mult_tied


class ButterflyTest(unittest.TestCase):

    def test_butterfly(self):
        batch_size = 10
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for in_size, out_size in [(7, 15), (15, 7)]:
                for complex in [False, True]:
                    for tied_weight in [True, False]:
                        for increasing_stride in [True, False]:
                            for ortho_init in [False, True]:
                                for param in ['regular'] if complex else ['regular', 'ortho', 'odo', 'obdobt']:
                                    for nblocks in [0, 1, 2, 3] if param in ['regular', 'ortho', 'odo', 'obdobt'] else [0]:
                                        for expansion in [1, 2]:
                                            if param in ['obdobt'] and tied_weight:
                                                continue
                                            if nblocks > 0 and complex:
                                                continue
                                            if not (nblocks > 0 and tied_weight and param in ['odo']):  # Special case
                                                if nblocks > 0 and (tied_weight or param not in ['regular', 'ortho', 'odo', 'obdobt']):
                                                    continue
                                            b = Butterfly(in_size, out_size, True, complex, tied_weight, increasing_stride,
                                                          ortho_init, param, nblocks=nblocks, expansion=expansion).to(device)
                                            input = torch.randn((batch_size, in_size) + (() if not complex else (2,)), device=device)
                                            output = b(input)
                                            self.assertTrue(output.shape == (batch_size, out_size) + (() if not complex else (2,)),
                                                            (output.shape, device, (in_size, out_size), complex, tied_weight, ortho_init, nblocks))
                                            if ortho_init and param == 'regular':
                                                twiddle_np = b.twiddle.detach().to('cpu').numpy()
                                                if complex:
                                                    twiddle_np = twiddle_np.view('complex64').squeeze(-1)
                                                twiddle_np = twiddle_np.reshape(-1, 2, 2)
                                                twiddle_norm = np.linalg.norm(twiddle_np, ord=2, axis=(1, 2))
                                                self.assertTrue(np.allclose(twiddle_norm, 1),
                                                                (twiddle_norm, device, (in_size, out_size), complex, tied_weight, ortho_init))

    def test_butterfly_expansion(self):
        batch_size = 1
        device = 'cpu'
        in_size, out_size = (16, 16)
        expansion = 4
        b = Butterfly(in_size, out_size, bias=False, tied_weight=True, param='odo',
                      expansion=expansion, diag_init='normal').to(device)
        input = torch.randn((batch_size, in_size), device=device)
        output = b(input)
        terms = []
        for i in range(expansion):
            temp = butterfly_ortho_mult_tied(b.twiddle[[i]], input.unsqueeze(1), False)
            temp = temp * b.diag[i]
            temp = butterfly_ortho_mult_tied(b.twiddle1[[i]], temp, True)
            terms.append(temp)
        total = sum(terms)
        self.assertTrue(torch.allclose(output, total))

    def test_butterfly_bmm(self):
        batch_size = 10
        matrix_batch = 3
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for in_size, out_size in [(7, 15), (15, 7)]:
                for complex in [False, True]:
                    for tied_weight in [True, False]:
                        for increasing_stride in [True, False]:
                            for ortho_init in [False, True]:
                                for param in ['regular'] if complex else ['regular', 'ortho', 'odo', 'obdobt']:
                                    for nblocks in [0, 1, 2, 3] if param in ['regular', 'ortho', 'odo', 'obdobt'] else [0]:
                                        for expansion in [1, 2]:
                                            if param in ['obdobt'] and tied_weight:
                                                continue
                                            if nblocks > 0 and complex:
                                                continue
                                            if not (nblocks > 0 and tied_weight and param in ['odo']):  # Special case
                                                if nblocks > 0 and (tied_weight or param not in ['regular', 'ortho', 'odo', 'obdobt']):
                                                    continue
                                            b_bmm = ButterflyBmm(in_size, out_size, matrix_batch, True, complex, tied_weight,
                                                                 increasing_stride, ortho_init, param, expansion=expansion).to(device)
                                            input = torch.randn((batch_size, matrix_batch, in_size) + (() if not complex else (2,)), device=device)
                                            output = b_bmm(input)
                                            self.assertTrue(output.shape == (batch_size, matrix_batch, out_size) + (() if not complex else (2,)),
                                                            (output.shape, device, (in_size, out_size), complex, tied_weight, ortho_init))
                                            # Check that the result is the same as looping over butterflies
                                            if param == 'regular':
                                                output_loop = []
                                                for i in range(matrix_batch):
                                                    b = Butterfly(in_size, out_size, True, complex, tied_weight,
                                                                  increasing_stride, ortho_init, expansion=expansion)
                                                    b.twiddle = torch.nn.Parameter(b_bmm.twiddle[i * b_bmm.nstack:(i + 1) * b_bmm.nstack])
                                                    b.bias = torch.nn.Parameter(b_bmm.bias[i])
                                                    output_loop.append(b(input[:, i]))
                                                output_loop = torch.stack(output_loop, dim=1)
                                                self.assertTrue(torch.allclose(output, output_loop),
                                                                ((output - output_loop).abs().max().item(), output.shape, device,
                                                                 (in_size, out_size), complex, tied_weight, ortho_init))
                                            if ortho_init and param == 'regular':
                                                twiddle_np = b_bmm.twiddle.detach().to('cpu').numpy()
                                                if complex:
                                                    twiddle_np = twiddle_np.view('complex64').squeeze(-1)
                                                twiddle_np = twiddle_np.reshape(-1, 2, 2)
                                                twiddle_norm = np.linalg.norm(twiddle_np, ord=2, axis=(1, 2))
                                                self.assertTrue(np.allclose(twiddle_norm, 1),
                                                                (twiddle_norm, device, (in_size, out_size), complex, tied_weight, ortho_init))


if __name__ == "__main__":
    unittest.main()
# Source: butterfly-master / tests_old/test_butterfly.py
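For reference, a minimal forward-pass sketch using only the positional constructor signature the tests exercise (in_size, out_size, bias, complex, tied_weight, increasing_stride, ortho_init, param); the keyword names beyond `nblocks` and `expansion` are not confirmed here, so arguments are passed positionally. The specific sizes and the choice of `param='odo'` are illustrative.

import torch
from butterfly import Butterfly

# bias=True, complex=False, tied_weight=False, increasing_stride=True,
# ortho_init=False, param='odo', one extra block -- a config the test above allows
layer = Butterfly(16, 16, True, False, False, True, False, 'odo', nblocks=1)
x = torch.randn(8, 16)   # (batch, in_size)
y = layer(x)             # (batch, out_size)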
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import unittest

import torch

from butterfly import Butterfly
from butterfly.utils import twiddle_normal_to_fast_format
from butterfly.butterfly_multiply import butterfly_mult_torch, butterfly_mult, butterfly_mult_factors
from butterfly.butterfly_multiply import butterfly_mult_untied_torch, butterfly_mult_untied
from butterfly.butterfly_multiply import butterfly_ortho_mult_tied_torch, butterfly_ortho_mult_tied
from butterfly.butterfly_multiply import butterfly_ortho_mult_untied_torch, butterfly_ortho_mult_untied
from butterfly.butterfly_multiply import bbt_mult_untied_torch, bbt_mult_untied
from butterfly.butterfly_multiply import bbt_ortho_mult_untied_torch, bbt_ortho_mult_untied
from butterfly.butterfly_multiply import butterfly_mult_conv2d_torch, butterfly_mult_conv2d
from butterfly.butterfly_multiply import bbt_mult_conv2d_torch, bbt_mult_conv2d
from factor_multiply import butterfly_multiply_untied_eval
from factor_multiply_fast import butterfly_multiply_untied_forward_fast
from factor_multiply_fast import butterfly_multiply_untied_forward_max5_fast
from factor_multiply_fast import butterfly_multiply_untied_forward_backward_fast
from factor_multiply_fast import butterfly_multiply_untied_forward_backward_max5_fast
from factor_multiply_fast import butterfly_bbs_multiply_untied_forward_fast
from factor_multiply_fast import butterfly_bbs_multiply_untied_forward_backward_fast
from factor_multiply_fast import butterfly_ortho_multiply_untied_forward_fast
from factor_multiply_fast import butterfly_ortho_multiply_untied_backward_fast
from factor_multiply_fast import butterfly_odo_multiply_untied_forward_fast
from factor_multiply_fast import butterfly_odo_multiply_untied_backward_fast
from factor_multiply_fast import butterfly_odo_multiply_untied_forward_backward_fast


class ButterflyMultTest(unittest.TestCase):

    def setUp(self):
        self.rtol = 1e-3
        self.atol = 1e-5

    def test_butterfly(self):
        batch_size = 10
        n = 4096
        nstack = 2
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for complex in [False, True]:
                for increasing_stride in [True, False]:
                    scaling = 1 / math.sqrt(2) if not complex else 1 / 2
                    twiddle = torch.randn((nstack, n - 1, 2, 2) + (() if not complex else (2, )),
                                          requires_grad=True, device=device) * scaling
                    input = torch.randn((batch_size, nstack, n) + (() if not complex else (2, )),
                                        requires_grad=True, device=twiddle.device)
                    output = butterfly_mult(twiddle, input, increasing_stride)
                    output_torch = butterfly_mult_torch(twiddle, input, increasing_stride)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), device, complex, increasing_stride))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), device, complex, increasing_stride))
                    # print((d_twiddle - d_twiddle_torch) / d_twiddle_torch)
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol, atol=self.atol),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(), device, complex, increasing_stride))

    def test_butterfly_untied(self):
        # Test a size smaller than 1024 and a large batch size for race conditions
        for batch_size, n in [(10, 4096), (8192, 256)]:
            m = int(math.log2(n))
            nstack = 2
            for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
                for complex in [False, True]:
                    for increasing_stride in [True, False]:
                        if batch_size > 1024 and (device == 'cpu' or complex):
                            continue
                        scaling = 1 / math.sqrt(2) if not complex else 1 / 2
                        twiddle = torch.randn((nstack, m, n // 2, 2, 2) + (() if not complex else (2, )),
                                              requires_grad=True, device=device) * scaling
                        input = torch.randn((batch_size, nstack, n) + (() if not complex else (2, )),
                                            requires_grad=True, device=twiddle.device)
                        output = butterfly_mult_untied(twiddle, input, increasing_stride)
                        output_torch = butterfly_mult_untied_torch(twiddle, input, increasing_stride)
                        self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                        ((output - output_torch).abs().max().item(), device, complex, increasing_stride))
                        grad = torch.randn_like(output_torch)
                        d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                        d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                        self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                        ((d_input - d_input_torch).abs().max().item(), device, complex, increasing_stride))
                        # if device == 'cuda' and batch_size > 1024 and not complex and increasing_stride:
                        #     print((d_twiddle - d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
                        #     print(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
                        #     i = ((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().argmax()
                        #     print(d_twiddle.flatten()[i])
                        #     print(d_twiddle_torch.flatten()[i])
                        #     print(d_twiddle.flatten()[i-5:i+5])
                        #     print(d_twiddle_torch.flatten()[i-5:i+5])
                        self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch,
                                                       rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                       atol=self.atol * (10 if batch_size > 1024 else 1)),
                                        (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
                                         (batch_size, n), device, complex, increasing_stride))

    def test_butterfly_untied_eval(self):
        for batch_size, n in [(1, 256), (2, 512), (8, 512), (10, 512)]:
            m = int(math.log2(n))
            nstack = 2
            for device in ['cpu']:
                for complex in [True]:
                    for increasing_stride in [True, False]:
                        scaling = 1 / math.sqrt(2)
                        twiddle = torch.randn((nstack, m, n // 2, 2, 2), requires_grad=True, device=device) * scaling
                        input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                        output = butterfly_multiply_untied_eval(twiddle, input, increasing_stride)
                        output_torch = butterfly_mult_untied_torch(twiddle, input, increasing_stride)
                        self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                        ((output - output_torch).abs().max().item(), device, complex, increasing_stride))

    def test_butterfly_ortho_tied(self):
        # Test a size smaller than 1024 and a large batch size for race conditions
        for batch_size, n in [(10, 4096), (8192, 256)]:
            m = int(math.log2(n))
            nstack = 2
            for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
                for increasing_stride in [True, False]:
                    if batch_size > 1024 and device == 'cpu':
                        continue
                    twiddle = torch.rand((nstack, n - 1), requires_grad=True, device=device) * 2 * math.pi
                    input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                    output = butterfly_ortho_mult_tied(twiddle, input, increasing_stride)
                    output_torch = butterfly_ortho_mult_tied_torch(twiddle, input, increasing_stride)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), device, increasing_stride))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), device, increasing_stride))
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch,
                                                   rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                   atol=self.atol * (10 if batch_size > 1024 else 1)),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
                                     (batch_size, n), device, increasing_stride))

    def test_butterfly_ortho_untied(self):
        # Test a size smaller than 1024 and a large batch size for race conditions
        for batch_size, n in [(10, 4096), (8192, 256)]:
            m = int(math.log2(n))
            nstack = 2
            for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
                for increasing_stride in [True, False]:
                    if batch_size > 1024 and device == 'cpu':
                        continue
                    twiddle = torch.rand((nstack, m, n // 2), requires_grad=True, device=device) * 2 * math.pi
                    input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                    output = butterfly_ortho_mult_untied(twiddle, input, increasing_stride)
                    output_torch = butterfly_ortho_mult_untied_torch(twiddle, input, increasing_stride)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), device, increasing_stride))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), device, increasing_stride))
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch,
                                                   rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                   atol=self.atol * (10 if batch_size > 1024 else 1)),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
                                     (batch_size, n), device, increasing_stride))

    def test_bbt_untied(self):
        for batch_size, n in [(2048, 512), (10, 4096)]:
            for nblocks in list(range(1, 4)) + [10, 14]:  # Test nblocks >= 7
                m = int(math.log2(n))
                nstack = 2
                for device in ([] if not torch.cuda.is_available() else ['cuda']) + ['cpu']:
                    if batch_size > 1024 and device == 'cpu':
                        continue
                    scaling = 1 / 2
                    twiddle = torch.randn((nstack, nblocks * 2 * m, n // 2, 2, 2), requires_grad=True, device=device) * scaling
                    input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                    output = bbt_mult_untied(twiddle, input)
                    output_torch = bbt_mult_untied_torch(twiddle, input)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), nblocks, device))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), nblocks, device))
                    # if device == 'cuda' and batch_size > 1024:
                    #     print((d_twiddle - d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
                    #     print(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().mean(dim=(0, 2, 3, 4)))
                    #     i = ((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().argmax()
                    #     print(d_twiddle.flatten()[i])
                    #     print(d_twiddle_torch.flatten()[i])
                    #     print(d_twiddle.flatten()[i-5:i+5])
                    #     print(d_twiddle_torch.flatten()[i-5:i+5])
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch,
                                                   rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                   atol=self.atol * (10 if batch_size > 1024 else 1)),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
                                     (batch_size, n), nblocks, device))

    def test_bbt_ortho_untied(self):
        for batch_size, n in [(2048, 512), (10, 4096)]:
            for nblocks in list(range(1, 4)) + [10, 14]:  # Test nblocks >= 7
                m = int(math.log2(n))
                nstack = 2
                for device in ([] if not torch.cuda.is_available() else ['cuda']) + ['cpu']:
                    if batch_size > 1024 and device == 'cpu':
                        continue
                    twiddle = torch.rand((nstack, nblocks * 2 * m, n // 2), requires_grad=True, device=device) * 2 * math.pi
                    input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                    output = bbt_ortho_mult_untied(twiddle, input)
                    output_torch = bbt_ortho_mult_untied_torch(twiddle, input)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), (batch_size, n), nblocks, device))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), (batch_size, n), nblocks, device))
                    # if device == 'cuda' and batch_size > 1024 and nblocks == 14:
                    #     print((d_twiddle - d_twiddle_torch).abs().mean(dim=(0, 2)))
                    #     print(((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().mean(dim=(0, 2)))
                    #     i = ((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().argmax()
                    #     print(d_twiddle.flatten()[i])
                    #     print(d_twiddle_torch.flatten()[i])
                    #     print(d_twiddle.flatten()[i-5:i+5])
                    #     print(d_twiddle_torch.flatten()[i-5:i+5])
                    # Seems to fail for large nblocks because there's likely to be a d_twiddle that's really small.
                    # I guess it's fine.
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch,
                                                   rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                   atol=self.atol * (10 if batch_size > 1024 else 1)),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(),
                                     (batch_size, n), nblocks, device))

    def test_butterfly_factors(self):
        batch_size = 10
        n = 4096
        nstack = 1  # Does not support nstack
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for complex in [False, True]:
                for increasing_stride in [True, False]:
                    scaling = 1 / math.sqrt(2) if not complex else 1 / 2
                    twiddle = torch.randn((nstack, n - 1, 2, 2) + (() if not complex else (2, )),
                                          requires_grad=True, device=device) * scaling
                    input = torch.randn((batch_size, nstack, n) + (() if not complex else (2, )),
                                        requires_grad=True, device=twiddle.device)
                    output = butterfly_mult_factors(twiddle.squeeze(0), input.squeeze(1), increasing_stride=increasing_stride)
                    output_torch = butterfly_mult_torch(twiddle, input, increasing_stride=increasing_stride).squeeze(1)
                    self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                    ((output - output_torch).abs().max().item(), device, complex, increasing_stride))
                    grad = torch.randn_like(output_torch)
                    d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input), grad, retain_graph=True)
                    d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_torch).abs().max().item(), device, complex, increasing_stride))
                    # print((d_twiddle - d_twiddle_torch) / d_twiddle_torch)
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol, atol=self.atol),
                                    (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(), device, complex, increasing_stride))

    def test_butterfly_conv2d(self):
        device = 'cuda'
        c_in = 256
        kernel_size = 3
        batch_size = 128
        f_dim = 8
        padding = 1
        for c_out in [c_in, 2 * c_in]:
            nstack = c_out // c_in * kernel_size * kernel_size
            m = int(math.log2(c_in))
            for increasing_stride in [True, False]:
                scaling = 1 / math.sqrt(2)
                twiddle = torch.randn((nstack, m, c_in // 2, 2, 2), requires_grad=True, device=device) * scaling
                input_ = torch.randn(batch_size, c_in, f_dim, f_dim, requires_grad=True).to(device)
                # test forward pass
                output_torch = butterfly_mult_conv2d_torch(twiddle, input_, kernel_size, padding, increasing_stride)
                output = butterfly_mult_conv2d(twiddle, input_, kernel_size, padding, increasing_stride)
                self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                ((output - output_torch).abs().max().item(), device, c_out, increasing_stride))
                # test backward pass
                grad = torch.randn_like(output_torch)
                d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input_), grad, retain_graph=True)
                d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input_), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                ((d_input - d_input_torch).abs().max().item(), device, c_out, increasing_stride))
                self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol * 10, atol=self.atol * 10),
                                (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(), device, c_out, increasing_stride))

    def test_bbt_conv2d(self):
        device = 'cuda'
        c_in = 256
        kernel_size = 3
        batch_size = 128
        f_dim = 8
        padding = 1
        for c_out in [c_in, 2 * c_in]:
            nstack = c_out // c_in * kernel_size * kernel_size
            m = int(math.log2(c_in))
            # for nblocks in list(range(1, 4)) + [10, 14]:  # Test nblocks >= 7
            for nblocks in list(range(1, 3)):
                scaling = 1 / math.sqrt(2)
                twiddle = torch.randn((nstack, nblocks * 2 * m, c_in // 2, 2, 2), requires_grad=True, device=device) * scaling
                input_ = torch.randn(batch_size, c_in, f_dim, f_dim, requires_grad=True).to(device)
                # test forward pass
                output_torch = bbt_mult_conv2d_torch(twiddle, input_, kernel_size, padding)
                output = bbt_mult_conv2d(twiddle, input_, kernel_size, padding)
                self.assertTrue(torch.allclose(output, output_torch, rtol=self.rtol, atol=self.atol),
                                ((output - output_torch).abs().max().item(), device, nblocks, c_out))
                # test backward pass
                grad = torch.randn_like(output_torch)
                d_twiddle, d_input = torch.autograd.grad(output, (twiddle, input_), grad, retain_graph=True)
                d_twiddle_torch, d_input_torch = torch.autograd.grad(output_torch, (twiddle, input_), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_torch, rtol=self.rtol, atol=self.atol),
                                ((d_input - d_input_torch).abs().max().item(), device, nblocks, c_out))
                self.assertTrue(torch.allclose(d_twiddle, d_twiddle_torch, rtol=self.rtol * 10, atol=self.atol * 10),
                                (((d_twiddle - d_twiddle_torch) / d_twiddle_torch).abs().max().item(), device, nblocks, c_out))

    def test_butterfly_untied_fast(self):
        for batch_size, n in [(2048, 512)]:
            m = int(math.log2(n))
            nstack = 1
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                # for complex in [False, True]:
                for complex in [False]:
                    # for increasing_stride in [False]:
                    for increasing_stride in [True, False]:
                        if batch_size > 1024 and (device == 'cpu' or complex):
                            continue
                        scaling = 1 / math.sqrt(2) if not complex else 1 / 2
                        twiddle = torch.randn((nstack, m, n // 2, 2, 2) + (() if not complex else (2, )),
                                              requires_grad=True, device=device) * scaling
                        # twiddle = torch.arange(2 * n, dtype=torch.float, device=device, requires_grad=True).reshape(n // 2, 2, 2).unsqueeze(0).repeat(m, 1, 1, 1).unsqueeze(0)
                        twiddle_fast = twiddle_normal_to_fast_format(twiddle)
                        if not increasing_stride:
                            twiddle_fast = twiddle_fast.flip(1)
                        input = torch.randn((batch_size, nstack, n) + (() if not complex else (2, )),
                                            requires_grad=True, device=twiddle.device)
                        # input = torch.arange(n, dtype=torch.float, device=device, requires_grad=True).unsqueeze(0).unsqueeze(1).expand(batch_size, -1, -1)
                        output = butterfly_multiply_untied_forward_fast(twiddle_fast, input, increasing_stride)
                        # output_old = butterfly_mult_untied_torch(twiddle, input, increasing_stride)
                        output_old = butterfly_mult_untied(twiddle, input, increasing_stride)
                        self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                        ((output - output_old).abs().max().item(), device, complex, increasing_stride))
                        if n > 4096:
                            continue
                        grad = torch.randn_like(output)
                        d_twiddle, d_input = butterfly_multiply_untied_forward_backward_fast(twiddle_fast, input, grad, increasing_stride)
                        # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                        d_twiddle_old, d_input_old = torch.autograd.grad(output_old, (twiddle, input), grad, retain_graph=True)
                        self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                        ((d_input - d_input_old).abs().max().item(), device, complex, increasing_stride))
                        # if device == 'cuda' and batch_size > 1024 and not complex and increasing_stride:
                        #     print((d_twiddle - d_twiddle_old).abs().mean(dim=(0, 2, 3, 4)))
                        #     print(((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().mean(dim=(0, 2, 3, 4)))
                        #     i = ((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().argmax()
                        #     print(d_twiddle.flatten()[i])
                        #     print(d_twiddle_old.flatten()[i])
                        #     print(d_twiddle.flatten()[i-5:i+5])
                        #     print(d_twiddle_old.flatten()[i-5:i+5])
                        d_twiddle_old = twiddle_normal_to_fast_format(d_twiddle_old)
                        if not increasing_stride:
                            d_twiddle_old = d_twiddle_old.flip(1)
                        self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                                       rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                       atol=self.atol * (10 if batch_size > 1024 else 1)),
                                        (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(),
                                         (batch_size, n), device, complex, increasing_stride))

    def test_butterfly_untied_max5_fast(self):
        # for batch_size, n in [(2048, 256), (32768, 256)]:
        # for batch_size, n in [(32768, 2), (32768, 4), (32768, 8), (32768, 16), (32668, 32), (2048, 64), (2048, 128), (2048, 256), (2048, 512), (2048, 1024)]:
        # for batch_size, n in [(1, 512)]:
        for batch_size, n in [(2048, 512)]:
            m = int(math.log2(n))
            nstack = 1
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                # for complex in [False, True]:
                for complex in [False]:
                    # for increasing_stride in [True]:
                    for increasing_stride in [True, False]:
                        if batch_size > 1024 and (device == 'cpu' or complex):
                            continue
                        scaling = 1 / math.sqrt(2) if not complex else 1 / 2
                        twiddle = torch.randn((nstack, m, n // 2, 2, 2) + (() if not complex else (2, )),
                                              requires_grad=True, device=device) * scaling
                        # twiddle = torch.arange(2 * n, dtype=torch.float, device=device, requires_grad=True).reshape(n // 2, 2, 2).unsqueeze(0).repeat(m, 1, 1, 1).unsqueeze(0)
                        # twiddle = torch.arange(m * n * 2, dtype=torch.float, device=device, requires_grad=True).reshape(m, n // 2, 2, 2).unsqueeze(0)
                        # twiddle_fast = twiddle_normal_to_fast_format(twiddle)
                        # twiddle_fast = torch.randn((nstack, m, 2, n), requires_grad=True, device=device) * scaling
                        twiddle_fast = twiddle
                        if not increasing_stride:
                            twiddle_fast = twiddle_fast.flip(1)
                        input = torch.randn((batch_size, nstack, n) + (() if not complex else (2, )),
                                            requires_grad=True, device=twiddle.device)
                        # input = torch.randn((n, batch_size) + (() if not complex else (2, )), requires_grad=True, device=twiddle.device).t().unsqueeze(1)
                        # input = torch.arange(n, dtype=torch.float, device=device, requires_grad=True).unsqueeze(0).unsqueeze(1).expand(batch_size, -1, -1)
                        output = butterfly_multiply_untied_forward_max5_fast(twiddle_fast, input, increasing_stride)
                        # output_reshape = output.reshape(batch_size * 16, 1, 32)
                        # output_old = butterfly_mult_untied_torch(twiddle, input, increasing_stride)
                        # output_old = butterfly_mult_untied_torch(twiddle, input, increasing_stride, True)[1]
                        output_old = butterfly_mult_untied(twiddle, input, increasing_stride)
                        self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                        ((output - output_old).abs().max().item(), device, complex, increasing_stride))
                        grad = torch.randn_like(output)
                        d_twiddle, d_input = butterfly_multiply_untied_forward_backward_max5_fast(twiddle_fast, input, grad, increasing_stride)
                        # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                        d_twiddle_old, d_input_old = torch.autograd.grad(output_old, (twiddle, input), grad, retain_graph=True)
                        self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                        ((d_input - d_input_old).abs().max().item(), device, complex, increasing_stride))
                        # if device == 'cuda' and batch_size > 1024 and not complex and increasing_stride:
                        #     print((d_twiddle - d_twiddle_old).abs().mean(dim=(0, 2, 3, 4)))
                        #     print(((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().mean(dim=(0, 2, 3, 4)))
                        #     i = ((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().argmax()
                        #     print(d_twiddle.flatten()[i])
                        #     print(d_twiddle_old.flatten()[i])
                        #     print(d_twiddle.flatten()[i-5:i+5])
                        #     print(d_twiddle_old.flatten()[i-5:i+5])
                        d_twiddle_old = twiddle_normal_to_fast_format(d_twiddle_old)
                        if not increasing_stride:
                            d_twiddle_old = d_twiddle_old.flip(1)
                        self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                                       rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                       atol=self.atol * (10 if batch_size > 1024 else 1)),
                                        (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(),
                                         (batch_size, n), device, complex, increasing_stride))

    def test_butterfly_bbs_untied_fast(self):
        for batch_size, n in [(2048, 512)]:
            m = int(math.log2(n))
            nstack = 1
            nblocks = 3
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                if batch_size > 1024 and device == 'cpu':
                    continue
                scaling = 1 / math.sqrt(2)
                twiddle = torch.randn((nstack, nblocks * 2 * m, n // 2, 2, 2), requires_grad=True, device=device) * scaling
                # twiddle = torch.arange(16.0, requires_grad=True, device=device).view(nstack, nblocks * 2 * m, n // 2, 2, 2)
                input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                # input = torch.arange(2.0, requires_grad=True, device=twiddle.device).view(batch_size, nstack, n)
                twiddle_fast = []
                for i, chunk in enumerate(twiddle.chunk(nblocks * 2, dim=1)):
                    chunk_fast = twiddle_normal_to_fast_format(chunk)
                    if i % 2 == 0:
                        chunk_fast = chunk_fast.flip(1)
                    twiddle_fast.append(chunk_fast)
                twiddle_fast = torch.cat(twiddle_fast, dim=1)
                output = butterfly_bbs_multiply_untied_forward_fast(twiddle_fast, input)
                output_old = input
                for block in range(nblocks):
                    output_old = butterfly_mult_untied(twiddle[:, block * 2 * m:(block * 2 + 1) * m], output_old, False)
                    output_old = butterfly_mult_untied(twiddle[:, (block * 2 + 1) * m:(block + 1) * 2 * m], output_old, True)
                self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                ((output - output_old).abs().max().item(), device))
                grad = torch.randn_like(output)
                # grad = input.clone()
                d_twiddle, d_input = butterfly_bbs_multiply_untied_forward_backward_fast(twiddle_fast, input, grad)
                # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                d_twiddle_old, d_input_old = torch.autograd.grad(output_old, (twiddle, input), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                ((d_input - d_input_old).abs().max().item(), device))
                d_twiddle_temp = []
                for i, chunk in enumerate(d_twiddle_old.chunk(nblocks * 2, dim=1)):
                    chunk_fast = twiddle_normal_to_fast_format(chunk)
                    if i % 2 == 0:
                        chunk_fast = chunk_fast.flip(1)
                    d_twiddle_temp.append(chunk_fast)
                d_twiddle_old = torch.cat(d_twiddle_temp, dim=1)
                self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                               rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                               atol=self.atol * (10 if batch_size > 1024 else 1)),
                                (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(), (batch_size, n), device))

    def test_butterfly_bbs_untied_max5_fast(self):
        for batch_size, n in [(2048, 512)]:
            m = int(math.log2(n))
            nstack = 1
            nblocks = 3
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                if batch_size > 1024 and device == 'cpu':
                    continue
                scaling = 1 / math.sqrt(2)
                twiddle = torch.randn((nstack, nblocks * 2 * m, n // 2, 2, 2), requires_grad=True, device=device) * scaling
                # twiddle = torch.arange(16.0, requires_grad=True, device=device).view(nstack, nblocks * 2 * m, n // 2, 2, 2)
                input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                # input = torch.arange(2.0, requires_grad=True, device=twiddle.device).view(batch_size, nstack, n)
                twiddle_fast = []
                for i, chunk in enumerate(twiddle.chunk(nblocks * 2, dim=1)):
                    chunk_fast = twiddle_normal_to_fast_format(chunk)
                    if i % 2 == 0:
                        chunk_fast = chunk_fast.flip(1)
                    twiddle_fast.append(chunk_fast)
                twiddle_fast = torch.cat(twiddle_fast, dim=1)
                output = butterfly_multiply_untied_forward_max5_fast(twiddle_fast, input, False)
                output_old = input
                for block in range(nblocks):
                    output_old = butterfly_mult_untied(twiddle[:, block * 2 * m:(block * 2 + 1) * m], output_old, False)
                    output_old = butterfly_mult_untied(twiddle[:, (block * 2 + 1) * m:(block + 1) * 2 * m], output_old, True)
                self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                ((output - output_old).abs().max().item(), device))
                grad = torch.randn_like(output)
                # grad = input.clone()
                d_twiddle, d_input = butterfly_multiply_untied_forward_backward_max5_fast(twiddle_fast, input, grad, False)
                # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                d_twiddle_old, d_input_old = torch.autograd.grad(output_old, (twiddle, input), grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                ((d_input - d_input_old).abs().max().item(), device))
                d_twiddle_temp = []
                for i, chunk in enumerate(d_twiddle_old.chunk(nblocks * 2, dim=1)):
                    chunk_fast = twiddle_normal_to_fast_format(chunk)
                    if i % 2 == 0:
                        chunk_fast = chunk_fast.flip(1)
                    d_twiddle_temp.append(chunk_fast)
                d_twiddle_old = torch.cat(d_twiddle_temp, dim=1)
                self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                               rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                               atol=self.atol * (10 if batch_size > 1024 else 1)),
                                (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(), (batch_size, n), device))

    def test_butterfly_ortho_untied_fast(self):
        for batch_size, n in [(2048, 4096)]:
            m = int(math.log2(n))
            nstack = 1
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                for increasing_stride in [True, False]:
                    if batch_size > 1024 and device == 'cpu':
                        continue
                    twiddle = torch.rand((nstack, m, n // 2), requires_grad=True, device=device) * 2 * math.pi
                    # twiddle = torch.ones((nstack, m, n // 2), requires_grad=True, device=device) * 2 * math.pi * 0.3
                    twiddle_fast = twiddle if increasing_stride else twiddle.flip(1)
                    input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                    twiddle_fast_cos, twiddle_fast_sin = twiddle_fast.cos(), twiddle_fast.sin()
                    output = butterfly_ortho_multiply_untied_forward_fast(twiddle_fast_cos, twiddle_fast_sin, input, increasing_stride)
                    # output_old = butterfly_ortho_mult_untied_torch(twiddle, input)
                    output_old = butterfly_ortho_mult_untied(twiddle, input, increasing_stride)
                    self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                    ((output - output_old).abs().max().item(), device, increasing_stride))
                    grad = torch.randn_like(output)
                    d_twiddle, d_input = butterfly_ortho_multiply_untied_backward_fast(twiddle_fast_cos, twiddle_fast_sin,
                                                                                       output, grad, increasing_stride)
                    # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                    d_twiddle_old, d_input_old = torch.autograd.grad(output_old, (twiddle, input), grad, retain_graph=True)
                    self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                    ((d_input - d_input_old).abs().max().item(), device, increasing_stride))
                    if not increasing_stride:
                        d_twiddle_old = d_twiddle_old.flip(1)
                    self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                                   rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                                   atol=self.atol * (10 if batch_size > 1024 else 1)),
                                    (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(),
                                     (batch_size, n), device, increasing_stride))

    def test_butterfly_odo_untied_fast(self):
        for batch_size, n in [(2048, 512)]:
            m = int(math.log2(n))
            nstack = 1
            nblocks = 4
            # for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for device in ['cuda']:
                if batch_size > 1024 and device == 'cpu':
                    continue
                twiddle = torch.rand((nstack, nblocks * 2 * m, n // 2), requires_grad=True, device=device) * 2 * math.pi
                # diagonal = torch.randn((nstack, nblocks, n), requires_grad=True, device=device)
                # Not numerically stable, so we need diagonals to be away from zero
                diagonal = torch.rand((nstack, nblocks, n), requires_grad=True, device=device) + 0.1
                # diagonal = torch.ones((nstack, nblocks, n), requires_grad=True, device=device) * 0.1
                input = torch.randn((batch_size, nstack, n), requires_grad=True, device=twiddle.device)
                twiddle_fast_cos, twiddle_fast_sin = twiddle.cos(), twiddle.sin()
                output = butterfly_odo_multiply_untied_forward_fast(twiddle_fast_cos, twiddle_fast_sin, diagonal, input)
                # output_old = butterfly_odo_mult_untied_torch(twiddle, input)
                output_old = input
                for block in range(nblocks):
                    output_old = butterfly_ortho_mult_untied(twiddle[:, block * 2 * m:(block * 2 + 1) * m].flip(1), output_old, False)
                    output_old = output_old * diagonal[:, block]
                    output_old = butterfly_ortho_mult_untied(twiddle[:, (block * 2 + 1) * m:(block + 1) * 2 * m], output_old, True)
                self.assertTrue(torch.allclose(output, output_old, rtol=self.rtol, atol=self.atol),
                                ((output - output_old).abs().max().item(), device))
                grad = torch.randn_like(output)
                # d_twiddle, d_diagonal, d_input = butterfly_odo_multiply_untied_backward_fast(
                #     twiddle_fast_cos, twiddle_fast_sin, diagonal, output, grad)
                d_twiddle, d_diagonal, d_input = butterfly_odo_multiply_untied_forward_backward_fast(
                    twiddle_fast_cos, twiddle_fast_sin, diagonal, input, grad)
                # d_twiddle, d_input = torch.autograd.grad(output, (twiddle_fast, input), grad, retain_graph=True)
                d_twiddle_old, d_diagonal_old, d_input_old = torch.autograd.grad(output_old, (twiddle, diagonal, input),
                                                                                 grad, retain_graph=True)
                self.assertTrue(torch.allclose(d_input, d_input_old, rtol=self.rtol, atol=self.atol),
                                ((d_input - d_input_old).abs().max().item(), device))
                self.assertTrue(torch.allclose(d_diagonal, d_diagonal_old,
                                               rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                               atol=self.atol * (10 if batch_size > 1024 else 1)),
                                ((d_diagonal - d_diagonal_old).abs().max().item(), device))
                self.assertTrue(torch.allclose(d_twiddle, d_twiddle_old,
                                               rtol=self.rtol * (10 if batch_size > 1024 else 1),
                                               atol=self.atol * (10 if batch_size > 1024 else 1)),
                                (((d_twiddle - d_twiddle_old) / d_twiddle_old).abs().max().item(), (batch_size, n), device))


if __name__ == "__main__":
    unittest.main()
butterfly-master
tests_old/test_butterfly_multiply.py
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import unittest

import numpy as np

import torch

from butterfly.permutation import Permutation, FixedPermutation, PermutationFactor


class PermutationTest(unittest.TestCase):

    def test_permutation(self):
        batch_size = 10
        size = 16
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for complex in [False, True]:
                for share_logit in [False, True]:
                    for increasing_stride in [False, True]:
                        perm = Permutation(size, share_logit, increasing_stride).to(device)
                        input = torch.randn((batch_size, size) + (() if not complex else (2,)),
                                            device=device)
                        output = perm(input)
                        self.assertTrue(
                            output.shape == (batch_size, size) + (() if not complex else (2,)),
                            (output.shape, device, (size, size), complex, share_logit,
                             increasing_stride))
                        self.assertTrue(perm.argmax().dtype == torch.int64)
                        fixed_perm = FixedPermutation(perm.argmax())
                        output = fixed_perm(input)
                        self.assertTrue(
                            output.shape == (batch_size, size) + (() if not complex else (2,)),
                            (output.shape, device, (size, size), complex, share_logit,
                             increasing_stride))

    def test_permutation_single(self):
        batch_size = 10
        size = 16
        for device in ['cpu'] + ([] if not torch.cuda.is_available() else ['cuda']):
            for complex in [False, True]:
                perm = PermutationFactor(size).to(device)
                input = torch.randn((batch_size, size) + (() if not complex else (2,)),
                                    device=device)
                output = perm(input)
                self.assertTrue(
                    output.shape == (batch_size, size) + (() if not complex else (2,)),
                    (output.shape, device, (size, size), complex))
                self.assertTrue(perm.argmax().dtype == torch.int64)
                fixed_perm = FixedPermutation(perm.argmax())
                output = fixed_perm(input)
                self.assertTrue(
                    output.shape == (batch_size, size) + (() if not complex else (2,)),
                    (output.shape, device, (size, size), complex))


if __name__ == "__main__":
    unittest.main()
butterfly-master
tests_old/test_permutation.py
import math

import numpy as np
import torch
from torch import nn
from torch.utils.dlpack import to_dlpack, from_dlpack

# Check if cupy is available
if torch.cuda.is_available():
    use_cupy = True
    try:
        import cupy as cp
    except:
        use_cupy = False
        # import warnings
        # warnings.warn("Cupy isn't installed or isn't working properly. Will use Pytorch's complex matmul, which is slower.")
else:
    use_cupy = False


real_dtype_to_complex = {torch.float32: torch.complex64, torch.float64: torch.complex128}
complex_torch_dtype_to_np = {torch.complex64: np.complex64, torch.complex128: np.complex128}
if use_cupy:
    complex_np_dtype_to_real = {np.complex64: np.float32, np.complex128: np.float64,
                                cp.dtype('complex64'): np.float32,
                                cp.dtype('complex128'): np.float64}


def torch2cp(tensor):
    # Need contiguous, or else it will error
    return cp.fromDlpack(to_dlpack(torch.view_as_real(tensor.cuda().contiguous()))).view(
        complex_torch_dtype_to_np[tensor.dtype]).squeeze(-1)


def cp2torch(tensor):
    return torch.view_as_complex(from_dlpack(cp.ascontiguousarray(tensor)[..., None].view(
        complex_np_dtype_to_real[tensor.dtype]).toDlpack()))


def complex_matmul_torch(X, Y):
    # return X.real @ Y.real - X.imag @ Y.imag + 1j * (X.real @ Y.imag + X.imag @ Y.real)
    return torch.view_as_complex(torch.stack([X.real @ Y.real - X.imag @ Y.imag,
                                              X.real @ Y.imag + X.imag @ Y.real], dim=-1))


class ComplexMatmul(torch.autograd.Function):

    @staticmethod
    def forward(ctx, X, Y):
        ctx.save_for_backward(X, Y)
        # return torch.view_as_complex(torch.stack([X.real @ Y.real - X.imag @ Y.imag,
        #                                           X.real @ Y.imag + X.imag @ Y.real], dim=-1))
        # return complex_matmul_torch(X, Y)
        if not X.is_cuda:
            return X @ Y
        else:
            return (cp2torch(torch2cp(X) @ torch2cp(Y)) if use_cupy
                    else complex_matmul_torch(X, Y))

    @staticmethod
    def backward(ctx, grad):
        X, Y = ctx.saved_tensors
        grad_X, grad_Y = None, None
        if ctx.needs_input_grad[0]:
            Y_t = Y.transpose(-1, -2)
            # grad_X = (grad @ Y_t.conj()).sum_to_size(*X.shape)
            # grad_X = torch.view_as_complex(
            #     torch.stack([grad.real @ Y_t.real + grad.imag @ Y_t.imag,
            #                  -grad.real @ Y_t.imag + grad.imag @ Y_t.real], dim=-1)
            # ).sum_to_size(*X.shape)
            # grad_X = complex_matmul_torch(grad, Y_t.conj()).sum_to_size(*X.shape)
            if not Y.is_cuda:
                grad_X = (grad @ Y_t.conj()).sum_to_size(*X.shape)
            else:
                grad_X = (cp2torch(torch2cp(grad) @ torch2cp(Y_t.conj())) if use_cupy
                          else complex_matmul_torch(grad, Y_t.conj())).sum_to_size(*X.shape)
        if ctx.needs_input_grad[1]:
            X_t = X.transpose(-1, -2)
            # grad_Y = (X_t.conj() @ grad).sum_to_size(*Y.shape)
            # grad_Y = torch.view_as_complex(
            #     torch.stack([X_t.real @ grad.real + X_t.imag @ grad.imag,
            #                  X_t.real @ grad.imag - X_t.imag @ grad.real], dim=-1)
            # ).sum_to_size(*Y.shape)
            # grad_Y = complex_matmul_torch(X_t.conj(), grad).sum_to_size(*Y.shape)
            if not X.is_cuda:
                grad_Y = (X_t.conj() @ grad).sum_to_size(*Y.shape)
            else:
                grad_Y = (cp2torch(torch2cp(X_t.conj()) @ torch2cp(grad)) if use_cupy
                          else complex_matmul_torch(X_t.conj(), grad)).sum_to_size(*Y.shape)
        return grad_X, grad_Y


def complex_matmul(X, Y):
    return X @ Y if not X.is_complex() else ComplexMatmul.apply(X, Y)


# Implement backward pass of real2complex explicitly to avoid annoying (but harmless) warning
# "Casting complex values to real discards the imaginary part", as of Pytorch 1.7
class Real2ComplexFn(torch.autograd.Function):

    @staticmethod
    def forward(ctx, X):
        return X.to(real_dtype_to_complex[X.dtype])

    @staticmethod
    def backward(ctx, grad):
        return grad.real


real2complex = Real2ComplexFn.apply


# nn.Module form just to support convenient use of nn.Sequential
class Real2Complex(nn.Module):
    def forward(self, input):
        return real2complex(input)


class Complex2Real(nn.Module):
    def forward(self, input):
        return input.real


# Pytorch 1.7 doesn't have indexing_backward for complex so we have to write the backward
# pass explicitly
class IndexLastDim(torch.autograd.Function):

    @staticmethod
    def forward(ctx, X, permutation):
        ctx.save_for_backward(permutation)
        return X[..., permutation]

    @staticmethod
    def backward(ctx, grad):
        permutation, = ctx.saved_tensors
        output = torch.empty_like(grad)
        output[..., permutation] = grad
        return output, None


index_last_dim = IndexLastDim.apply


# Pytorch 1.7 doesn't support complex reshape backward for non-contiguous tensors (fixed in nightly)
def complex_reshape(x, *shape):
    if not x.is_complex():
        return x.reshape(*shape)
    else:
        return torch.view_as_complex(torch.view_as_real(x).reshape(*shape, 2))


class ComplexLinear(nn.Module):

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=torch.complex64))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features, dtype=torch.complex64))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        # Uniform random doesn't account for complex so the variance is larger by factor of sqrt(2)
        with torch.no_grad():
            self.weight /= math.sqrt(2)
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = complex_reshape(input, -1, input.size(-1))
        output = complex_matmul(output, self.weight.t())
        output = output.reshape(*input.shape[:-1], output.shape[-1])
        return output if self.bias is None else output + self.bias

    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
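# Minimal usage sketch (an illustrative addition, not part of the original file):
# on CPU, complex_matmul falls back to plain `@`, and Real2Complex/Complex2Real
# round-trip a real tensor. Shapes and tolerances here are assumptions.
if __name__ == '__main__':
    X = torch.randn(3, 4, dtype=torch.complex64)
    Y = torch.randn(4, 5, dtype=torch.complex64)
    out = complex_matmul(X, Y)
    assert torch.allclose(out, X @ Y, atol=1e-5)
    # Real -> complex -> real round trip through the nn.Module wrappers
    pipeline = nn.Sequential(Real2Complex(), Complex2Real())
    x = torch.randn(3, 4)
    assert torch.allclose(pipeline(x), x)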
butterfly-master
torch_butterfly/complex_utils.py
import math import numbers import torch from torch import nn import torch.nn.functional as F import torch_butterfly from torch_butterfly.multiply import butterfly_multiply from torch_butterfly.multiply import butterfly_multiply_torch from torch_butterfly.complex_utils import real_dtype_to_complex, complex_reshape from torch_butterfly.multiply_base4 import twiddle_base2_to_base4 class Butterfly(nn.Module): """Product of log N butterfly factors, each is a block 2x2 of diagonal matrices. Compatible with torch.nn.Linear. Parameters: in_size: size of input out_size: size of output bias: If set to False, the layer will not learn an additive bias. Default: ``True`` complex: whether complex or real increasing_stride: whether the first butterfly block will multiply with increasing stride (e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1). init: a torch.Tensor, or 'randn', 'ortho', 'identity', 'fft_no_br', or 'ifft_no_br'. Whether the weight matrix should be initialized to from randn twiddle, or to be randomly orthogonal/unitary, or to be the identity matrix, or the normalized FFT/iFFT twiddle (without the bit-reversal permutation). nblocks: number of B or B^T blocks. The B and B^T will alternate. """ def __init__(self, in_size, out_size, bias=True, complex=False, increasing_stride=True, init='randn', nblocks=1): super().__init__() self.in_size = in_size self.log_n = log_n = int(math.ceil(math.log2(in_size))) self.n = n = 1 << log_n self.out_size = out_size self.nstacks = int(math.ceil(out_size / self.n)) self.complex = complex self.increasing_stride = increasing_stride assert nblocks >= 1 self.nblocks = nblocks dtype = torch.get_default_dtype() if not self.complex else real_dtype_to_complex[torch.get_default_dtype()] twiddle_shape = (self.nstacks, nblocks, log_n, n // 2, 2, 2) if isinstance(init, torch.Tensor): self.init = None assert init.shape == twiddle_shape assert init.dtype == dtype self.twiddle = nn.Parameter(init.clone()) else: assert init in ['empty', 'randn', 'ortho', 'identity', 'fft_no_br', 'ifft_no_br'] self.init = init self.twiddle = nn.Parameter(torch.empty(twiddle_shape, dtype=dtype)) if bias: self.bias = nn.Parameter(torch.empty(out_size, dtype=dtype)) else: self.register_parameter('bias', None) self.twiddle._is_structured = True # Flag to avoid weight decay self.reset_parameters() def reset_parameters(self): """Initialize bias the same way as torch.nn.Linear.""" if self.bias is not None: bound = 1 / math.sqrt(self.in_size) nn.init.uniform_(self.bias, -bound, bound) twiddle = self.twiddle if self.init is None or self.init == 'empty': return elif self.init == 'randn': # complex randn already has the correct scaling of stddev=1.0 scaling = 1.0 / math.sqrt(2) with torch.no_grad(): twiddle.copy_(torch.randn(twiddle.shape, dtype=twiddle.dtype) * scaling) elif self.init == 'ortho': twiddle_core_shape = twiddle.shape[:-2] if not self.complex: theta = torch.rand(twiddle_core_shape) * math.pi * 2 c, s = torch.cos(theta), torch.sin(theta) det = torch.randint(0, 2, twiddle_core_shape, dtype=c.dtype) * 2 - 1 # Rotation (+1) or reflection (-1) with torch.no_grad(): twiddle.copy_(torch.stack((torch.stack((det * c, -det * s), dim=-1), torch.stack((s, c), dim=-1)), dim=-2)) else: # Sampling from the Haar measure on U(2) is a bit subtle. 
# Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf phi = torch.asin(torch.sqrt(torch.rand(twiddle_core_shape))) c, s = torch.cos(phi), torch.sin(phi) alpha, psi, chi = torch.rand((3, ) + twiddle_core_shape) * math.pi * 2 A = torch.exp(1j * (alpha + psi)) * c B = torch.exp(1j * (alpha + chi)) * s C = -torch.exp(1j * (alpha - chi)) * s D = torch.exp(1j * (alpha - psi)) * c with torch.no_grad(): twiddle.copy_(torch.stack((torch.stack((A, B), dim=-1), torch.stack((C, D), dim=-1)), dim=-2)) elif self.init == 'identity': twiddle_eye = torch.eye(2, dtype=twiddle.dtype).reshape(1, 1, 1, 1, 2, 2) twiddle_eye = twiddle_eye.expand(*twiddle.shape).contiguous() with torch.no_grad(): twiddle.copy_(twiddle_eye) elif self.init in ['fft_no_br', 'ifft_no_br']: assert self.complex, 'fft_no_br/ifft_no_br init requires Butterfly to be complex' special_fn = (torch_butterfly.special.fft if self.init == 'fft_no_br' else torch_butterfly.special.ifft) b_fft = special_fn(self.n, normalized=True, br_first=self.increasing_stride, with_br_perm=False) with torch.no_grad(): twiddle[:, 0] = b_fft.twiddle if self.nblocks > 1: twiddle_eye = torch.eye(2, dtype=twiddle.dtype).reshape(1, 1, 1, 1, 2, 2) twiddle_eye = twiddle_eye.expand(*twiddle[:, 1:].shape).contiguous() with torch.no_grad(): twiddle[:, 1:] = twiddle_eye def forward(self, input, transpose=False, conjugate=False, subtwiddle=False): """ Parameters: input: (batch, *, in_size) transpose: whether the butterfly matrix should be transposed. conjugate: whether the butterfly matrix should be conjugated. subtwiddle: allow using only part of the parameters for smaller input. Could be useful for weight sharing. out_size is set to self.nstacks * self.n in this case Return: output: (batch, *, out_size) """ twiddle = self.twiddle output = self.pre_process(input) output_size = self.out_size if self.nstacks == 1 else None if subtwiddle: log_n = int(math.ceil(math.log2(input.size(-1)))) n = 1 << log_n twiddle = (twiddle[:, :, :log_n, :n // 2] if self.increasing_stride else twiddle[:, :, -log_n:, :n // 2]) output_size = None if conjugate and self.complex: twiddle = twiddle.conj() if not transpose: output = butterfly_multiply(twiddle, output, self.increasing_stride, output_size) else: twiddle = twiddle.transpose(-1, -2).flip([1, 2]) last_increasing_stride = self.increasing_stride != ((self.nblocks - 1) % 2 == 1) output = butterfly_multiply(twiddle, output, not last_increasing_stride, output_size) if not subtwiddle: return self.post_process(input, output) else: return self.post_process(input, output, out_size=output.size(-1)) def pre_process(self, input): # Reshape to (N, in_size) input_size = input.size(-1) output = complex_reshape(input, -1, input_size) batch = output.shape[0] output = output.unsqueeze(1).expand(batch, self.nstacks, input_size) return output def post_process(self, input, output, out_size=None): if out_size is None: out_size = self.out_size batch = output.shape[0] output = output.view(batch, self.nstacks * output.size(-1)) if out_size != output.shape[-1]: # Take top rows output = output[:, :out_size] if self.bias is not None: output = output + self.bias[:out_size] return output.view(*input.size()[:-1], out_size) def __imul__(self, scale): """In-place multiply the whole butterfly matrix by some scale factor, by multiplying the twiddle. 
Scale must be nonnegative """ assert isinstance(scale, numbers.Number) assert scale >= 0 self.twiddle *= scale ** (1.0 / self.twiddle.shape[1] / self.twiddle.shape[2]) return self def diagonal_multiply_(self, diagonal, diag_first): """ Combine a Butterfly and a diagonal into another Butterfly. Only support nstacks==1 for now. Parameters: diagonal: size (in_size,) if diag_first, else (out_size,). Should be of type complex if butterfly.complex == True. diag_first: If True, the map is input -> diagonal -> butterfly. If False, the map is input -> butterfly -> diagonal. """ return torch_butterfly.combine.diagonal_butterfly(self, diagonal, diag_first, inplace=True) def to_base4(self): with torch.no_grad(): twiddle4, twiddle2 = twiddle_base2_to_base4(self.twiddle, self.increasing_stride) new = torch_butterfly.ButterflyBase4(self.in_size, self.out_size, self.bias is not None, self.complex, self.increasing_stride, init=(twiddle4, twiddle2), nblocks=self.nblocks).to(self.twiddle.device) if new.bias is not None: with torch.no_grad(): new.bias.copy_(self.bias) return new def extra_repr(self): s = 'in_size={}, out_size={}, bias={}, complex={}, increasing_stride={}, init={}, nblocks={}'.format( self.in_size, self.out_size, self.bias is not None, self.complex, self.increasing_stride, self.init, self.nblocks,) return s class ButterflyUnitary(Butterfly): """Same as Butterfly, but constrained to be unitary Compatible with torch.nn.Linear. Parameters: in_size: size of input out_size: size of output bias: If set to False, the layer will not learn an additive bias. Default: ``True`` increasing_stride: whether the first butterfly block will multiply with increasing stride (e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1). nblocks: number of B or B^T blocks. The B and B^T will alternate. """ def __init__(self, in_size, out_size, bias=True, increasing_stride=True, nblocks=1): nn.Module.__init__(self) self.in_size = in_size self.log_n = log_n = int(math.ceil(math.log2(in_size))) self.n = n = 1 << log_n # Will zero-pad input if in_size is not a power of 2 self.out_size = out_size self.nstacks = int(math.ceil(out_size / self.n)) self.complex = True self.increasing_stride = increasing_stride assert nblocks >= 1 self.nblocks = nblocks complex_dtype = real_dtype_to_complex[torch.get_default_dtype()] twiddle_shape = (self.nstacks, nblocks, log_n, n // 2, 4) self.init = 'ortho' self.twiddle = nn.Parameter(torch.empty(twiddle_shape)) if bias: self.bias = nn.Parameter(torch.empty(out_size, dtype=complex_dtype)) else: self.register_parameter('bias', None) self.twiddle._is_structured = True # Flag to avoid weight decay self.reset_parameters() def reset_parameters(self): """Initialize bias the same way as torch.nn.Linear.""" # Sampling from the Haar measure on U(2) is a bit subtle. # Using the parameterization here: http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20[paper].pdf twiddle_core_shape = self.twiddle.shape[:-1] phi = torch.asin(torch.sqrt(torch.rand(twiddle_core_shape))) alpha, psi, chi = torch.rand((3, ) + twiddle_core_shape) * math.pi * 2 with torch.no_grad(): self.twiddle.copy_(torch.stack([phi, alpha, psi, chi], dim=-1)) if self.bias is not None: bound = 1 / math.sqrt(self.in_size) nn.init.uniform_(self.bias, -bound, bound) def forward(self, input, transpose=False, conjugate=False, subtwiddle=False): """ Parameters: input: (batch, *, in_size) transpose: whether the butterfly matrix should be transposed. conjugate: whether the butterfly matrix should be conjugated. 
subtwiddle: allow using only part of the parameters for smaller input. Could be useful for weight sharing. out_size is set to self.nstacks * self.n in this case Return: output: (batch, *, out_size) """ phi, alpha, psi, chi = torch.unbind(self.twiddle, -1) c, s = torch.cos(phi), torch.sin(phi) # Pytorch 1.7 doesn't support complex exp backward so we have to use cos/sin A = torch.stack((c * torch.cos(alpha + psi), c * torch.sin(alpha + psi)), dim=-1) B = torch.stack((s * torch.cos(alpha + chi), s * torch.sin(alpha + chi)), dim=-1) C = torch.stack((-s * torch.cos(alpha - chi), -s * torch.sin(alpha - chi)), dim=-1) D = torch.stack((c * torch.cos(alpha - psi), c * torch.sin(alpha - psi)), dim=-1) twiddle = torch.stack([torch.stack([A, B], dim=-2), torch.stack([C, D], dim=-2)], dim=-3) twiddle = torch.view_as_complex(twiddle) output = self.pre_process(input) output_size = self.out_size if self.nstacks == 1 else None if subtwiddle: log_n = int(math.ceil(math.log2(input.size(-1)))) n = 1 << log_n twiddle = (twiddle[:, :, :log_n, :n // 2] if self.increasing_stride else twiddle[:, :, -log_n:, :n // 2]) output_size = None if conjugate and self.complex: twiddle = twiddle.conj() if not transpose: output = butterfly_multiply(twiddle, output, self.increasing_stride, output_size) else: twiddle = twiddle.transpose(-1, -2).flip([1, 2]) last_increasing_stride = self.increasing_stride != ((self.nblocks - 1) % 2 == 1) output = butterfly_multiply(twiddle, output, not last_increasing_stride, output_size) if not subtwiddle: return self.post_process(input, output) else: return self.post_process(input, output, out_size=output.size(-1)) __imul__ = None to_base4 = None def extra_repr(self): s = 'in_size={}, out_size={}, bias={}, increasing_stride={}, nblocks={}'.format( self.in_size, self.out_size, self.bias is not None, self.increasing_stride, self.nblocks,) return s class ButterflyBmm(Butterfly): """Same as Butterfly, but performs batched matrix multiply. Compatible with torch.nn.Linear. Parameters: in_size: size of input out_size: size of output matrix_batch: how many butterfly matrices bias: If set to False, the layer will not learn an additive bias. Default: ``True`` complex: whether complex or real increasing_stride: whether the first butterfly block will multiply with increasing stride (e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1). init: 'randn', 'ortho', or 'identity'. Whether the weight matrix should be initialized to from randn twiddle, or to be randomly orthogonal/unitary, or to be the identity matrix. nblocks: number of B or B^T blocks. The B and B^T will alternate. 
""" def __init__(self, in_size, out_size, matrix_batch=1, bias=True, complex=False, increasing_stride=True, init='randn', nblocks=1): nn.Module.__init__(self) self.in_size = in_size self.log_n = log_n = int(math.ceil(math.log2(in_size))) self.n = n = 1 << log_n self.out_size = out_size self.matrix_batch = matrix_batch self.nstacks = int(math.ceil(out_size / self.n)) self.complex = complex self.increasing_stride = increasing_stride assert nblocks >= 1 self.nblocks = nblocks dtype = torch.get_default_dtype() if not self.complex else real_dtype_to_complex[torch.get_default_dtype()] twiddle_shape = (self.matrix_batch * self.nstacks, nblocks, log_n, n // 2, 2, 2) if isinstance(init, torch.Tensor): self.init = None assert init.shape == twiddle_shape assert init.dtype == dtype self.twiddle = nn.Parameter(init.clone()) else: assert init in ['randn', 'ortho', 'identity', 'fft_no_br', 'ifft_no_br'] self.init = init self.twiddle = nn.Parameter(torch.empty(twiddle_shape, dtype=dtype)) if bias: self.bias = nn.Parameter(torch.empty(self.matrix_batch, out_size, dtype=dtype)) else: self.register_parameter('bias', None) self.twiddle._is_structured = True # Flag to avoid weight decay self.reset_parameters() def forward(self, input, transpose=False, conjugate=False): """ Parameters: input: (batch, *, matrix_batch, in_size) transpose: whether the butterfly matrix should be transposed. conjugate: whether the butterfly matrix should be conjugated. Return: output: (batch, *, matrix_batch, out_size) """ return super().forward(input, transpose, conjugate, subtwiddle=False) def pre_process(self, input): # Reshape to (N, matrix_batch, in_size) input_size = input.size(-1) assert input.size(-2) == self.matrix_batch output = complex_reshape(input, -1, self.matrix_batch, input_size) batch = output.shape[0] output = output.unsqueeze(2).expand(batch, self.matrix_batch, self.nstacks, input_size) output = output.reshape(batch, self.matrix_batch * self.nstacks, input_size) return output def post_process(self, input, output, out_size=None): if out_size is None: out_size = self.out_size batch = output.shape[0] output = output.view(batch, self.matrix_batch, self.nstacks * output.size(-1)) if out_size != output.shape[-1]: # Take top rows output = output[:, :, :out_size] if self.bias is not None: output = output + self.bias[:, :out_size] return output.view(*input.size()[:-2], self.matrix_batch, self.out_size) to_base4 = None def extra_repr(self): s = 'in_size={}, out_size={}, matrix_batch={}, bias={}, complex={}, increasing_stride={}, init={}, nblocks={}'.format( self.in_size, self.out_size, self.matrix_batch, self.bias is not None, self.complex, self.increasing_stride, self.init, self.nblocks,) return s
butterfly-master
torch_butterfly/butterfly.py
import copy import torch from torch import nn from torch.nn import functional as F from torch_butterfly import Butterfly from torch_butterfly.permutation import FixedPermutation, bitreversal_permutation def diagonal_butterfly(butterfly: Butterfly, diagonal: torch.Tensor, diag_first: bool, inplace: bool = True) -> Butterfly: """ Combine a Butterfly and a diagonal into another Butterfly. Only support nstacks==1 for now. Parameters: butterfly: Butterfly(in_size, out_size) diagonal: size (in_size,) if diag_first, else (out_size,). Should be of type complex if butterfly.complex == True. diag_first: If True, the map is input -> diagonal -> butterfly. If False, the map is input -> butterfly -> diagonal. inplace: whether to modify the input Butterfly """ assert butterfly.nstacks == 1 assert butterfly.bias is None twiddle = butterfly.twiddle.clone() n = 1 << twiddle.shape[2] if diagonal.shape[-1] < n: diagonal = F.pad(diagonal, (0, n - diagonal.shape[-1]), value=1) if diag_first: if butterfly.increasing_stride: twiddle[:, 0, 0, :, :, 0] *= diagonal[::2].unsqueeze(-1) twiddle[:, 0, 0, :, :, 1] *= diagonal[1::2].unsqueeze(-1) else: n = diagonal.shape[-1] twiddle[:, 0, 0, :, :, 0] *= diagonal[:n // 2].unsqueeze(-1) twiddle[:, 0, 0, :, :, 1] *= diagonal[n // 2:].unsqueeze(-1) else: # Whether the last block is increasing or decreasing stride increasing_stride = butterfly.increasing_stride != ((butterfly.nblocks - 1) % 2 == 1) if increasing_stride: n = diagonal.shape[-1] twiddle[:, -1, -1, :, 0, :] *= diagonal[:n // 2].unsqueeze(-1) twiddle[:, -1, -1, :, 1, :] *= diagonal[n // 2:].unsqueeze(-1) else: twiddle[:, -1, -1, :, 0, :] *= diagonal[::2].unsqueeze(-1) twiddle[:, -1, -1, :, 1, :] *= diagonal[1::2].unsqueeze(-1) out_butterfly = butterfly if inplace else copy.deepcopy(butterfly) with torch.no_grad(): out_butterfly.twiddle.copy_(twiddle) return out_butterfly def butterfly_product(butterfly1: Butterfly, butterfly2: Butterfly) -> Butterfly: """ Combine product of two butterfly matrices into one Butterfly. """ assert butterfly1.bias is None and butterfly2.bias is None assert butterfly1.complex == butterfly2.complex assert butterfly1.nstacks == butterfly2.nstacks assert butterfly1.log_n == butterfly2.log_n b1_end_increasing_stride = butterfly1.increasing_stride != (butterfly1.nblocks % 2 == 1) if b1_end_increasing_stride != butterfly2.increasing_stride: # Need to insert an Identity block identity = Butterfly(butterfly1.in_size, butterfly1.out_size, bias=False, complex=butterfly1.complex, increasing_stride=b1_end_increasing_stride, init='identity') butterfly1 = butterfly_product(butterfly1, identity) new_twiddle = torch.cat((butterfly1.twiddle, butterfly2.twiddle), dim=1) b = Butterfly(1 << butterfly1.log_n, 1 << butterfly1.log_n, bias=False, complex=butterfly1.complex, increasing_stride=butterfly1.increasing_stride, init=new_twiddle, nblocks=butterfly1.nblocks + butterfly2.nblocks) b.in_size = butterfly1.in_size b.out_size = butterfly2.out_size return b class TensorProduct(nn.Module): def __init__(self, map1, map2) -> None: """Perform map1 on the last dimension of the input and then map2 on the next to last dimension. 
""" super().__init__() self.map1 = map1 self.map2 = map2 def forward(self, input: torch.Tensor) -> torch.Tensor: """ Parameter: input: (*, n2, n1) Return: output: (*, n2, n1) """ out = self.map1(input) return self.map2(out.transpose(-1, -2)).transpose(-1, -2) def butterfly_kronecker(butterfly1: Butterfly, butterfly2: Butterfly) -> Butterfly: """Combine two butterflies of size n1 and n2 into their Kronecker product of size n1 * n2. They must both have increasing_stride=True or increasing_stride=False. If butterfly1 or butterfly2 has padding, then the kronecker product (after flattening input) will not produce the same result unless the input is padding in the same way before flattening. Only support nstacks==1, nblocks==1 for now. """ assert butterfly1.increasing_stride == butterfly2.increasing_stride assert butterfly1.complex == butterfly2.complex assert not butterfly1.bias and not butterfly2.bias assert butterfly1.nstacks == 1 and butterfly2.nstacks == 1 assert butterfly1.nblocks == 1 and butterfly2.nblocks == 1 increasing_stride = butterfly1.increasing_stride complex = butterfly1.complex log_n1 = butterfly1.twiddle.shape[2] log_n2 = butterfly2.twiddle.shape[2] log_n = log_n1 + log_n2 n = 1 << log_n twiddle1 = butterfly1.twiddle twiddle2 = butterfly2.twiddle twiddle1 = twiddle1.detach().repeat(1, 1, 1, 1 << log_n2, 1, 1) twiddle2 = twiddle2.detach().repeat_interleave(1 << log_n1, dim=3) twiddle = (torch.cat((twiddle1, twiddle2), dim=2) if increasing_stride else torch.cat((twiddle2, twiddle1), dim=2)) b = Butterfly(n, n, bias=False, complex=complex, increasing_stride=increasing_stride, init=twiddle) b.in_size = butterfly1.in_size * butterfly2.in_size b.out_size = butterfly1.out_size * butterfly2.out_size return b def permutation_kronecker(perm1: FixedPermutation, perm2: FixedPermutation) -> FixedPermutation: """Combine two permutations of size n1 and n2 into their Kronecker product of size n1 * n2. """ n1, n2 = perm1.permutation.shape[-1], perm2.permutation.shape[-1] x = torch.arange(n2 * n1, device=perm1.permutation.device).reshape(n2, n1) perm = perm2(perm1(x).t()).t().reshape(-1) return FixedPermutation(perm) def flip_increasing_stride(butterfly: Butterfly) -> nn.Module: """Convert a Butterfly with increasing_stride=True/False to a Butterfly with increasing_stride=False/True, along with 2 bit-reversal permutations. Follows the proof of Lemma G.4. """ assert butterfly.bias is None assert butterfly.in_size == 1 << butterfly.log_n assert butterfly.out_size == 1 << butterfly.log_n n = butterfly.in_size new_butterfly = copy.deepcopy(butterfly) new_butterfly.increasing_stride = not butterfly.increasing_stride br = bitreversal_permutation( n, pytorch_format=True) br_half = bitreversal_permutation(n // 2, pytorch_format=True) with torch.no_grad(): new_butterfly.twiddle.copy_(new_butterfly.twiddle[:, :, :, br_half]) return nn.Sequential(FixedPermutation(br), new_butterfly, FixedPermutation(br))
butterfly-master
torch_butterfly/combine.py
import math
from typing import Tuple, Optional

import torch
from torch.nn import functional as F


@torch.jit.script
def butterfly_multiply_fw(twiddle: torch.Tensor, input: torch.Tensor,
                          increasing_stride: bool,
                          output_size: Optional[int] = None) -> torch.Tensor:
    return torch.ops.torch_butterfly.butterfly_multiply_fw(twiddle, input, increasing_stride,
                                                           output_size)


@torch.jit.script
def butterfly_multiply_bw(twiddle: torch.Tensor, input: torch.Tensor, grad: torch.Tensor,
                          increasing_stride: bool) -> Tuple[torch.Tensor, torch.Tensor]:
    return torch.ops.torch_butterfly.butterfly_multiply_bw(twiddle, input, grad,
                                                           increasing_stride)


@torch.jit.script
def butterfly_multiply(twiddle: torch.Tensor, input: torch.Tensor, increasing_stride: bool,
                       output_size: Optional[int] = None) -> torch.Tensor:
    return torch.ops.torch_butterfly.butterfly_multiply(twiddle, input, increasing_stride,
                                                        output_size)


def butterfly_multiply_torch(twiddle, input, increasing_stride=True, output_size=None):
    batch_size, nstacks, input_size = input.shape
    nblocks = twiddle.shape[1]
    log_n = twiddle.shape[2]
    n = 1 << log_n
    assert twiddle.shape == (nstacks, nblocks, log_n, n // 2, 2, 2)
    # Pad or trim input to size n
    input = F.pad(input, (0, n - input_size)) if input_size < n else input[:, :, :n]
    output_size = n if output_size is None else output_size
    assert output_size <= n
    output = input.contiguous()
    cur_increasing_stride = increasing_stride
    for block in range(nblocks):
        for idx in range(log_n):
            log_stride = idx if cur_increasing_stride else log_n - 1 - idx
            stride = 1 << log_stride
            # shape (nstacks, n // (2 * stride), 2, 2, stride)
            t = twiddle[:, block, idx].view(
                nstacks, n // (2 * stride), stride, 2, 2).permute(0, 1, 3, 4, 2)
            output_reshape = output.view(
                batch_size, nstacks, n // (2 * stride), 1, 2, stride)
            output = (t * output_reshape).sum(dim=4)
        cur_increasing_stride = not cur_increasing_stride
    return output.view(batch_size, nstacks, n)[:, :, :output_size]
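# Minimal usage sketch (an illustrative addition, not part of the original file)
# for the pure-PyTorch reference implementation above. One stack, one block,
# n = 16, so the twiddle holds log2(16) = 4 butterfly factors.
if __name__ == '__main__':
    batch_size, nstacks, nblocks, log_n = 2, 1, 1, 4
    n = 1 << log_n
    twiddle = torch.randn(nstacks, nblocks, log_n, n // 2, 2, 2) / math.sqrt(2)
    x = torch.randn(batch_size, nstacks, n)
    out = butterfly_multiply_torch(twiddle, x, increasing_stride=True)
    assert out.shape == (batch_size, nstacks, n)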
butterfly-master
torch_butterfly/multiply.py
import importlib
from pathlib import Path

import torch

__version__ = '0.0.0'

for library in ['_version', '_butterfly']:
    torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
        # need str(Path) otherwise it can't find it
        library, [str(Path(__file__).absolute().parent)]).origin)


def check_cuda_version():
    if torch.version.cuda is not None:  # pragma: no cover
        cuda_version = torch.ops.torch_butterfly.cuda_version()
        if cuda_version == -1:
            major = minor = 0
        elif cuda_version < 10000:
            major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
        else:
            major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
        t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
        if t_major != major or t_minor != minor:
            raise RuntimeError(
                f'Detected that PyTorch and torch_butterfly were compiled with '
                f'different CUDA versions. PyTorch has CUDA version '
                f'{t_major}.{t_minor} and torch_butterfly has CUDA version '
                f'{major}.{minor}. Please reinstall the torch_butterfly that '
                f'matches your PyTorch install.')


check_cuda_version()

from .butterfly import Butterfly, ButterflyUnitary, ButterflyBmm  # noqa
from .butterfly_base4 import ButterflyBase4  # noqa
from .multiply import butterfly_multiply  # noqa
from . import combine
from . import complex_utils
from . import diagonal
from . import permutation
from . import special
from . import multiply_base4

__all__ = [
    'Butterfly', 'ButterflyUnitary', 'ButterflyBmm', 'ButterflyBase4',
    'butterfly_multiply',
    '__version__',
]
butterfly-master
torch_butterfly/__init__.py
import math

import torch
import torch.nn.functional as F

from torch_butterfly.multiply import butterfly_multiply
from benchmark_utils import benchmark, benchmark_fw_bw

batch_size = 2048
n = 512
log_n = int(math.log2(n))
assert n == 1 << log_n
input_size = n - 7
output_size = n - 5

input = torch.randn(batch_size, 1, input_size, device='cuda', requires_grad=True)
twiddle = torch.randn(1, 1, log_n, n // 2, 2, 2, device='cuda',
                      requires_grad=True) / math.sqrt(2)


def fn(twiddle, input):
    return butterfly_multiply(twiddle, input, True, output_size)


def fn_padded(twiddle, input):
    input_padded = F.pad(input, (0, n - input_size))
    return butterfly_multiply(twiddle, input_padded, True)[:, :, :output_size]


print(benchmark_fw_bw(fn, (twiddle, input), 10000))
print(benchmark_fw_bw(fn_padded, (twiddle, input), 10000))
butterfly-master
torch_butterfly/input_padding_benchmark.py
import math import torch from torch.nn import functional as F def butterfly_multiply_base4_torch(twiddle4, twiddle2, input, increasing_stride=True, output_size=None): batch_size, nstacks, input_size = input.shape nblocks = twiddle4.shape[1] log_n = twiddle4.shape[2] * 2 + twiddle2.shape[2] n = 1 << log_n if log_n // 2 > 0: assert twiddle4.shape == (nstacks, nblocks, log_n // 2, n // 4, 4, 4) if log_n % 2 == 1: assert twiddle2.shape == (nstacks, nblocks, 1, n // 2, 2, 2) # Pad or trim input to size n input = F.pad(input, (0, n - input_size)) if input_size < n else input[:, :, :n] output_size = n if output_size is None else output_size assert output_size <= n output = input.contiguous() cur_increasing_stride = increasing_stride for block in range(nblocks): for idx in range(log_n // 2): log2_stride = 2 * idx if cur_increasing_stride else log_n - 2 - 2 * idx stride = 1 << (log2_stride) # shape (nstacks, n // (4 * stride), 4, 4, stride) t = twiddle4[:, block, idx].view( nstacks, n // (4 * stride), stride, 4, 4).permute(0, 1, 3, 4, 2) output_reshape = output.view(batch_size, nstacks, n // (4 * stride), 1, 4, stride) output = (t * output_reshape).sum(dim=4) if log_n % 2 == 1: log2_stride = log_n - 1 if cur_increasing_stride else 0 stride = 1 << log2_stride # shape (nstacks, n // (2 * stride), 2, 2, stride) t = twiddle2[:, block, 0].view( nstacks, n // (2 * stride), stride, 2, 2).permute(0, 1, 3, 4, 2) output_reshape = output.view(batch_size, nstacks, n // (2 * stride), 1, 2, stride) output = (t * output_reshape).sum(dim=4) cur_increasing_stride = not cur_increasing_stride return output.view(batch_size, nstacks, n)[:, :, :output_size] def twiddle_base2_to_base4(twiddle, increasing_stride=True): nstacks, nblocks, log_n = twiddle.shape[:3] n = 1 << log_n assert twiddle.shape == (nstacks, nblocks, log_n, n // 2, 2, 2) twiddle2 = (twiddle[:, :, -1:] if log_n % 2 == 1 else torch.empty(nstacks, nblocks, 0, n // 2, 2, 2, dtype=twiddle.dtype, device=twiddle.device)) twiddle4 = torch.empty(nstacks, nblocks, log_n // 2, n // 4, 4, 4, dtype=twiddle.dtype, device=twiddle.device) cur_increasing_stride = increasing_stride for block in range(nblocks): for idx in range(log_n // 2): log2_stride = 2 * idx if cur_increasing_stride else log_n - 2 - 2 * idx stride = 1 << (log2_stride) # Warning: All this dimension manipulation (transpose and unsqueeze) is super tricky. # I'm not even sure why it works (I figured it out with trial and error). even = twiddle[:, block, 2 * idx].view( nstacks, n // (4 * stride), 2, stride, 2, 2).transpose(-3, -4) odd = twiddle[:, block, 2 * idx + 1].view( nstacks, n // (4 * stride), 2, stride, 2, 2).transpose(-3, -4) if cur_increasing_stride: prod = odd.transpose(-2, -3).unsqueeze(-1) * even.transpose(-2, -3).unsqueeze(-4) else: prod = odd.unsqueeze(-2) * even.permute(0, 1, 2, 4, 5, 3).unsqueeze(-3) prod = prod.reshape(nstacks, n // 4, 4, 4) twiddle4[:, block, idx].copy_(prod) cur_increasing_stride = not cur_increasing_stride return twiddle4, twiddle2
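# Minimal usage sketch (an illustrative addition, not part of the original file),
# assuming the torch_butterfly package is importable: converting base-2 twiddles
# to base-4 should leave the linear map unchanged, so the base-4 multiply matches
# the base-2 reference. Shapes and tolerance are assumptions.
if __name__ == '__main__':
    from torch_butterfly.multiply import butterfly_multiply_torch
    nstacks, nblocks, log_n = 1, 1, 4
    n = 1 << log_n
    twiddle = torch.randn(nstacks, nblocks, log_n, n // 2, 2, 2) / math.sqrt(2)
    twiddle4, twiddle2 = twiddle_base2_to_base4(twiddle, increasing_stride=True)
    x = torch.randn(3, nstacks, n)
    out4 = butterfly_multiply_base4_torch(twiddle4, twiddle2, x, increasing_stride=True)
    out2 = butterfly_multiply_torch(twiddle, x, increasing_stride=True)
    assert torch.allclose(out4, out2, atol=1e-4)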
butterfly-master
torch_butterfly/multiply_base4.py
import math

import numpy as np
import torch
from torch import nn

from torch_butterfly.complex_utils import real_dtype_to_complex


class Diagonal(nn.Module):

    def __init__(self, size=None, complex=False, diagonal_init=None):
        """Multiply by diagonal matrix
        Parameter:
            size: int
            diagonal_init: (n, )
        """
        super().__init__()
        if diagonal_init is not None:
            self.size = diagonal_init.shape
            self.diagonal = nn.Parameter(diagonal_init.detach().clone())
            self.complex = self.diagonal.is_complex()
        else:
            assert size is not None
            self.size = size
            dtype = (torch.get_default_dtype() if not complex
                     else real_dtype_to_complex[torch.get_default_dtype()])
            self.diagonal = nn.Parameter(torch.randn(size, dtype=dtype))
            self.complex = complex

    def forward(self, input):
        """
        Parameters:
            input: (batch, *, size)
        Return:
            output: (batch, *, size)
        """
        return input * self.diagonal
butterfly-master
torch_butterfly/diagonal.py
import math from typing import List, Tuple, Union import numpy as np import scipy.linalg import torch from torch import nn from torch_butterfly import Butterfly from torch_butterfly.complex_utils import index_last_dim, real2complex def bitreversal_permutation(n, pytorch_format=False): """Return the bit reversal permutation used in FFT. By default, the permutation is stored in numpy array. Parameter: n: integer, must be a power of 2. pytorch_format: whether the permutation is stored as numpy array or pytorch tensor. Return: perm: bit reversal permutation, numpy array of size n """ log_n = int(math.log2(n)) assert n == 1 << log_n, 'n must be a power of 2' perm = np.arange(n).reshape(n, 1) for i in range(log_n): n1 = perm.shape[0] // 2 perm = np.hstack((perm[:n1], perm[n1:])) perm = perm.squeeze(0) return perm if not pytorch_format else torch.tensor(perm) def wavelet_permutation(n, pytorch_format=False): """Return the bit reversal permutation used in discrete wavelet transform. Example: [0, 1, ..., 7] -> [0, 4, 2, 6, 1, 3, 5, 7] By default, the permutation is stored in numpy array. Parameter: n: integer, must be a power of 2. pytorch_format: whether the permutation is stored as numpy array or pytorch tensor. Return: perm: numpy array of size n """ log_n = int(math.log2(n)) assert n == 1 << log_n, 'n must be a power of 2' perm = np.arange(n) head, tail = perm[:], perm[:0] # empty tail for i in range(log_n): even, odd = head[::2], head[1::2] head = even tail = np.hstack((odd, tail)) perm = np.hstack((head, tail)) return perm if not pytorch_format else torch.tensor(perm) class FixedPermutation(nn.Module): def __init__(self, permutation: torch.Tensor) -> None: """Fixed permutation. Parameter: permutation: (n, ) tensor of ints """ super().__init__() self.register_buffer('permutation', permutation) def forward(self, input: torch.Tensor) -> torch.Tensor: """ Parameters: input: (batch, *, size) Return: output: (batch, *, size) """ # return input[..., self.permutation] # Pytorch 1.7 doesn't have indexing_backward for complex. # So we use our own backward return index_last_dim(input, self.permutation) def to_butterfly(self, complex=False, increasing_stride=False): return perm2butterfly(self.permutation, complex, increasing_stride) def invert(perm: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: """Get the inverse of a given permutation vector. Equivalent to converting a permutation vector from left-multiplication format to right multiplication format. Work with both numpy array and Pytorch Tensor. """ assert isinstance(perm, (np.ndarray, torch.Tensor)) n = perm.shape[-1] if isinstance(perm, np.ndarray): result = np.empty(n, dtype=int) result[perm] = np.arange(n, dtype=int) else: result = torch.empty(n, dtype=int, device=perm.device) result[perm] = torch.arange(n, dtype=int) return result def perm_vec_to_mat(p: np.ndarray, left: bool = False) -> np.ndarray: """Convert a permutation vector to a permutation matrix. Parameters: p: a vector storing the permutation. left: whether it's in left- or right-multiplication format. """ n = len(p) matrix = np.zeros((n, n), dtype=int) matrix[p, np.arange(n, dtype=int)] = 1 # Left-multiplication by the resulting matrix will result in the desired permutation. return matrix if not left else matrix.T def perm_mat_to_vec(m, left=False): """Convert a permutation matrix to a permutation vector. Parameters: p: a matrix storing the permutation. left: whether it's in left- or right-multiplication format. 
""" input = np.arange(m.shape[0]) return m @ input if left else m.T @ input def is_2x2_block_diag(mat: np.ndarray) -> bool: """Check that each of the 4 blocks of a matrix is diagonal (in other words, that the matrix is a butterfly factor). Assumes that the matrix is square with even dimension. """ nh = mat.shape[0] // 2 for block in [mat[:nh, :nh], mat[:nh, nh:], mat[nh:, :nh], mat[nh:, nh:]]: if np.count_nonzero(block - np.diag(np.diagonal(block))): return False # there's a nonzero off-diagonal entry return True def is_butterfly_factor(mat: np.ndarray, k: int) -> bool: """Checks whether "mat" is in B_k. """ assert k > 1 and k == 1 << int(math.log2(k)) n = mat.shape[0] assert n >= k and n == 1 << int(math.log2(n)) z = np.zeros(mat.shape) for i in range(n//k): # Iterate through each diagonal block of the matrix, # and check that it is a butterfly factor. block = mat[i * k:(i + 1) * k, i * k:(i + 1) * k] if not is_2x2_block_diag(block): return False z[i * k:(i + 1) * k, i * k:(i + 1) * k] = block # Check whether there are any nonzeros in off-diagonal blocks. return np.count_nonzero(mat - z) == 0 def matrix_to_butterfly_factor(mat, log_k, pytorch_format=False, check_input=False): """Converts a matrix to a butterfly factor B_k. Assumes that it indeed has the correct sparsity pattern. """ k = 1 << log_k if check_input: assert is_butterfly_factor(mat, k) n = mat.shape[0] out = np.zeros((n // 2, 2, 2)) for block in range(n // 2): base = (2 * block // k) * k + (block % (k // 2)) for i, j in np.ndindex((2, 2)): out[block, i, j] = mat[base + i * k // 2, base + j * k//2] if pytorch_format: out = torch.tensor(out, dtype=torch.float32) return out class Node: def __init__(self, value): self.value = value self.in_edges = [] self.out_edges = [] def half_balance( v: np.ndarray, return_swap_locations: bool = False ) -> Tuple[Union[np.ndarray, torch.Tensor], np.ndarray]: """Return the permutation vector that makes the permutation vector v n//2-balanced. Directly follows the proof of Lemma G.2. Parameters: v: the permutation as a vector, stored in right-multiplication format. """ n = len(v) assert n % 2 == 0 nh = n // 2 nodes = [Node(i) for i in range(nh)] # Build the graph for i in range(nh): # There is an edge from s to t s, t = nodes[v[i] % nh], nodes[v[i + nh] % nh] s.out_edges.append((t, i)) t.in_edges.append((s, i + nh)) # Each node has undirected degree exactly 2 assert all(len(node.in_edges) + len(node.out_edges) == 2 for node in nodes) swap_low_locs = [] swap_high_locs = [] while len(nodes): # Pick a random node. start_node, start_loc = nodes[-1], n - 1 next_node = None # Follow undirected edges until rereaching start_node. # As every node has undirected degree 2, this will find # all cycles in the graph. Reverse edges as needed to # make the cycle a directed cycle. while next_node != start_node: if next_node is None: next_node, next_loc = start_node, start_loc old_node, old_loc = next_node, next_loc if old_node.out_edges: # If there's an out-edge from old_node, follow it. next_node, old_loc = old_node.out_edges.pop() next_loc = old_loc + nh next_node.in_edges.remove((old_node, next_loc)) else: # If there's no out-edge, there must be an in-edge. 
next_node, old_loc = old_node.in_edges.pop() next_loc = old_loc - nh next_node.out_edges.remove((old_node, next_loc)) swap_low_locs.append(next_loc) swap_high_locs.append(old_loc) nodes.remove(old_node) perm = np.arange(n, dtype=int) perm[swap_low_locs], perm[swap_high_locs] = swap_high_locs, swap_low_locs if not return_swap_locations: return perm, v[perm] else: return swap_low_locs, v[perm] def modular_balance(v: np.ndarray) -> Tuple[List[np.ndarray], np.ndarray]: """ Returns the sequence of permutations to transform permutation vector v into a modular-balanced matrix, as well as the resultant modular-balanced permutation vector. Directly follows the proof of Lemma G.3. Parameters: v: a permutation vector corresponding to a permutation matrix P, stored in right-multiplication format. """ t = n = len(v) perms = [] while t >= 2: # Balance each chunk of the vector independently. chunks = np.split(v, n // t) # Adjust indices of the permutation swap_perm = np.hstack([half_balance(chunk)[0] + chunk_idx * t for chunk_idx, chunk in enumerate(chunks)]) v = v[swap_perm] perms.append(swap_perm) t //= 2 return perms, v def is_modular_balanced(perm): """Corresponds to Definition G.1 in the paper. perm is stored in right-multiplication format, either as a vector or a matrix. """ if isinstance(perm, np.ndarray) and len(perm.shape) > 1: perm = perm_mat_to_vec(perm) n = len(perm) log_n = int(math.log2(n)) assert n == 1 << log_n for j in (1 << k for k in range(1, log_n + 1)): for chunk in range(n // j): mod_vals = set(perm[i] % j for i in range(chunk * j, (chunk + 1) * j)) if len(mod_vals) != j: return False return True def modular_balanced_to_butterfly_factor(L: np.ndarray) -> List[np.ndarray]: """Returns a sequence of butterfly factors that, when multiplied together, create L. Assumptions: L is a modular-balanced permutation matrix. Directly follows the proof of Lemma G.1. Optimized for readability, not efficiency. Parameters: L: a modular-balanced permutation matrix, stored in the right-multiplication format. (i.e. applying L to a vector x is equivalent to x @ L). Can also be stored as a vector (again in right-multiplication format). Return: butterflies: a list of butterfly factors, stored as matrices (not in twiddle format). The matrices are permutation matrices stored in right-multiplication format. """ if isinstance(L, list) or len(L.shape) == 1: L = perm_vec_to_mat(L) n = L.shape[0] if n == 2: return [L.copy()] # L is its own inverse, and is already a butterfly. nh = n//2 L1 = L[:nh, :nh] + L[nh:, :nh] L2 = L[:nh, nh:] + L[nh:, nh:] perms = [] Lp = scipy.linalg.block_diag(L1, L2) # By construction, Bn @ Lp = L. Bn = L @ Lp.T perms1 = modular_balanced_to_butterfly_factor(L1) perms2 = modular_balanced_to_butterfly_factor(L2) # Combine the individual permutation matrices of size n/2 # into a block-diagonal permutation matrix of size n. return [Bn] + [scipy.linalg.block_diag(p1, p2) for p1, p2 in zip(perms1, perms2)] def perm2butterfly_slow(v: Union[np.ndarray, torch.Tensor], complex: bool = False, increasing_stride: bool = False) -> Butterfly: """ Convert a permutation to a Butterfly that performs the same permutation. This implementation is slower but follows the proofs in Appendix G more closely. Parameter: v: a permutation, stored as a vector, in left-multiplication format. (i.e., applying v to a vector x is equivalent to x[p]) complex: whether the Butterfly is complex or real. increasing_stride: whether the returned Butterfly should have increasing_stride=False or True. 
False corresponds to Lemma G.3 and True corresponds to Lemma G.6. Return: b: a Butterfly that performs the same permutation as v. """ if isinstance(v, torch.Tensor): v = v.detach().cpu().numpy() n = len(v) log_n = int(math.ceil(math.log2(n))) if n < 1 << log_n: # Pad permutation to the next power-of-2 size v = np.concatenate([v, np.arange(n, 1 << log_n)]) if increasing_stride: # Follow proof of Lemma G.6 br = bitreversal_permutation(1 << log_n) b = perm2butterfly_slow(br[v[br]], complex=complex, increasing_stride=False) b.increasing_stride=True br_half = bitreversal_permutation((1 << log_n) // 2, pytorch_format=True) with torch.no_grad(): b.twiddle.copy_(b.twiddle[:, :, :, br_half]) b.in_size = b.out_size = n return b # modular_balance expects right-multiplication format so we convert the format of v. Rinv_perms, L_vec = modular_balance(invert(v)) L_perms = list(reversed(modular_balanced_to_butterfly_factor(L_vec))) R_perms = [perm_vec_to_mat(invert(p), left=True) for p in reversed(Rinv_perms)] # Stored in increasing_stride=True twiddle format. # Need to take transpose because the matrices are in right-multiplication format. L_twiddle = torch.stack([matrix_to_butterfly_factor(l.T, log_k=i+1, pytorch_format=True) for i, l in enumerate(L_perms)]) # Stored in increasing_stride=False twiddle format so we need to flip the order R_twiddle = torch.stack([matrix_to_butterfly_factor(r, log_k=i+1, pytorch_format=True) for i, r in enumerate(R_perms)]).flip([0]) twiddle = torch.stack([R_twiddle, L_twiddle]).unsqueeze(0) b = Butterfly(n, n, bias=False, complex=complex, increasing_stride=False, init=twiddle if not complex else real2complex(twiddle), nblocks=2) return b def swap_locations_to_twiddle_factor(n: int, swap_locations: np.ndarray) -> torch.Tensor: twiddle = torch.eye(2).expand(n // 2, 2, 2).contiguous() swap_matrix = torch.tensor([[0, 1], [1, 0]], dtype=torch.float) twiddle[swap_locations] = swap_matrix.unsqueeze(0) return twiddle def outer_twiddle_factors(v: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor, np.ndarray]: """ Decompose the permutations v to get the right/right twiddle factor, and new permutations that only permute elements that are size//2 indices apart. Parameters: v: (batch_size, size), each is a permutation vector of size @size, in left-multiplication format. 
Return: twiddle_right_factor: (batch_size * size // 2, 2, 2) twiddle_left_factor: (batch_size * size // 2, 2, 2) new_v: (batch_size * 2, size // 2) """ # Convert to right-multiplication format since that's what half_balance expects batch_size, size = v.shape assert size >= 2 v_right = np.vstack([invert(chunk) for chunk in v]) half_balance_results = [half_balance(chunk, return_swap_locations=True) for chunk in v_right] twiddle_right_factor = torch.cat( [swap_locations_to_twiddle_factor(size, swap_low_locs) for swap_low_locs, _ in half_balance_results] ) v_right = np.vstack([v_permuted for _, v_permuted in half_balance_results]) v_left = np.vstack([invert(perm) for perm in v_right]) size_half = size // 2 swap_low_x, swap_low_y = np.nonzero(v_left[:, :size_half] // size_half == 1) swap_low_locs_flat = swap_low_y + swap_low_x * size // 2 twiddle_left_factor = swap_locations_to_twiddle_factor(batch_size * size, swap_low_locs_flat) v_left[swap_low_x, swap_low_y], v_left[swap_low_x, swap_low_y + size_half] = ( v_left[swap_low_x, swap_low_y + size // 2], v_left[swap_low_x, swap_low_y] ) new_v = (v_left % size_half).reshape(batch_size * 2, size // 2) # Check that each new vector is a permutation assert np.allclose(np.sort(new_v), np.arange(size // 2)) return twiddle_right_factor, twiddle_left_factor, new_v def perm2butterfly(v: Union[np.ndarray, torch.Tensor], complex: bool = False, increasing_stride: bool = False) -> Butterfly: """ Parameter: v: a permutation, stored as a vector, in left-multiplication format. (i.e., applying v to a vector x is equivalent to x[p]) complex: whether the Butterfly is complex or real. increasing_stride: whether the returned Butterfly should have increasing_stride=False or True. False corresponds to Lemma G.3 and True corresponds to Lemma G.6. Return: b: a Butterfly that performs the same permutation as v. """ if isinstance(v, torch.Tensor): v = v.detach().cpu().numpy() n = len(v) log_n = int(math.ceil(math.log2(n))) if n < 1 << log_n: # Pad permutation to the next power-of-2 size v = np.concatenate([v, np.arange(n, 1 << log_n)]) if increasing_stride: # Follow proof of Lemma G.6 br = bitreversal_permutation(1 << log_n) b = perm2butterfly(br[v[br]], complex=complex, increasing_stride=False) b.increasing_stride=True br_half = bitreversal_permutation((1 << log_n) // 2, pytorch_format=True) with torch.no_grad(): b.twiddle.copy_(b.twiddle[:, :, :, br_half]) b.in_size = b.out_size = n return b v = v[None] twiddle_right_factors, twiddle_left_factors = [], [] for _ in range(log_n): right_factor, left_factor, v = outer_twiddle_factors(v) twiddle_right_factors.append(right_factor) twiddle_left_factors.append(left_factor) twiddle = torch.stack([torch.stack(twiddle_right_factors), torch.stack(twiddle_left_factors).flip([0])]).unsqueeze(0) b = Butterfly(n, n, bias=False, complex=complex, increasing_stride=False, init=twiddle if not complex else real2complex(twiddle), nblocks=2) return b
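# Minimal usage sketch (an illustrative addition, not part of the original file),
# assuming the compiled torch_butterfly extension is available: perm2butterfly
# takes a permutation in left-multiplication format, so the returned Butterfly
# applied to x should equal x[..., v]. The size is an assumption.
if __name__ == '__main__':
    n = 16
    v = torch.randperm(n)
    b = perm2butterfly(v)  # Butterfly with nblocks=2 encoding the permutation
    x = torch.randn(2, n)
    assert torch.allclose(b(x), x[:, v], atol=1e-5)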
butterfly-master
torch_butterfly/permutation.py
import math import numbers import torch from torch import nn import torch.nn.functional as F from torch_butterfly import Butterfly from torch_butterfly.multiply_base4 import butterfly_multiply_base4_torch from torch_butterfly.multiply_base4 import twiddle_base2_to_base4 from torch_butterfly.complex_utils import real_dtype_to_complex from torch_butterfly.complex_utils import complex_matmul class ButterflyBase4(Butterfly): """Product of log N butterfly factors, each is a block 2x2 of diagonal matrices. Compatible with torch.nn.Linear. Parameters: in_size: size of input out_size: size of output bias: If set to False, the layer will not learn an additive bias. Default: ``True`` complex: whether complex or real increasing_stride: whether the first butterfly block will multiply with increasing stride (e.g. 1, 2, ..., n/2) or decreasing stride (e.g., n/2, n/4, ..., 1). init: 'randn', 'ortho', or 'identity'. Whether the weight matrix should be initialized to from randn twiddle, or to be randomly orthogonal/unitary, or to be the identity matrix. nblocks: number of B or B^T blocks. The B and B^T will alternate. """ def __init__(self, *args, **kwargs): init = kwargs.get('init', None) if (isinstance(init, tuple) and len(init) == 2 and isinstance(init[0], torch.Tensor) and isinstance(init[1], torch.Tensor)): twiddle4, twiddle2 = init[0].clone(), init[1].clone() kwargs['init'] = 'empty' super().__init__(*args, **kwargs) else: super().__init__(*args, **kwargs) with torch.no_grad(): twiddle4, twiddle2 = twiddle_base2_to_base4(self.twiddle, self.increasing_stride) del self.twiddle self.twiddle4 = nn.Parameter(twiddle4) self.twiddle2 = nn.Parameter(twiddle2) self.twiddle4._is_structured = True # Flag to avoid weight decay self.twiddle2._is_structured = True # Flag to avoid weight decay def forward(self, input): """ Parameters: input: (batch, *, in_size) Return: output: (batch, *, out_size) """ output = self.pre_process(input) output_size = self.out_size if self.nstacks == 1 else None output = butterfly_multiply_base4_torch(self.twiddle4, self.twiddle2, output, self.increasing_stride, output_size) return self.post_process(input, output) def __imul__(self, scale): """In-place multiply the whole butterfly matrix by some scale factor, by multiplying the twiddle. Scale must be nonnegative """ assert isinstance(scale, numbers.Number) assert scale >= 0 scale_per_entry = scale ** (1.0 / self.nblocks / self.log_n) self.twiddle4 *= scale_per_entry ** 2 self.twiddle2 *= scale_per_entry return self
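# Minimal usage sketch (an illustrative addition, not part of the original file),
# assuming the compiled torch_butterfly extension is available: to_base4()
# re-parameterizes the same linear map with base-4 factors, so outputs should
# agree up to float error. Sizes and tolerance are assumptions.
if __name__ == '__main__':
    b2 = Butterfly(32, 32, bias=False)
    b4 = b2.to_base4()
    x = torch.randn(4, 32)
    assert torch.allclose(b4(x), b2(x), atol=1e-4)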
butterfly-master
torch_butterfly/butterfly_base4.py
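A small sketch of how ButterflyBase4 might be exercised, not from the repo: the constructor call and shapes follow the Butterfly signature above; the in-place rescale is wrapped in no_grad since __imul__ mutates leaf parameters.

# Hypothetical sketch (assumes torch_butterfly is installed).
import torch
from torch_butterfly.butterfly_base4 import ButterflyBase4

b4 = ButterflyBase4(64, 64, bias=False)  # random init, twiddle converted to base 4 internally
x = torch.randn(8, 64)
print(b4(x).shape)                       # torch.Size([8, 64])
with torch.no_grad():
    b4 *= 2.0                            # __imul__ rescales twiddle4/twiddle2 consistently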
from functools import partial

import numpy as np
import torch


def benchmark(fn, nrepeats=7):
    res = []
    for _ in range(nrepeats):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        fn()
        end.record()
        torch.cuda.synchronize()
        res.append(start.elapsed_time(end))
    return np.median(res), np.std(res)


def benchmark_fw_bw(fn, inputs, nrepeats=7):
    fw = partial(fn, *inputs)
    out = fw()
    g = torch.randn_like(out).contiguous()
    # bw = lambda: out.backward(g, retain_graph=True)
    bw = lambda: torch.autograd.grad(out, inputs, g, retain_graph=True)
    # fw_bw = lambda: fw().backward(g)
    fw_bw = lambda: torch.autograd.grad(fw(), inputs, g)
    return benchmark(fw, nrepeats), benchmark(bw, nrepeats), benchmark(fw_bw, nrepeats)
butterfly-master
torch_butterfly/benchmark_utils.py
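A hypothetical usage sketch for the helpers above, not part of the repo: it requires a CUDA device (benchmark uses torch.cuda.Event), and the inputs must require grad since benchmark_fw_bw differentiates with respect to them.

# Hypothetical usage sketch (requires CUDA; Butterfly layer used as the benchmarked fn).
import torch
from torch_butterfly import Butterfly
from torch_butterfly.benchmark_utils import benchmark_fw_bw

n = 1024
b = Butterfly(n, n, bias=False).cuda()
x = torch.randn(256, n, device='cuda', requires_grad=True)
(fw_med, _), (bw_med, _), (fwbw_med, _) = benchmark_fw_bw(b, (x,))
print('forward: {:.3f} ms, backward: {:.3f} ms, fw+bw: {:.3f} ms'
      .format(fw_med, bw_med, fwbw_med))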
import math
from functools import reduce

import torch
from torch import nn
from torch.nn import functional as F
import torch.fft

from torch_butterfly.butterfly import Butterfly, ButterflyUnitary
from torch_butterfly.permutation import FixedPermutation, bitreversal_permutation, invert
from torch_butterfly.permutation import wavelet_permutation
from torch_butterfly.diagonal import Diagonal
from torch_butterfly.complex_utils import real2complex, Real2Complex, Complex2Real
from torch_butterfly.complex_utils import index_last_dim
from torch_butterfly.combine import diagonal_butterfly, TensorProduct, butterfly_product
from torch_butterfly.combine import butterfly_kronecker, permutation_kronecker


def fft(n, normalized=False, br_first=True, with_br_perm=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the FFT.
    Parameters:
        n: size of the FFT. Must be a power of 2.
        normalized: if True, corresponds to the unitary FFT (i.e. multiplied by 1/sqrt(n))
        br_first: which decomposition of FFT. br_first=True corresponds to decimation-in-time.
            br_first=False corresponds to decimation-in-frequency.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    factors = []
    for log_size in range(1, log_n + 1):
        size = 1 << log_size
        exp = torch.exp(-2j * math.pi * torch.arange(0.0, size // 2) / size)
        o = torch.ones_like(exp)
        twiddle_factor = torch.stack((torch.stack((o, exp), dim=-1),
                                      torch.stack((o, -exp), dim=-1)), dim=-2)
        factors.append(twiddle_factor.repeat(n // size, 1, 1))
    twiddle = torch.stack(factors, dim=0).unsqueeze(0).unsqueeze(0)
    if not br_first:  # Take conjugate transpose of the BP decomposition of ifft
        twiddle = twiddle.transpose(-1, -2).flip([2])
    # Divide the whole transform by sqrt(n) by dividing each factor by n^(1/2 log_n) = sqrt(2)
    if normalized:
        twiddle /= math.sqrt(2)
    b = Butterfly(n, n, bias=False, complex=True, increasing_stride=br_first, init=twiddle)
    if with_br_perm:
        br_perm = FixedPermutation(bitreversal_permutation(n, pytorch_format=True))
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def fft_unitary(n, br_first=True, with_br_perm=True) -> nn.Module:
    """Construct an nn.Module based on ButterflyUnitary that exactly performs the FFT.
    Since it's unitary, it corresponds to normalized=True.
    Parameters:
        n: size of the FFT. Must be a power of 2.
        br_first: which decomposition of FFT. br_first=True corresponds to decimation-in-time.
            br_first=False corresponds to decimation-in-frequency.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    factors = []
    for log_size in range(1, log_n + 1):
        size = 1 << log_size
        angle = -2 * math.pi * torch.arange(0.0, size // 2) / size
        phi = torch.ones_like(angle) * math.pi / 4
        alpha = angle / 2 + math.pi / 2
        psi = -angle / 2 - math.pi / 2
        if br_first:
            chi = angle / 2 - math.pi / 2
        else:
            # Take conjugate transpose of the BP decomposition of ifft, which works out to this,
            # plus the flip later.
            chi = -angle / 2 - math.pi / 2
        twiddle_factor = torch.stack([phi, alpha, psi, chi], dim=-1)
        factors.append(twiddle_factor.repeat(n // size, 1))
    twiddle = torch.stack(factors, dim=0).unsqueeze(0).unsqueeze(0)
    if not br_first:
        twiddle = twiddle.flip([2])
    b = ButterflyUnitary(n, n, bias=False, increasing_stride=br_first)
    with torch.no_grad():
        b.twiddle.copy_(twiddle)
    if with_br_perm:
        br_perm = FixedPermutation(bitreversal_permutation(n, pytorch_format=True))
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def ifft(n, normalized=False, br_first=True, with_br_perm=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the inverse FFT.
    Parameters:
        n: size of the iFFT. Must be a power of 2.
        normalized: if True, corresponds to the unitary iFFT (i.e. multiplied by 1/sqrt(n),
            not 1/n)
        br_first: which decomposition of iFFT. True corresponds to decimation-in-frequency.
            False corresponds to decimation-in-time.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    factors = []
    for log_size in range(1, log_n + 1):
        size = 1 << log_size
        exp = torch.exp(2j * math.pi * torch.arange(0.0, size // 2) / size)
        o = torch.ones_like(exp)
        twiddle_factor = torch.stack((torch.stack((o, exp), dim=-1),
                                      torch.stack((o, -exp), dim=-1)), dim=-2)
        factors.append(twiddle_factor.repeat(n // size, 1, 1))
    twiddle = torch.stack(factors, dim=0).unsqueeze(0).unsqueeze(0)
    if not br_first:  # Take conjugate transpose of the BP decomposition of fft
        twiddle = twiddle.transpose(-1, -2).flip([2])
    # Divide the whole transform by sqrt(n) by dividing each factor by n^(1/2 log_n) = sqrt(2)
    if normalized:
        twiddle /= math.sqrt(2)
    else:
        twiddle /= 2
    b = Butterfly(n, n, bias=False, complex=True, increasing_stride=br_first, init=twiddle)
    if with_br_perm:
        br_perm = FixedPermutation(bitreversal_permutation(n, pytorch_format=True))
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def ifft_unitary(n, br_first=True, with_br_perm=True) -> nn.Module:
    """Construct an nn.Module based on ButterflyUnitary that exactly performs the iFFT.
    Since it's unitary, it corresponds to normalized=True.
    Parameters:
        n: size of the iFFT. Must be a power of 2.
        br_first: which decomposition of iFFT. True corresponds to decimation-in-frequency.
            False corresponds to decimation-in-time.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    factors = []
    for log_size in range(1, log_n + 1):
        size = 1 << log_size
        angle = 2 * math.pi * torch.arange(0.0, size // 2) / size
        phi = torch.ones_like(angle) * math.pi / 4
        alpha = angle / 2 + math.pi / 2
        psi = -angle / 2 - math.pi / 2
        if br_first:
            chi = angle / 2 - math.pi / 2
        else:
            # Take conjugate transpose of the BP decomposition of fft, which works out to this,
            # plus the flip later.
            chi = -angle / 2 - math.pi / 2
        twiddle_factor = torch.stack([phi, alpha, psi, chi], dim=-1)
        factors.append(twiddle_factor.repeat(n // size, 1))
    twiddle = torch.stack(factors, dim=0).unsqueeze(0).unsqueeze(0)
    if not br_first:
        twiddle = twiddle.flip([2])
    b = ButterflyUnitary(n, n, bias=False, increasing_stride=br_first)
    with torch.no_grad():
        b.twiddle.copy_(twiddle)
    if with_br_perm:
        br_perm = FixedPermutation(bitreversal_permutation(n, pytorch_format=True))
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def dct(n: int, type: int = 2, normalized: bool = False) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the DCT.
    Parameters:
        n: size of the DCT. Must be a power of 2.
        type: either 2, 3, or 4. These are the only types supported. See scipy.fft.dct's notes.
        normalized: if True, corresponds to the orthogonal DCT (see scipy.fft.dct's notes)
    """
    assert type in [2, 3, 4]
    # Construct the permutation before the FFT: separate the even and odd and then reverse
    # the odd. e.g., [0, 1, 2, 3] -> [0, 2, 3, 1].
    perm = torch.arange(n)
    perm = torch.cat((perm[::2], perm[1::2].flip([0])))
    br = bitreversal_permutation(n, pytorch_format=True)
    postprocess_diag = 2 * torch.exp(-1j * math.pi * torch.arange(0.0, n) / (2 * n))
    if type in [2, 4]:
        b = fft(n, normalized=normalized, br_first=True, with_br_perm=False)
        if type == 4:
            even_mul = torch.exp(-1j * math.pi / (2 * n) * (torch.arange(0.0, n, 2) + 0.5))
            odd_mul = torch.exp(1j * math.pi / (2 * n) * (torch.arange(1.0, n, 2) + 0.5))
            preprocess_diag = torch.stack((even_mul, odd_mul), dim=-1).flatten()
            # This preprocess_diag is before the permutation.
            # To move it after the permutation, we have to permute the diagonal.
            b = diagonal_butterfly(b, preprocess_diag[perm[br]], diag_first=True)
        if normalized:
            if type in [2, 3]:
                postprocess_diag[0] /= 2.0
                postprocess_diag[1:] /= math.sqrt(2)
            elif type == 4:
                postprocess_diag /= math.sqrt(2)
        b = diagonal_butterfly(b, postprocess_diag, diag_first=False)
        return nn.Sequential(FixedPermutation(perm[br]), Real2Complex(), b, Complex2Real())
    else:
        assert type == 3
        b = ifft(n, normalized=normalized, br_first=False, with_br_perm=False)
        postprocess_diag[0] /= 2.0
        if normalized:
            postprocess_diag[1:] /= math.sqrt(2)
        else:
            # We want iFFT with the scaling of 1.0 instead of 1 / n
            with torch.no_grad():
                b.twiddle *= 2
        b = diagonal_butterfly(b, postprocess_diag.conj(), diag_first=True)
        perm_inverse = invert(perm)
        return nn.Sequential(Real2Complex(), b, Complex2Real(),
                             FixedPermutation(br[perm_inverse]))


def dst(n: int, type: int = 2, normalized: bool = False) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the DST.
    Parameters:
        n: size of the DST. Must be a power of 2.
        type: either 2 or 4. These are the only types supported. See scipy.fft.dst's notes.
        normalized: if True, corresponds to the orthogonal DST (see scipy.fft.dst's notes)
    """
    assert type in [2, 4]
    b = fft(n, normalized=normalized, br_first=True, with_br_perm=False)
    # Construct the permutation before the FFT: separate the even and odd and then reverse
    # the odd. e.g., [0, 1, 2, 3] -> [0, 2, 3, 1].
    perm = torch.arange(n)
    perm = torch.cat((perm[::2], perm[1::2].flip([0])))
    br = bitreversal_permutation(n, pytorch_format=True)
    if type == 2:
        even_mul = torch.exp(-1j * math.pi * torch.arange(0.0, n, 2) / n)
        odd_mul = -torch.exp(1j * math.pi * (torch.arange(1.0, n, 2) + 1) / n)
    elif type == 4:
        even_mul = torch.exp(-1j * math.pi * torch.arange(0.0, n, 2) / (2 * n))
        odd_mul = -torch.exp(1j * math.pi * (torch.arange(1.0, n, 2) + 1) / (2 * n))
    preprocess_diag = torch.stack((even_mul, odd_mul), dim=-1).flatten()
    # This preprocess_diag is before the permutation.
    # To move it after the permutation, we have to permute the diagonal.
    b = diagonal_butterfly(b, preprocess_diag[perm[br]], diag_first=True)
    if type == 2:
        postprocess_diag = 2j * torch.exp(-1j * math.pi * (torch.arange(0.0, n) + 1) / (2 * n))
    elif type == 4:
        postprocess_diag = 2j * torch.exp(-1j * math.pi * (torch.arange(0.0, n) + 0.5) / (2 * n))
    if normalized:
        if type == 2:
            postprocess_diag[0] /= 2.0
            postprocess_diag[1:] /= math.sqrt(2)
        elif type == 4:
            postprocess_diag /= math.sqrt(2)
    b = diagonal_butterfly(b, postprocess_diag, diag_first=False)
    return nn.Sequential(FixedPermutation(perm[br]), Real2Complex(), b, Complex2Real())


def circulant(col, transposed=False, separate_diagonal=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs circulant matrix
    multiplication.
    Parameters:
        col: torch.Tensor of size (n, ). The first column of the circulant matrix.
        transposed: if True, then the circulant matrix is transposed, i.e. col is the first
            *row* of the matrix.
        separate_diagonal: if True, the returned nn.Module is Butterfly, Diagonal, Butterfly.
            if False, the diagonal is combined into the Butterfly part.
    """
    assert col.dim() == 1, 'Vector col must have dimension 1'
    complex = col.is_complex()
    n = col.shape[0]
    log_n = int(math.ceil(math.log2(n)))
    # For non-power-of-2, maybe there's a way to only pad up to size 1 << log_n?
    # I've only figured out how to pad to size 1 << (log_n + 1).
    # e.g., [a, b, c] -> [a, b, c, 0, 0, a, b, c]
    n_extended = n if n == 1 << log_n else 1 << (log_n + 1)
    b_fft = fft(n_extended, normalized=True, br_first=False, with_br_perm=False).to(col.device)
    b_fft.in_size = n
    b_ifft = ifft(n_extended, normalized=True, br_first=True, with_br_perm=False).to(col.device)
    b_ifft.out_size = n
    if n < n_extended:
        col_0 = F.pad(col, (0, 2 * ((1 << log_n) - n)))
        col = torch.cat((col_0, col))
    if not col.is_complex():
        col = real2complex(col)
    # This fft must have normalized=False for the correct scaling. These are the eigenvalues
    # of the circulant matrix.
    col_f = torch.fft.fft(col, norm=None)
    if transposed:
        # We could have just transposed the iFFT * Diag * FFT to get FFT * Diag * iFFT.
        # Instead we use the fact that row is the reverse of col, but the 0-th element stays
        # put. This corresponds to the same reversal in the frequency domain.
        # https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Time_and_frequency_reversal
        col_f = torch.cat((col_f[:1], col_f[1:].flip([0])))
    br_perm = bitreversal_permutation(n_extended, pytorch_format=True).to(col.device)
    # diag = col_f[..., br_perm]
    diag = index_last_dim(col_f, br_perm)
    if separate_diagonal:
        if not complex:
            return nn.Sequential(Real2Complex(), b_fft, Diagonal(diagonal_init=diag), b_ifft,
                                 Complex2Real())
        else:
            return nn.Sequential(b_fft, Diagonal(diagonal_init=diag), b_ifft)
    else:
        # Combine the diagonal with the last twiddle factor of b_fft
        with torch.no_grad():
            b_fft = diagonal_butterfly(b_fft, diag, diag_first=False, inplace=True)
        # Combine b_fft and b_ifft into one Butterfly (with nblocks=2).
        b = butterfly_product(b_fft, b_ifft)
        b.in_size = n
        b.out_size = n
        return b if complex else nn.Sequential(Real2Complex(), b, Complex2Real())


def toeplitz(col, row=None, separate_diagonal=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs Toeplitz matrix
    multiplication.
    Parameters:
        col: torch.Tensor of size (n, ). The first column of the Toeplitz matrix.
        row: torch.Tensor of size (n, ). The first row of the Toeplitz matrix. If None, assume
            row == col.conj(). The first element of row will be ignored.
        separate_diagonal: if True, the returned nn.Module is Butterfly, Diagonal, Butterfly.
            if False, the diagonal is combined into the Butterfly part.
    """
    if row is None:
        row = col.conj()
    assert col.dim() == 1, 'Vector col must have dimension 1'
    complex = col.is_complex()
    n, = col.shape
    m, = row.shape
    log_n_m = int(math.ceil(math.log2(n + m - 1)))
    n_extended = 1 << log_n_m
    # Extend to a circulant matrix
    if n + m - 1 < n_extended:
        col = F.pad(col, (0, n_extended - (n + m - 1)))
    col = torch.cat([col, row[1:].flip([0])])
    b = circulant(col, separate_diagonal=separate_diagonal)
    # Adjust in_size = m and out_size = n
    if separate_diagonal:
        if not complex:
            b[1].in_size = m
            b[3].out_size = n
        else:
            b[0].in_size = m
            b[2].out_size = n
    else:
        if not complex:
            b[1].in_size = m
            b[1].out_size = n
        else:
            b.in_size = m
            b.out_size = n
    return b


def hadamard(n, normalized=False, increasing_stride=True) -> Butterfly:
    """Construct an nn.Module based on Butterfly that exactly performs the Hadamard transform.
    Parameters:
        n: size of the Hadamard transform. Must be a power of 2.
        normalized: if True, corresponds to the orthogonal Hadamard transform
            (i.e. multiplied by 1/sqrt(n))
        increasing_stride: whether the returned Butterfly has increasing stride.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    twiddle = torch.tensor([[1, 1], [1, -1]], dtype=torch.float)
    # Divide the whole transform by sqrt(n) by dividing each factor by n^(1/2 log_n) = sqrt(2)
    if normalized:
        twiddle /= math.sqrt(2)
    twiddle = twiddle.reshape(1, 1, 1, 1, 2, 2).expand((1, 1, log_n, n // 2, 2, 2))
    b = Butterfly(n, n, bias=False, increasing_stride=increasing_stride, init=twiddle)
    return b


def hadamard_diagonal(diagonals: torch.Tensor, normalized: bool = False,
                      increasing_stride: bool = True,
                      separate_diagonal: bool = True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that performs multiplication by
    H D H D ... H D, where H is the Hadamard matrix and D is a diagonal matrix.
    Parameters:
        diagonals: (k, n), where k is the number of diagonal matrices and n is the dimension
            of the Hadamard transform.
        normalized: if True, corresponds to the orthogonal Hadamard transform
            (i.e. multiplied by 1/sqrt(n))
        increasing_stride: whether the returned Butterfly has increasing stride.
        separate_diagonal: if False, the diagonal is combined into the Butterfly part.
    """
    k, n = diagonals.shape
    if not separate_diagonal:
        butterflies = []
        for i, diagonal in enumerate(diagonals.unbind()):
            cur_increasing_stride = increasing_stride != (i % 2 == 1)
            h = hadamard(n, normalized, cur_increasing_stride)
            butterflies.append(diagonal_butterfly(h, diagonal, diag_first=True))
        return reduce(butterfly_product, butterflies)
    else:
        modules = []
        for i, diagonal in enumerate(diagonals.unbind()):
            modules.append(Diagonal(diagonal_init=diagonal))
            cur_increasing_stride = increasing_stride != (i % 2 == 1)
            h = hadamard(n, normalized, cur_increasing_stride)
            modules.append(h)
        return nn.Sequential(*modules)


def conv1d_circular_singlechannel(n, weight, separate_diagonal=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs nn.Conv1d with a single
    in-channel and single out-channel, with circular padding.
    The output of nn.Conv1d must have the same size as the input (i.e. kernel size must be
    2k + 1, and padding k for some integer k).
    Parameters:
        n: size of the input.
        weight: torch.Tensor of size (1, 1, kernel_size). Kernel_size must be odd, and smaller
            than n. Padding is assumed to be (kernel_size - 1) // 2.
        separate_diagonal: if True, the returned nn.Module is Butterfly, Diagonal, Butterfly.
            if False, the diagonal is combined into the Butterfly part.
    """
    assert weight.dim() == 3, 'Weight must have dimension 3'
    kernel_size = weight.shape[-1]
    assert kernel_size < n
    assert kernel_size % 2 == 1, 'Kernel size must be odd'
    assert weight.shape[:2] == (1, 1), 'Only support single in-channel and single out-channel'
    padding = (kernel_size - 1) // 2
    col = F.pad(weight.flip([-1]), (0, n - kernel_size)).roll(-padding, dims=-1)
    return circulant(col.squeeze(1).squeeze(0), separate_diagonal=separate_diagonal)


# We write this as an nn.Module just to use nn.Sequential
class DiagonalMultiplySum(nn.Module):
    def __init__(self, diagonal_init):
        """
        Parameters:
            diagonal_init: (out_channels, in_channels, size)
        """
        super().__init__()
        self.diagonal = nn.Parameter(diagonal_init.detach().clone())
        self.complex = self.diagonal.is_complex()

    def forward(self, input):
        """
        Parameters:
            input: (batch, in_channels, size)
        Return:
            output: (batch, out_channels, size)
        """
        return (input.unsqueeze(1) * self.diagonal).sum(dim=2)


def conv1d_circular_multichannel(n, weight) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs nn.Conv1d with multiple
    in/out channels, with circular padding.
    The output of nn.Conv1d must have the same size as the input (i.e. kernel size must be
    2k + 1, and padding k for some integer k).
    Parameters:
        n: size of the input.
        weight: torch.Tensor of size (out_channels, in_channels, kernel_size). Kernel_size
            must be odd, and smaller than n. Padding is assumed to be (kernel_size - 1) // 2.
    """
    assert weight.dim() == 3, 'Weight must have dimension 3'
    kernel_size = weight.shape[-1]
    assert kernel_size < n
    assert kernel_size % 2 == 1, 'Kernel size must be odd'
    out_channels, in_channels = weight.shape[:2]
    padding = (kernel_size - 1) // 2
    col = F.pad(weight.flip([-1]), (0, n - kernel_size)).roll(-padding, dims=-1)
    # From here we mimic the circulant construction, but the diagonal multiply is replaced
    # with multiply and then sum across the in-channels.
    complex = col.is_complex()
    log_n = int(math.ceil(math.log2(n)))
    # For non-power-of-2, maybe there's a way to only pad up to size 1 << log_n?
    # I've only figured out how to pad to size 1 << (log_n + 1).
    # e.g., [a, b, c] -> [a, b, c, 0, 0, a, b, c]
    n_extended = n if n == 1 << log_n else 1 << (log_n + 1)
    b_fft = fft(n_extended, normalized=True, br_first=False, with_br_perm=False).to(col.device)
    b_fft.in_size = n
    b_ifft = ifft(n_extended, normalized=True, br_first=True, with_br_perm=False).to(col.device)
    b_ifft.out_size = n
    if n < n_extended:
        col_0 = F.pad(col, (0, 2 * ((1 << log_n) - n)))
        col = torch.cat((col_0, col), dim=-1)
    if not col.is_complex():
        col = real2complex(col)
    # This fft must have normalized=False for the correct scaling. These are the eigenvalues
    # of the circulant matrix.
    col_f = torch.fft.fft(col, norm=None)
    br_perm = bitreversal_permutation(n_extended, pytorch_format=True).to(col.device)
    # col_f = col_f[..., br_perm]
    col_f = index_last_dim(col_f, br_perm)
    # We just want (input_f.unsqueeze(1) * col_f).sum(dim=2).
    # This can be written as a complex matrix multiply as well.
    if not complex:
        return nn.Sequential(Real2Complex(), b_fft, DiagonalMultiplySum(col_f), b_ifft,
                             Complex2Real())
    else:
        return nn.Sequential(b_fft, DiagonalMultiplySum(col_f), b_ifft)


def fft2d(n1: int, n2: int, normalized: bool = False, br_first: bool = True,
          with_br_perm: bool = True, flatten=False) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the 2D FFT.
    Parameters:
        n1: size of the FFT on the last input dimension. Must be a power of 2.
        n2: size of the FFT on the second to last input dimension. Must be a power of 2.
        normalized: if True, corresponds to the unitary FFT (i.e. multiplied by 1/sqrt(n))
        br_first: which decomposition of FFT. br_first=True corresponds to decimation-in-time.
            br_first=False corresponds to decimation-in-frequency.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
        flatten: whether to combine the 2 butterflies into 1 with Kronecker product.
    """
    b_fft1 = fft(n1, normalized=normalized, br_first=br_first, with_br_perm=False)
    b_fft2 = fft(n2, normalized=normalized, br_first=br_first, with_br_perm=False)
    b = TensorProduct(b_fft1, b_fft2) if not flatten else butterfly_kronecker(b_fft1, b_fft2)
    if with_br_perm:
        br_perm1 = FixedPermutation(bitreversal_permutation(n1, pytorch_format=True))
        br_perm2 = FixedPermutation(bitreversal_permutation(n2, pytorch_format=True))
        br_perm = (TensorProduct(br_perm1, br_perm2) if not flatten
                   else permutation_kronecker(br_perm1, br_perm2))
        if not flatten:
            return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
        else:
            return (nn.Sequential(nn.Flatten(start_dim=-2), br_perm, b,
                                  nn.Unflatten(-1, (n2, n1)))
                    if br_first
                    else nn.Sequential(nn.Flatten(start_dim=-2), b, br_perm,
                                       nn.Unflatten(-1, (n2, n1))))
    else:
        return b if not flatten else nn.Sequential(nn.Flatten(start_dim=-2), b,
                                                   nn.Unflatten(-1, (n2, n1)))


def fft2d_unitary(n1: int, n2: int, br_first: bool = True,
                  with_br_perm: bool = True) -> nn.Module:
    """Construct an nn.Module based on ButterflyUnitary that exactly performs the 2D FFT.
    Corresponds to normalized=True. Does not support flatten for now.
    Parameters:
        n1: size of the FFT on the last input dimension. Must be a power of 2.
        n2: size of the FFT on the second to last input dimension. Must be a power of 2.
        br_first: which decomposition of FFT. br_first=True corresponds to decimation-in-time.
            br_first=False corresponds to decimation-in-frequency.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    b_fft1 = fft_unitary(n1, br_first=br_first, with_br_perm=False)
    b_fft2 = fft_unitary(n2, br_first=br_first, with_br_perm=False)
    b = TensorProduct(b_fft1, b_fft2)
    if with_br_perm:
        br_perm1 = FixedPermutation(bitreversal_permutation(n1, pytorch_format=True))
        br_perm2 = FixedPermutation(bitreversal_permutation(n2, pytorch_format=True))
        br_perm = TensorProduct(br_perm1, br_perm2)
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def ifft2d(n1: int, n2: int, normalized: bool = False, br_first: bool = True,
           with_br_perm: bool = True, flatten=False) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the 2D iFFT.
    Parameters:
        n1: size of the iFFT on the last input dimension. Must be a power of 2.
        n2: size of the iFFT on the second to last input dimension. Must be a power of 2.
        normalized: if True, corresponds to the unitary iFFT (i.e. multiplied by 1/sqrt(n))
        br_first: which decomposition of iFFT. True corresponds to decimation-in-frequency.
            False corresponds to decimation-in-time.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
        flatten: whether to combine the 2 butterflies into 1 with Kronecker product.
    """
    b_ifft1 = ifft(n1, normalized=normalized, br_first=br_first, with_br_perm=False)
    b_ifft2 = ifft(n2, normalized=normalized, br_first=br_first, with_br_perm=False)
    b = TensorProduct(b_ifft1, b_ifft2) if not flatten else butterfly_kronecker(b_ifft1, b_ifft2)
    if with_br_perm:
        br_perm1 = FixedPermutation(bitreversal_permutation(n1, pytorch_format=True))
        br_perm2 = FixedPermutation(bitreversal_permutation(n2, pytorch_format=True))
        br_perm = (TensorProduct(br_perm1, br_perm2) if not flatten
                   else permutation_kronecker(br_perm1, br_perm2))
        if not flatten:
            return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
        else:
            return (nn.Sequential(nn.Flatten(start_dim=-2), br_perm, b,
                                  nn.Unflatten(-1, (n2, n1)))
                    if br_first
                    else nn.Sequential(nn.Flatten(start_dim=-2), b, br_perm,
                                       nn.Unflatten(-1, (n2, n1))))
    else:
        return b if not flatten else nn.Sequential(nn.Flatten(start_dim=-2), b,
                                                   nn.Unflatten(-1, (n2, n1)))


def ifft2d_unitary(n1: int, n2: int, br_first: bool = True,
                   with_br_perm: bool = True) -> nn.Module:
    """Construct an nn.Module based on ButterflyUnitary that exactly performs the 2D iFFT.
    Corresponds to normalized=True. Does not support flatten for now.
    Parameters:
        n1: size of the iFFT on the last input dimension. Must be a power of 2.
        n2: size of the iFFT on the second to last input dimension. Must be a power of 2.
        br_first: which decomposition of iFFT. True corresponds to decimation-in-frequency.
            False corresponds to decimation-in-time.
        with_br_perm: whether to return both the butterfly and the bit reversal permutation.
    """
    b_ifft1 = ifft_unitary(n1, br_first=br_first, with_br_perm=False)
    b_ifft2 = ifft_unitary(n2, br_first=br_first, with_br_perm=False)
    b = TensorProduct(b_ifft1, b_ifft2)
    if with_br_perm:
        br_perm1 = FixedPermutation(bitreversal_permutation(n1, pytorch_format=True))
        br_perm2 = FixedPermutation(bitreversal_permutation(n2, pytorch_format=True))
        br_perm = TensorProduct(br_perm1, br_perm2)
        return nn.Sequential(br_perm, b) if br_first else nn.Sequential(b, br_perm)
    else:
        return b


def conv2d_circular_multichannel(n1: int, n2: int, weight: torch.Tensor,
                                 flatten: bool = False) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs nn.Conv2d with multiple
    in/out channels, with circular padding.
    The output of nn.Conv2d must have the same size as the input (i.e. kernel size must be
    2k + 1, and padding k for some integer k).
    Parameters:
        n1: size of the last dimension of the input.
        n2: size of the second to last dimension of the input.
        weight: torch.Tensor of size (out_channels, in_channels, kernel_size2, kernel_size1).
            Kernel sizes must be odd, and smaller than n1/n2. Padding is assumed to be
            (kernel_size - 1) // 2.
        flatten: whether to internally flatten the last 2 dimensions of the input. Only
            supports n1 and n2 being powers of 2.
    """
    assert weight.dim() == 4, 'Weight must have dimension 4'
    kernel_size2, kernel_size1 = weight.shape[-2], weight.shape[-1]
    assert kernel_size1 < n1 and kernel_size2 < n2
    assert kernel_size1 % 2 == 1 and kernel_size2 % 2 == 1, 'Kernel size must be odd'
    out_channels, in_channels = weight.shape[:2]
    padding1 = (kernel_size1 - 1) // 2
    padding2 = (kernel_size2 - 1) // 2
    col = F.pad(weight.flip([-1]), (0, n1 - kernel_size1)).roll(-padding1, dims=-1)
    col = F.pad(col.flip([-2]), (0, 0, 0, n2 - kernel_size2)).roll(-padding2, dims=-2)
    # From here we mimic the circulant construction, but the diagonal multiply is replaced
    # with multiply and then sum across the in-channels.
    complex = col.is_complex()
    log_n1 = int(math.ceil(math.log2(n1)))
    log_n2 = int(math.ceil(math.log2(n2)))
    if flatten:
        assert n1 == 1 << log_n1 and n2 == 1 << log_n2
    # For non-power-of-2, maybe there's a way to only pad up to size 1 << log_n1?
    # I've only figured out how to pad to size 1 << (log_n1 + 1).
    # e.g., [a, b, c] -> [a, b, c, 0, 0, a, b, c]
    n_extended1 = n1 if n1 == 1 << log_n1 else 1 << (log_n1 + 1)
    n_extended2 = n2 if n2 == 1 << log_n2 else 1 << (log_n2 + 1)
    b_fft = fft2d(n_extended1, n_extended2, normalized=True, br_first=False,
                  with_br_perm=False, flatten=flatten).to(col.device)
    if not flatten:
        b_fft.map1.in_size = n1
        b_fft.map2.in_size = n2
    else:
        b_fft = b_fft[1]  # Ignore the nn.Flatten and nn.Unflatten
    b_ifft = ifft2d(n_extended1, n_extended2, normalized=True, br_first=True,
                    with_br_perm=False, flatten=flatten).to(col.device)
    if not flatten:
        b_ifft.map1.out_size = n1
        b_ifft.map2.out_size = n2
    else:
        b_ifft = b_ifft[1]  # Ignore the nn.Flatten and nn.Unflatten
    if n1 < n_extended1:
        col_0 = F.pad(col, (0, 2 * ((1 << log_n1) - n1)))
        col = torch.cat((col_0, col), dim=-1)
    if n2 < n_extended2:
        col_0 = F.pad(col, (0, 0, 0, 2 * ((1 << log_n2) - n2)))
        col = torch.cat((col_0, col), dim=-2)
    if not col.is_complex():
        col = real2complex(col)
    # This fft must have normalized=False for the correct scaling. These are the eigenvalues
    # of the circulant matrix.
    col_f = torch.fft.fftn(col, dim=(-1, -2), norm=None)
    br_perm1 = bitreversal_permutation(n_extended1, pytorch_format=True).to(col.device)
    br_perm2 = bitreversal_permutation(n_extended2, pytorch_format=True).to(col.device)
    # col_f[..., br_perm2, br_perm1] would error "shape mismatch: indexing tensors could not
    # be broadcast together"
    # col_f = col_f[..., br_perm2, :][..., br_perm1]
    col_f = torch.view_as_complex(
        torch.view_as_real(col_f)[..., br_perm2, :, :][..., br_perm1, :])
    if flatten:
        col_f = col_f.reshape(*col_f.shape[:-2], col_f.shape[-2] * col_f.shape[-1])
    # We just want (input_f.unsqueeze(1) * col_f).sum(dim=2).
    # This can be written as a complex matrix multiply as well.
    if not complex:
        if not flatten:
            return nn.Sequential(Real2Complex(), b_fft, DiagonalMultiplySum(col_f), b_ifft,
                                 Complex2Real())
        else:
            return nn.Sequential(Real2Complex(), nn.Flatten(start_dim=-2), b_fft,
                                 DiagonalMultiplySum(col_f), b_ifft,
                                 nn.Unflatten(-1, (n2, n1)), Complex2Real())
    else:
        if not flatten:
            return nn.Sequential(b_fft, DiagonalMultiplySum(col_f), b_ifft)
        else:
            return nn.Sequential(nn.Flatten(start_dim=-2), b_fft, DiagonalMultiplySum(col_f),
                                 b_ifft, nn.Unflatten(-1, (n2, n1)))


def fastfood(diag1: torch.Tensor, diag2: torch.Tensor, diag3: torch.Tensor,
             permutation: torch.Tensor, normalized: bool = False,
             increasing_stride: bool = True, separate_diagonal: bool = True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that performs Fastfood multiplication:
        x -> Diag3 @ H @ Diag2 @ P @ H @ Diag1 @ x,
    where H is the Hadamard matrix and P is a permutation matrix.
    Parameters:
        diag1: (n,), where n is a power of 2.
        diag2: (n,)
        diag3: (n,)
        permutation: (n,)
        normalized: if True, corresponds to the orthogonal Hadamard transform
            (i.e. multiplied by 1/sqrt(n))
        increasing_stride: whether the first Butterfly in the sequence has increasing stride.
        separate_diagonal: if False, the diagonal is combined into the Butterfly part.
    """
    n, = diag1.shape
    assert diag2.shape == diag3.shape == permutation.shape == (n,)
    h1 = hadamard(n, normalized, increasing_stride)
    h2 = hadamard(n, normalized, not increasing_stride)
    if not separate_diagonal:
        h1 = diagonal_butterfly(h1, diag1, diag_first=True)
        h2 = diagonal_butterfly(h2, diag2, diag_first=True)
        h2 = diagonal_butterfly(h2, diag3, diag_first=False)
        return nn.Sequential(h1, FixedPermutation(permutation), h2)
    else:
        return nn.Sequential(Diagonal(diagonal_init=diag1), h1, FixedPermutation(permutation),
                             Diagonal(diagonal_init=diag2), h2, Diagonal(diagonal_init=diag3))


def acdc(diag1: torch.Tensor, diag2: torch.Tensor, dct_first: bool = True,
         separate_diagonal: bool = True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs either the multiplication:
        x -> diag2 @ iDCT @ diag1 @ DCT @ x
    or
        x -> diag2 @ DCT @ diag1 @ iDCT @ x.
    In the paper [1], the math describes the 2nd type while the implementation uses the 1st
    type. Note that the DCT and iDCT are normalized.
    [1] Marcin Moczulski, Misha Denil, Jeremy Appleyard, Nando de Freitas.
        ACDC: A Structured Efficient Linear Layer. http://arxiv.org/abs/1511.05946
    Parameters:
        diag1: (n,), where n is a power of 2.
        diag2: (n,), where n is a power of 2.
        dct_first: if True, uses the first type above; otherwise use the second type.
        separate_diagonal: if False, the diagonal is combined into the Butterfly part.
    """
    n, = diag1.shape
    assert diag2.shape == (n,)
    assert n == 1 << int(math.ceil(math.log2(n))), 'n must be a power of 2'
    # Construct the permutation before the FFT: separate the even and odd and then reverse
    # the odd. e.g., [0, 1, 2, 3] -> [0, 2, 3, 1].
    # This permutation is actually in B (not just B^T B or B B^T). This can be checked with
    # perm2butterfly.
    perm = torch.arange(n)
    perm = torch.cat((perm[::2], perm[1::2].flip([0])))
    perm_inverse = invert(perm)
    br = bitreversal_permutation(n, pytorch_format=True)
    postprocess_diag = 2 * torch.exp(-1j * math.pi * torch.arange(0.0, n) / (2 * n))
    # Normalize
    postprocess_diag[0] /= 2.0
    postprocess_diag[1:] /= math.sqrt(2)
    if dct_first:
        b_fft = fft(n, normalized=True, br_first=False, with_br_perm=False)
        b_ifft = ifft(n, normalized=True, br_first=True, with_br_perm=False)
        b1 = diagonal_butterfly(b_fft, postprocess_diag[br], diag_first=False)
        b2 = diagonal_butterfly(b_ifft, postprocess_diag.conj()[br], diag_first=True)
        if not separate_diagonal:
            b1 = diagonal_butterfly(b_fft, diag1[br], diag_first=False)
            b2 = diagonal_butterfly(b2, diag2[perm], diag_first=False)
            return nn.Sequential(FixedPermutation(perm), Real2Complex(), b1, Complex2Real(),
                                 Real2Complex(), b2, Complex2Real(),
                                 FixedPermutation(perm_inverse))
        else:
            return nn.Sequential(FixedPermutation(perm), Real2Complex(), b1, Complex2Real(),
                                 Diagonal(diagonal_init=diag1[br]), Real2Complex(), b2,
                                 Complex2Real(), Diagonal(diagonal_init=diag2[perm]),
                                 FixedPermutation(perm_inverse))
    else:
        b_fft = fft(n, normalized=True, br_first=True, with_br_perm=False)
        b_ifft = ifft(n, normalized=True, br_first=False, with_br_perm=False)
        b1 = diagonal_butterfly(b_ifft, postprocess_diag.conj(), diag_first=True)
        b2 = diagonal_butterfly(b_fft, postprocess_diag, diag_first=False)
        if not separate_diagonal:
            b1 = diagonal_butterfly(b1, diag1[perm][br], diag_first=False)
            b2 = diagonal_butterfly(b_fft, diag2, diag_first=False)
            return nn.Sequential(Real2Complex(), b1, Complex2Real(), Real2Complex(), b2,
                                 Complex2Real())
        else:
            return nn.Sequential(Real2Complex(), b1, Complex2Real(),
                                 Diagonal(diagonal_init=diag1[perm][br]), Real2Complex(), b2,
                                 Complex2Real(), Diagonal(diagonal_init=diag2))


def wavelet_haar(n, with_perm=True) -> nn.Module:
    """Construct an nn.Module based on Butterfly that exactly performs the multilevel discrete
    wavelet transform with the Haar wavelet.
    Parameters:
        n: size of the discrete wavelet transform. Must be a power of 2.
        with_perm: whether to return both the butterfly and the wavelet rearrangement
            permutation.
    """
    log_n = int(math.ceil(math.log2(n)))
    assert n == 1 << log_n, 'n must be a power of 2'
    factors = []
    for log_size in range(1, log_n + 1):
        size = 1 << log_size
        factor = torch.tensor([[1, 1], [1, -1]],
                              dtype=torch.float).reshape(1, 2, 2) / math.sqrt(2)
        identity = torch.eye(2).reshape(1, 2, 2)
        num_identity = size // 2 - 1
        twiddle_factor = torch.cat((factor, identity.expand(num_identity, 2, 2)))
        factors.append(twiddle_factor.repeat(n // size, 1, 1))
    twiddle = torch.stack(factors, dim=0).unsqueeze(0).unsqueeze(0)
    b = Butterfly(n, n, bias=False, increasing_stride=True, init=twiddle)
    if with_perm:
        perm = FixedPermutation(wavelet_permutation(n, pytorch_format=True))
        return nn.Sequential(b, perm)
    else:
        return b
butterfly-master
torch_butterfly/special.py
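A sanity-check sketch for the fft constructor above, not part of the repo: with with_br_perm=True the returned module should agree with torch.fft.fft up to numerical tolerance.

# Hypothetical sanity check (assumes torch_butterfly is installed).
import torch
from torch_butterfly.special import fft

n = 16
m = fft(n, normalized=False, br_first=True, with_br_perm=True)
x = torch.randn(4, n, dtype=torch.complex64)
assert torch.allclose(m(x), torch.fft.fft(x), atol=1e-4)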
"""My torch implementation of permutations and sinkhorn balancing ops. A torch library of operations and sampling with permutations and their approximation with doubly-stochastic matrices, through Sinkhorn balancing """ import numpy as np from scipy.optimize import linear_sum_assignment from scipy.stats import kendalltau import torch #from torch.distributions import Bernoulli device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def my_sample_gumbel(shape, eps=1e-10): """Samples arbitrary-shaped standard gumbel variables. Args: shape: list of integers eps: float, for numerical stability Returns: A sample of standard Gumbel random variables """ #Sample from Gumbel(0, 1) U = torch.rand(shape, dtype=torch.float, device=device) return -torch.log(eps - torch.log(U + eps)) def simple_sinkhorn(MatrixA, n_iter = 20): #performing simple Sinkhorn iterations. for i in range(n_iter): MatrixA /= MatrixA.sum(dim=1, keepdim=True) MatrixA /= MatrixA.sum(dim=2, keepdim=True) return MatrixA def my_sinkhorn(log_alpha, n_iters = 20): # torch version """Performs incomplete Sinkhorn normalization to log_alpha. By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix with positive entries can be turned into a doubly-stochastic matrix (i.e. its rows and columns add up to one) via the successive row and column normalization. -To ensure positivity, the effective input to sinkhorn has to be exp(log_alpha) (element wise). -However, for stability, sinkhorn works in the log-space. It is only at return time that entries are exponentiated. [1] Sinkhorn, Richard and Knopp, Paul. Concerning nonnegative matrices and doubly stochastic matrices. Pacific Journal of Mathematics, 1967 Args: log_alpha: a 2D tensor of shape [N, N] n_iters: number of sinkhorn iterations (in practice, as little as 20 iterations are needed to achieve decent convergence for N~100) Returns: A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are converted to 3D tensors with batch_size equals to 1) """ n = log_alpha.size()[1] log_alpha = log_alpha.view(-1, n, n) for i in range(n_iters): # torch.logsumexp(input, dim, keepdim, out=None) #Returns the log of summed exponentials of each row of the input tensor in the given dimension dim #log_alpha -= (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1) #log_alpha -= (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n) #avoid in-place log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1) log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n) return torch.exp(log_alpha) def my_gumbel_sinkhorn(log_alpha, temp=1.0, n_samples=1, noise_factor=1.0, n_iters=20, squeeze=True): """Random doubly-stochastic matrices via gumbel noise. In the zero-temperature limit sinkhorn(log_alpha/temp) approaches a permutation matrix. Therefore, for low temperatures this method can be seen as an approximate sampling of permutation matrices, where the distribution is parameterized by the matrix log_alpha The deterministic case (noise_factor=0) is also interesting: it can be shown that lim t->0 sinkhorn(log_alpha/t) = M, where M is a permutation matrix, the solution of the matching problem M=arg max_M sum_i,j log_alpha_i,j M_i,j. Therefore, the deterministic limit case of gumbel_sinkhorn can be seen as approximate solving of a matching problem, otherwise solved via the Hungarian algorithm. Warning: the convergence holds true in the limit case n_iters = infty. 
Unfortunately, in practice n_iter is finite which can lead to numerical instabilities, mostly if temp is very low. Those manifest as pseudo-convergence or some row-columns to fractional entries (e.g. a row having two entries with 0.5, instead of a single 1.0) To minimize those effects, try increasing n_iter for decreased temp. On the other hand, too-low temperature usually lead to high-variance in gradients, so better not choose too low temperatures. Args: log_alpha: 2D tensor (a matrix of shape [N, N]) or 3D tensor (a batch of matrices of shape = [batch_size, N, N]) temp: temperature parameter, a float. n_samples: number of samples noise_factor: scaling factor for the gumbel samples. Mostly to explore different degrees of randomness (and the absence of randomness, with noise_factor=0) n_iters: number of sinkhorn iterations. Should be chosen carefully, in inverse correspondence with temp to avoid numerical instabilities. squeeze: a boolean, if True and there is a single sample, the output will remain being a 3D tensor. Returns: sink: a 4D tensor of [batch_size, n_samples, N, N] i.e. batch_size *n_samples doubly-stochastic matrices. If n_samples = 1 and squeeze = True then the output is 3D. log_alpha_w_noise: a 4D tensor of [batch_size, n_samples, N, N] of noisy samples of log_alpha, divided by the temperature parameter. Ifmy_invert_listperm n_samples = 1 then the output is 3D. """ n = log_alpha.size()[1] log_alpha = log_alpha.view(-1, n, n) batch_size = log_alpha.size()[0] #log_alpha_w_noise = log_alpha[:,None,:,:].expand(batch_size, n_samples, n, n) log_alpha_w_noise = log_alpha.repeat(n_samples, 1, 1) if noise_factor == 0: noise = 0.0 else: noise = my_sample_gumbel([n_samples*batch_size, n, n])*noise_factor log_alpha_w_noise = log_alpha_w_noise + noise log_alpha_w_noise = log_alpha_w_noise / temp my_log_alpha_w_noise = log_alpha_w_noise.clone() sink = my_sinkhorn(my_log_alpha_w_noise) if n_samples > 1 or squeeze is False: sink = sink.view(n_samples, batch_size, n, n) sink = torch.transpose(sink, 1, 0) log_alpha_w_noise = log_alpha_w_noise.view(n_samples, batch_size, n, n) log_alpha_w_noise = torch.transpose(log_alpha_w_noise, 1, 0) return sink, log_alpha_w_noise def my_sample_uniform_and_order(n_lists, n_numbers, prob_inc): """Samples uniform random numbers, return sorted lists and the indices of their original values Returns a 2-D tensor of n_lists lists of n_numbers sorted numbers in the [0,1] interval, each of them having n_numbers elements. Lists are increasing with probability prob_inc. It does so by first sampling uniform random numbers, and then sorting them. Therefore, sorted numbers follow the distribution of the order statistics of a uniform distribution. It also returns the random numbers and the lists of permutations p such p(sorted) = random. Notice that if one ones to build sorted numbers in different intervals, one might just want to re-scaled this canonical form. Args: n_lists: An int,the number of lists to be sorted. n_numbers: An int, the number of elements in the permutation. prob_inc: A float, the probability that a list of numbers will be sorted in increasing order. Returns: ordered: a 2-D float tensor with shape = [n_list, n_numbers] of sorted lists of numbers. random: a 2-D float tensor with shape = [n_list, n_numbers] of uniform random numbers. permutations: a 2-D int tensor with shape = [n_list, n_numbers], row i satisfies ordered[i, permutations[i]) = random[i,:]. 
""" # sample n_lists samples from Bernoulli with probability of prob_inc my_bern = torch.distributions.Bernoulli(torch.tensor([prob_inc])).sample([n_lists]) sign = -1*((my_bern * 2) -torch.ones([n_lists,1])) sign = sign.type(torch.float32) random =(torch.empty(n_lists, n_numbers).uniform_(0, 1)) random =random.type(torch.float32) # my change #random_with_sign = random * sign #Finds sorted values and indices of the k largest entries for the last dimension. #sorted – controls whether to return the elements in sorted order #ordered, permutations = torch.topk(random_with_sign, k = n_numbers, sorted = True) # my change ordered, permutations = torch.sort(random, descending=True) #my change #ordered = ordered * sign return ordered, random, permutations def my_sample_permutations(n_permutations, n_objects): """Samples a batch permutations from the uniform distribution. Returns a sample of n_permutations permutations of n_objects indices. Permutations are assumed to be represented as lists of integers (see 'listperm2matperm' and 'matperm2listperm' for conversion to alternative matricial representation). It does so by sampling from a continuous distribution and then ranking the elements. By symmetry, the resulting distribution over permutations must be uniform. Args: n_permutations: An int, the number of permutations to sample. n_objects: An int, the number of elements in the permutation. the embedding sources. Returns: A 2D integer tensor with shape [n_permutations, n_objects], where each row is a permutation of range(n_objects) """ random_pre_perm = torch.empty(n_permutations, n_objects).uniform_(0, 1) _, permutations = torch.topk(random_pre_perm, k = n_objects) return permutations def my_permute_batch_split(batch_split, permutations): """Scrambles a batch of objects according to permutations. It takes a 3D tensor [batch_size, n_objects, object_size] and permutes items in axis=1 according to the 2D integer tensor permutations, (with shape [batch_size, n_objects]) a list of permutations expressed as lists. For many dimensional-objects (e.g. images), objects have to be flattened so they will respect the 3D format, i.e. tf.reshape( batch_split, [batch_size, n_objects, -1]) Args: batch_split: 3D tensor with shape = [batch_size, n_objects, object_size] of splitted objects permutations: a 2D integer tensor with shape = [batch_size, n_objects] of permutations, so that permutations[n] is a permutation of range(n_objects) Returns: A 3D tensor perm_batch_split with the same shape as batch_split, so that perm_batch_split[n, j,:] = batch_split[n, perm[n,j],:] """ batch_size= permutations.size()[0] n_objects = permutations.size()[1] permutations = permutations.view(batch_size, n_objects, -1) perm_batch_split = torch.gather(batch_split, 1, permutations) return perm_batch_split def my_listperm2matperm(listperm): """Converts a batch of permutations to its matricial form. Args: listperm: 2D tensor of permutations of shape [batch_size, n_objects] so that listperm[n] is a permutation of range(n_objects). Returns: a 3D tensor of permutations matperm of shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a permutation of the identity matrix, with matperm[n, i, listperm[n,i]] = 1 """ n_objects = listperm.size()[1] eye = torch.eye(n_objects, dtype=torch.int, device=listperm.device)[listperm] # eye= torch.tensor(eye, dtype=torch.int32) return eye def my_matperm2listperm(matperm): """Converts a batch of permutations to its enumeration (list) form. 
Args: matperm: a 3D tensor of permutations of shape = [batch_size, n_objects, n_objects] so that matperm[n, :, :] is a permutation of the identity matrix. If the input is 2D, it is reshaped to 3D with batch_size = 1. dtype: output_type (int32, int64) Returns: A 2D tensor of permutations listperm, where listperm[n,i] is the index of the only non-zero entry in matperm[n, i, :] """ batch_size = matperm.size()[0] n_objects = matperm.size()[1] matperm = matperm.view(-1, n_objects, n_objects) #argmax is the index location of each maximum value found(argmax) _, argmax = torch.max(matperm, dim=2, keepdim= True) argmax = argmax.view(batch_size, n_objects) return argmax def my_invert_listperm(listperm): """Inverts a batch of permutations. Args: listperm: a 2D integer tensor of permutations listperm of shape = [batch_size, n_objects] so that listperm[n] is a permutation of range(n_objects) Returns: A 2D tensor of permutations listperm, where listperm[n,i] is the index of the only non-zero entry in matperm[n, i, :] """ return my_matperm2listperm(torch.transpose(my_listperm2matperm(listperm), 1, 2)) def my_matching(matrix_batch): """Solves a matching problem for a batch of matrices. This is a wrapper for the scipy.optimize.linear_sum_assignment function. It solves the optimization problem max_P sum_i,j M_i,j P_i,j with P a permutation matrix. Notice the negative sign; the reason, the original function solves a minimization problem Args: matrix_batch: A 3D tensor (a batch of matrices) with shape = [batch_size, N, N]. If 2D, the input is reshaped to 3D with batch_size = 1. Returns: listperms, a 2D integer tensor of permutations with shape [batch_size, N] so that listperms[n, :] is the permutation of range(N) that solves the problem max_P sum_i,j M_i,j P_i,j with M = matrix_batch[n, :, :]. """ def hungarian(x): if x.ndim == 2: x = np.reshape(x, [1, x.shape[0], x.shape[1]]) sol = np.zeros((x.shape[0], x.shape[1]), dtype=np.int32) for i in range(x.shape[0]): sol[i, :] = linear_sum_assignment(-x[i, :])[1].astype(np.int32) return sol listperms = hungarian(matrix_batch.cpu().detach().numpy()) # listperms = torch.from_numpy(listperms) listperms = torch.tensor(listperms, dtype=torch.long) return listperms def my_kendall_tau(batch_perm1, batch_perm2): """Wraps scipy.stats kendalltau function. Args: batch_perm1: A 2D tensor (a batch of matrices) with shape = [batch_size, N] batch_perm2: same as batch_perm1 Returns: A list of Kendall distances between each of the elements of the batch. """ def kendalltau_batch(x, y): if x.ndim == 1: x = np.reshape(x, [1, x.shape[0]]) if y.ndim == 1: y = np.reshape(y, [1, y.shape[0]]) kendall = np.zeros((x.shape[0], 1), dtype=np.float32) for i in range(x.shape[0]): kendall[i, :] = kendalltau(x[i, :], y[i, :])[0] return kendall listkendall = kendalltau_batch(batch_perm1.cpu().numpy(), batch_perm2.cpu().numpy()) listkendall = torch.from_numpy(listkendall) return listkendall
butterfly-master
gumbel-sinkhorn/my_sinkhorn_ops.py
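A short sketch of the Sinkhorn ops above, not part of the repo: the row/column sums should be close to 1 after enough iterations, and gumbel-sinkhorn returns one doubly-stochastic matrix per sample. log_alpha is created on the module-level device so the Gumbel noise matches.

# Hypothetical sketch of my_sinkhorn / my_gumbel_sinkhorn.
import torch
from my_sinkhorn_ops import my_sinkhorn, my_gumbel_sinkhorn, device

log_alpha = torch.randn(8, 8, device=device)
P = my_sinkhorn(log_alpha, n_iters=50)  # shape (1, 8, 8)
print(P.sum(dim=1))                     # columns sum to ~1
print(P.sum(dim=2))                     # rows sum to ~1
sink, _ = my_gumbel_sinkhorn(log_alpha, temp=0.1, n_samples=2, noise_factor=1.0,
                             n_iters=50, squeeze=False)
print(sink.shape)                       # (1, 2, 8, 8): batch x samples x N x N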
"""Model class for sorting numbers.""" import torch.nn as nn class Features(nn.Module): def __init__(self, latent_dim, output_dim, dropout_prob): """ In the constructor we instantiate two nn.Linear modules and assign them as member variables. This Feature extractor class takes an input and constructs a feature vector. It can be applied independently to all elements of the input sequence in_flattened_vector: input flattened vector latent_dim: number of neurons in latent layer output_dim: dimension of log alpha square matrix """ super().__init__() # net: output of the first neural network that connects numbers to a # 'latent' representation. # activation_fn: ReLU is default hence it is specified here # dropout p – probability of an element to be zeroed self.linear1 = nn.Linear(1, latent_dim) self.relu1 = nn.ReLU() self.d1 = nn.Dropout(p = dropout_prob) # now those latent representation are connected to rows of the matrix # log_alpha. self.linear2 = nn.Linear(latent_dim, output_dim) self.d2 = nn.Dropout(p=dropout_prob) def forward(self, x): """ In the forward function we accept a Variable of input data and we must return a Variable of output data. We can use Modules defined in the constructor as well as arbitrary operators on Variables. x: Tensor of shape (batch_size, 1) """ # activation_fn: ReLU x = self.d1(self.relu1(self.linear1(x))) # no activation function is enabled x = self.d2(self.linear2(x)) return x class Sinkhorn_Net(nn.Module): def __init__(self, latent_dim, output_dim, dropout_prob): super().__init__() self.output_dim = output_dim self.features = Features(latent_dim, output_dim, dropout_prob) def forward(self, x): """ x: Tensor of length (batch, sequence_length) Note that output_dim should correspond to the intended sequence length """ # each number is processed with the same network, so data is reshaped # so that numbers occupy the 'batch' position. x = x.view(-1, 1) x = self.features(x) #reshape to cubic for sinkhorn operation x = x.reshape(-1, self.output_dim, self.output_dim) return x
butterfly-master
gumbel-sinkhorn/my_sorting_model.py
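A minimal shape check for Sinkhorn_Net above, not part of the repo: each input sequence of length output_dim yields one square log_alpha matrix.

# Hypothetical shape check for Sinkhorn_Net.
import torch
from my_sorting_model import Sinkhorn_Net

net = Sinkhorn_Net(latent_dim=32, output_dim=10, dropout_prob=0.0)
x = torch.rand(4, 10)   # batch of 4 sequences of 10 numbers
log_alpha = net(x)
print(log_alpha.shape)  # torch.Size([4, 10, 10]): one square matrix per sequence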
import os

import torch
import numpy
import torch.nn as nn
import matplotlib.pyplot as plt
import argh

import my_sorting_model
import my_sinkhorn_ops

dir_path = os.path.dirname(os.path.realpath(__file__))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def make_random_batch(batch_size, n_numbers, prob_inc, samples_per_num):
    train_ordered, train_random, train_hard_perms = my_sinkhorn_ops.my_sample_uniform_and_order(
        batch_size, n_numbers, prob_inc)
    train_ordered = train_ordered.to(device)
    train_random = train_random.to(device)
    train_hard_perms = train_hard_perms.to(device)
    # Tiled variables, to compare to many permutations
    train_ordered_tiled = train_ordered.repeat(samples_per_num, 1).unsqueeze(-1)
    train_random_tiled = train_random.repeat(samples_per_num, 1).unsqueeze(-1)
    return train_ordered, train_random, train_hard_perms, train_ordered_tiled, train_random_tiled


def inv_soft_perms_flattened(soft_perms_inf):
    n_numbers = soft_perms_inf.size(-1)
    inv_soft_perms = torch.transpose(soft_perms_inf, 2, 3)
    inv_soft_perms = torch.transpose(inv_soft_perms, 0, 1)
    inv_soft_perms_flat = inv_soft_perms.view(-1, n_numbers, n_numbers)
    return inv_soft_perms_flat


def build_l2s_loss(ordered_tiled, random_tiled, soft_perms_inf, n_numbers):
    """Builds loss tensor with soft permutations, for training.
    Not used explicitly in the training; the computation was incorporated into the training
    code itself. Kept for reference.
    """
    print("soft_perms_inf size", soft_perms_inf.size())
    inv_soft_perms = torch.transpose(soft_perms_inf, 2, 3)
    inv_soft_perms = torch.transpose(inv_soft_perms, 0, 1)
    inv_soft_perms_flat = inv_soft_perms.view(-1, n_numbers, n_numbers)
    ordered_tiled = ordered_tiled.view(-1, n_numbers, 1)
    random_tiled = random_tiled.view(-1, n_numbers, 1)
    # Squared l2 loss
    diff = ordered_tiled - torch.matmul(inv_soft_perms_flat, random_tiled)
    l2s_diff = torch.mean(torch.mul(diff, diff))
    print("l2s_diff", l2s_diff)


def train_model(n_numbers=50, lr=0.1, temperature=1.0, batch_size=500, prob_inc=1.0,
                samples_per_num=5, n_iter_sinkhorn=20, n_units=32, noise_factor=1.0,
                optimizer='adam', keep_prob=1., num_iters=500, n_epochs=500, fixed_data=True):
    # Create the neural network
    model = my_sorting_model.Sinkhorn_Net(latent_dim=n_units, output_dim=n_numbers,
                                          dropout_prob=1. - keep_prob)
    model.to(device)
    model.train()
    # Count the number of parameters
    n_params = 0
    for p in model.parameters():
        n_params += numpy.prod(p.size())
    print('# of parameters: {}'.format(n_params))
    # We use mean squared error loss here.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, eps=1e-8)
    # Start training (old train_model function)
    (train_ordered, train_random, train_hard_perms, train_ordered_tiled,
     train_random_tiled) = make_random_batch(batch_size, n_numbers, prob_inc, samples_per_num)
    loss_history = []
    epoch_history = []
    for epoch in range(n_epochs):
        if not fixed_data:
            (train_ordered, train_random, train_hard_perms, train_ordered_tiled,
             train_random_tiled) = make_random_batch(batch_size, n_numbers, prob_inc,
                                                     samples_per_num)
        optimizer.zero_grad()
        # Obtain log alpha
        log_alpha = model(train_random)
        # Apply gumbel sinkhorn on log alpha
        soft_perms_inf, log_alpha_w_noise = my_sinkhorn_ops.my_gumbel_sinkhorn(
            log_alpha, temperature, samples_per_num, noise_factor, n_iter_sinkhorn,
            squeeze=False)
        inv_soft_perms_flat = inv_soft_perms_flattened(soft_perms_inf)
        loss = criterion(train_ordered_tiled,
                         torch.matmul(inv_soft_perms_flat, train_random_tiled))
        loss.backward()
        optimizer.step()
        epoch_history.append(epoch)
        loss_history.append(loss.item())
        # Report progress.
        print("Epoch {0:03d}: l2 loss={1:.4f}".format(epoch + 1, loss_history[-1]))
    # Save the model for evaluation
    torch.save(model.state_dict(), os.path.join(dir_path, 'trained_model'))
    print('Training completed')
    # return loss_history, epoch_history

    ###### Done training
    plt.plot(epoch_history, loss_history)
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.title('Training Loss')
    plt.grid(True)
    plt.savefig("training_loss.png")
    plt.show()


if __name__ == '__main__':
    _parser = argh.ArghParser()
    _parser.set_default_command(train_model)
    _parser.dispatch()
butterfly-master
gumbel-sinkhorn/my_sorting_train.py
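A possible invocation of the training script above, not from the repo: argh exposes train_model's keyword arguments as CLI flags, and the flag spellings assume argh's default underscore-to-dash conversion.

# Hypothetical command line for my_sorting_train.py:
#   python my_sorting_train.py --n-numbers 20 --batch-size 128 --n-epochs 100 --lr 0.01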
import os

import torch
import numpy
import torch.nn as nn
import argh

import my_sorting_model
import my_sinkhorn_ops
from my_sorting_train import make_random_batch

dir_path = os.path.dirname(os.path.realpath(__file__))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


# Test process
def test_model(n_numbers=50, temperature=1.0, batch_size=50, prob_inc=1.0, samples_per_num=5,
               n_iter_sinkhorn=10, n_units=32, noise_factor=0.0, keep_prob=1., scale=1.0,
               shift=0.0):
    # Load the trained model
    model = my_sorting_model.Sinkhorn_Net(latent_dim=n_units, output_dim=n_numbers,
                                          dropout_prob=1. - keep_prob)
    model.load_state_dict(torch.load(os.path.join(dir_path, 'trained_model')))
    model.to(device)
    model.eval()
    # Generate the test set (validation variables)
    (test_ordered, test_random, test_hard_perms, test_ordered_tiled,
     test_random_tiled) = make_random_batch(batch_size, n_numbers, prob_inc, samples_per_num)
    # Transform to a different out-of-domain interval
    test_ordered = test_ordered * scale + shift
    test_random = test_random * scale + shift
    # Obtain log alpha
    log_alpha = model(test_random)
    # Apply gumbel sinkhorn on log alpha
    soft_perms_inf, log_alpha_w_noise = my_sinkhorn_ops.my_gumbel_sinkhorn(
        log_alpha, temperature, samples_per_num, noise_factor, n_iter_sinkhorn, squeeze=False)
    l1_loss, l2_loss, prop_wrong, prop_any_wrong, kendall_tau = build_hard_losses(
        log_alpha_w_noise, test_random_tiled, test_ordered_tiled, test_hard_perms, n_numbers,
        samples_per_num)
    print("samples_per_num", samples_per_num)
    print("l1 loss", l1_loss)
    print("l2 loss", l2_loss)
    print("prop_wrong", prop_wrong)
    print("prop any wrong", prop_any_wrong)
    print("Kendall's tau", kendall_tau)
    print('Test completed')


def build_hard_losses(log_alpha_w_noise, random_tiled, ordered_tiled, hard_perms, n_numbers,
                      samples_per_num):
    """Losses based on hard reconstruction. Only for evaluation.
    Doubly-stochastic matrices are rounded with the matching function.
    """
    log_alpha_w_noise_flat = torch.transpose(log_alpha_w_noise, 0, 1)
    log_alpha_w_noise_flat = log_alpha_w_noise_flat.view(-1, n_numbers, n_numbers)
    hard_perms_inf = my_sinkhorn_ops.my_matching(log_alpha_w_noise_flat)
    # Matching was done in numpy, so move back to the device
    hard_perms_inf = hard_perms_inf.to(device)
    inverse_hard_perms_inf = my_sinkhorn_ops.my_invert_listperm(hard_perms_inf)
    # TODO: what's the point of inverting the above?
    hard_perms_tiled = hard_perms.repeat(samples_per_num, 1)
    # The 3D output of permute_batch_split must be squeezed
    ordered_inf_tiled = my_sinkhorn_ops.my_permute_batch_split(random_tiled,
                                                               inverse_hard_perms_inf)
    ordered_inf_tiled = ordered_inf_tiled.view(-1, n_numbers)
    ordered_tiled = ordered_tiled.view(-1, n_numbers)
    l_diff = ordered_tiled - ordered_inf_tiled
    l1_diff = torch.mean(torch.abs(l_diff))
    l2_diff = torch.mean(torch.mul(l_diff, l_diff))
    diff_perms = torch.abs(hard_perms_tiled - inverse_hard_perms_inf)
    diff_perms = diff_perms.type(torch.float32)
    prop_wrong = torch.mean(torch.sign(diff_perms))
    prop_any_wrong = torch.mean(torch.sign(torch.sum(diff_perms, dim=1)))
    kendall_tau = torch.mean(my_sinkhorn_ops.my_kendall_tau(hard_perms_tiled,
                                                            inverse_hard_perms_inf))
    return l1_diff, l2_diff, prop_wrong, prop_any_wrong, kendall_tau


if __name__ == '__main__':
    _parser = argh.ArghParser()
    _parser.set_default_command(test_model)
    _parser.dispatch()
butterfly-master
gumbel-sinkhorn/my_sinkhorn_eval.py
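build_hard_losses relies on my_matching to round doubly stochastic matrices to hard permutations. A plausible standalone sketch using SciPy's Hungarian solver (the repo's actual implementation may differ; the name hard_matching is ours):

import torch
from scipy.optimize import linear_sum_assignment

def hard_matching(log_alpha):
    # linear_sum_assignment minimizes cost, so negate to maximize total score.
    rows, cols = linear_sum_assignment(-log_alpha.detach().cpu().numpy())
    return torch.as_tensor(cols)

perm = hard_matching(torch.randn(6, 6))
print(perm)  # a permutation of 0..5, one column index per row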
import glob
import os
from shutil import move
from os import rmdir

target_folder = './val/'
test_folder = './test/'

os.mkdir(test_folder)

# Map each validation image filename to its class (wnid).
val_dict = {}
with open('./val/val_annotations.txt', 'r') as f:
    for line in f.readlines():
        split_line = line.split('\t')
        val_dict[split_line[0]] = split_line[1]

paths = glob.glob('./val/images/*')
for path in paths:
    file = path.split('/')[-1]
    folder = val_dict[file]
    if not os.path.exists(target_folder + str(folder)):
        os.mkdir(target_folder + str(folder))
        os.mkdir(target_folder + str(folder) + '/images')
    if not os.path.exists(test_folder + str(folder)):
        os.mkdir(test_folder + str(folder))
        os.mkdir(test_folder + str(folder) + '/images')

for path in paths:
    file = path.split('/')[-1]
    folder = val_dict[file]
    # Keep the first 25 images per class for validation; the rest go to test.
    if len(glob.glob(target_folder + str(folder) + '/images/*')) < 25:
        dest = target_folder + str(folder) + '/images/' + str(file)
    else:
        dest = test_folder + str(folder) + '/images/' + str(file)
    move(path, dest)

rmdir('./val/images')
butterfly-master
data/tiny-imagenet-200/val_format.py
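For orientation, the script above reshuffles the tiny-imagenet-200 validation split. Assuming the standard download layout and that it is run from data/tiny-imagenet-200, the effect is roughly:

#   before:  val/images/*.JPEG          + val/val_annotations.txt
#   after:   val/<wnid>/images/*.JPEG   (first 25 images per class)
#            test/<wnid>/images/*.JPEG  (the remaining images per class)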
from pathlib import Path project_root = Path(__file__).parent.absolute() import os import random import math from collections.abc import Sequence from functools import partial import torch import pytorch_lightning as pl from pytorch_lightning.callbacks import Callback from munch import Munch import ray from ray import tune from ray.tune import Trainable, Experiment, SyncConfig, sample_from, grid_search from ray.tune.schedulers import AsyncHyperBandScheduler from ray.tune.integration.wandb import WandbLogger from pl_runner import pl_train from utils import remove_postfix, dictconfig_to_munch, munch_to_dictconfig HYDRA_TUNE_KEYS = ['_grid', '_sample', '_sample_uniform', '_sample_log_uniform'] def munchconfig_to_tune_munchconfig(cfg): """Convert config to one compatible with Ray Tune. Entry as list whose first element is "_grid" is converted to ray.tune.grid_search. "_sample" is converted to ray.tune.sample_from. "_sample_uniform" is converted to ray.tune.sample_from with uniform distribution [min, max). "_sample_log_uniform" is converted to ray.tune.sample_from with uniform distribution exp(uniform(log(min), log(max))) Examples: lr=1e-3 for a specific learning rate lr=[_grid, 1e-3, 1e-4, 1e-5] means grid search over those values lr=[_sample, 1e-3, 1e-4, 1e-5] means randomly sample from those values lr=[_sample_uniform, 1e-4, 3e-4] means randomly sample from those min/max lr=[_sample_log_uniform, 1e-4, 1e-3] means randomly sample from those min/max but distribution is log uniform: exp(uniform(log 1e-4, log 1e-3)) """ def convert_value(v): # The type is omegaconf.listconfig.ListConfig and not list, so we test if it's a Sequence if not (isinstance(v, Sequence) and len(v) > 0 and v[0] in HYDRA_TUNE_KEYS): return v else: if v[0] == '_grid': # grid_search requires list for some reason return grid_search(list(v[1:])) elif v[0] == '_sample': # Python's lambda doesn't capture the object, it only captures the variable name # So we need extra argument to capture the object # https://docs.python.org/3/faq/programming.html#why-do-lambdas-defined-in-a-loop-with-different-values-all-return-the-same-result # https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture # Switching back to not capturing variable since (i) ray 1.0 doesn't like that # (ii) v isn't changing in this scope return sample_from(lambda _: random.choice(v[1:])) elif v[0] == '_sample_uniform': min_, max_ = v[1:] if isinstance(min_, int) and isinstance(max_, int): return sample_from(lambda _: random.randint(min_, max_)) else: return sample_from(lambda _: random.uniform(min_, max_)) elif v[0] == '_sample_log_uniform': min_, max_ = v[1:] return sample_from(lambda _: math.exp(random.uniform(math.log(min_), math.log(max_)))) else: assert False def convert(cfg): return Munch({k: convert(v) if isinstance(v, Munch) else convert_value(v) for k, v in cfg.items()}) return convert(cfg) class TuneReportCheckpointCallback(Callback): # We group train and val reporting into one, otherwise tune thinks there're 2 different epochs. 
def on_epoch_end(self, trainer, pl_module): results = {remove_postfix(k, '_epoch'): v for k, v in trainer.logged_metrics.items() if (k.startswith('train_') or k.startswith('val_')) and not k.endswith('_step')} results['mean_loss'] = results.get('val_loss', results['train_loss']) if 'val_accuracy' in results: results['mean_accuracy'] = results['val_accuracy'] # Checkpointing should be done *before* reporting # https://docs.ray.io/en/master/tune/api_docs/trainable.html with tune.checkpoint_dir(step=trainer.current_epoch) as checkpoint_dir: trainer.save_checkpoint(os.path.join(checkpoint_dir, f"{type(pl_module).__name__}.ckpt")) tune.report(**results) def on_test_epoch_end(self, trainer, pl_module): results = {remove_postfix(k, '_epoch'): v for k, v in trainer.logged_metrics.items() if k.startswith('test_') and not k.endswith('_step')} tune.report(**results) def pl_train_with_tune(cfg, pl_module_cls, checkpoint_dir=None): cfg = munch_to_dictconfig(Munch(cfg)) checkpoint_path = (None if not checkpoint_dir else os.path.join(checkpoint_dir, f"{pl_module_cls.__name__}.ckpt")) trainer_extra_args = dict( gpus=1 if cfg.gpu else None, progress_bar_refresh_rate=0, resume_from_checkpoint=checkpoint_path, callbacks=[TuneReportCheckpointCallback()] ) pl_train(cfg, pl_module_cls, **trainer_extra_args) def ray_train(cfg, pl_module_cls): # We need Munch to hold tune functions. DictConfig can only hold static config. cfg = munchconfig_to_tune_munchconfig(dictconfig_to_munch(cfg)) ray_config={ 'model': cfg.model, 'dataset': cfg.dataset, 'train': cfg.train, 'seed': cfg.seed, 'wandb': cfg.wandb, 'gpu': cfg.runner.gpu_per_trial != 0.0, } dataset_str = cfg.dataset._target_.split('.')[-1] model_str = cfg.model._target_.split('.')[-1] args_str = '_' # If we're writing to dfs or efs already, no need to sync explicitly # This needs to be a noop function, not just False. 
# If False, ray won't restore failed spot instances
    sync_to_driver = None if not cfg.runner.nfs else lambda source, target: None
    experiment = Experiment(
        name=f'{dataset_str}_{model_str}',
        run=partial(pl_train_with_tune, pl_module_cls=pl_module_cls),
        local_dir=cfg.runner.result_dir,
        num_samples=cfg.runner.ntrials if not cfg.smoke_test else 1,
        resources_per_trial={'cpu': 1 + cfg.dataset.num_workers, 'gpu': cfg.runner.gpu_per_trial},
        # epochs + 1 because calling trainer.test(model) counts as one epoch
        stop={"training_iteration": 1 if cfg.smoke_test else cfg.train.epochs + 1},
        config=ray_config,
        loggers=[WandbLogger],
        keep_checkpoints_num=1,  # Save disk space, just need 1 for recovery
        # checkpoint_at_end=True,
        # checkpoint_freq=1000,
        # Just to enable recovery with @max_failures
        max_failures=-1,
        sync_to_driver=sync_to_driver,  # As of Ray 1.0.0, still need this here
    )
    if cfg.smoke_test or cfg.runner.local:
        ray.init(num_gpus=torch.cuda.device_count())
    else:
        try:
            ray.init(address='auto')
        except:
            try:
                with open(project_root / 'ray_config/redis_address', 'r') as f:
                    address = f.read().strip()
                with open(project_root / 'ray_config/redis_password', 'r') as f:
                    password = f.read().strip()
                ray.init(address=address, _redis_password=password)
            except:
                ray.init(num_gpus=torch.cuda.device_count())
                import warnings
                warnings.warn("Running Ray with just one node")
    if cfg.runner.hyperband:
        scheduler = AsyncHyperBandScheduler(metric='mean_accuracy', mode='max',
                                            max_t=cfg.train.epochs + 1,
                                            grace_period=cfg.runner.grace_period)
    else:
        scheduler = None
    trials = ray.tune.run(experiment,
                          scheduler=scheduler,
                          # sync_config=SyncConfig(sync_to_driver=sync_to_driver),
                          raise_on_failed_trial=False,
                          queue_trials=True)
    return trials
butterfly-master
convolution/ray_runner.py
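The _grid/_sample conventions accepted by munchconfig_to_tune_munchconfig can be exercised directly. A small illustrative call (the key names train.lr and train.wd are made up for this sketch):

from munch import Munch
from ray_runner import munchconfig_to_tune_munchconfig

cfg = Munch(train=Munch(lr=['_grid', 1e-3, 1e-4],
                        wd=['_sample_log_uniform', 1e-5, 1e-2]))
tune_cfg = munchconfig_to_tune_munchconfig(cfg)
# tune_cfg.train.lr is now ray.tune.grid_search([1e-3, 1e-4]);
# tune_cfg.train.wd samples exp(uniform(log 1e-5, log 1e-2)) per trial.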
# Adapted from https://github.com/algrebe/python-tee, ported to Python 3 import os import sys from abc import ABCMeta, abstractmethod class Tee(object): """ duplicates streams to a file. credits : http://stackoverflow.com/q/616645 """ def __init__(self, filename, mode="a", file_filters=None, stream_filters=None): """ writes both to stream and to file. file_filters is a list of callables that processes a string just before being written to the file. stream_filters is a list of callables that processes a string just before being written to the stream. both stream & filefilters must return a string or None. """ self.filename = filename self.mode = mode self.file_filters = file_filters or [] self.stream_filters = stream_filters or [] self.stream = None self.fp = None @abstractmethod def set_stream(self, stream): """ assigns "stream" to some global variable e.g. sys.stdout """ pass @abstractmethod def get_stream(self): """ returns the original stream e.g. sys.stdout """ pass def write(self, message): stream_message = message for f in self.stream_filters: stream_message = f(stream_message) if stream_message is None: break file_message = message for f in self.file_filters: file_message = f(file_message) if file_message is None: break if stream_message is not None: self.stream.write(stream_message) if file_message is not None: self.fp.write(file_message) # Need to flush to mimic unbuffered writing # https://stackoverflow.com/questions/37462011/write-unbuffered-on-python-3 self.fp.flush() def flush(self): # Need to check for None because Ray seems to call flush after closing the stream if self.stream is not None: self.stream.flush() if self.fp is not None: self.fp.flush() os.fsync(self.fp.fileno()) def __enter__(self): self.stream = self.get_stream() self.fp = open(self.filename, self.mode) self.set_stream(self) def __exit__(self, *args): self.close() def __del__(self): self.close() def close(self): if self.stream != None: self.set_stream(self.stream) self.stream = None if self.fp != None: self.fp.close() self.fp = None def isatty(self): return self.stream.isatty() # Need this, otherwise can't work with Ray def fileno(self): return self.stream.fileno() def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.filename) __str__ = __repr__ __unicode__ = __repr__ class StdoutTee(Tee): def set_stream(self, stream): sys.stdout = stream def get_stream(self): return sys.stdout class StderrTee(Tee): def set_stream(self, stream): sys.stderr = stream def get_stream(self): return sys.stderr
butterfly-master
convolution/tee.py
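Typical use of the Tee classes above is as a context manager around a training run. A minimal sketch (the log filename is arbitrary):

from tee import StdoutTee

with StdoutTee("run.log", mode="w"):
    print("this goes to both the console and run.log")
# On exit, sys.stdout is restored and the file is closed.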
import torch
from torch.nn import functional as F


class Task:
    @staticmethod
    def metrics(outs, y, len_batch=None):
        return {}

    @staticmethod
    def metrics_epoch(outs, y, len_batch=None):
        return {}


class BinaryClassification(Task):
    @staticmethod
    def loss(logits, y, len_batch=None):
        # BCE loss requires squeezing last dimension of logits so it has the same shape as y
        return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())

    @staticmethod
    def metrics(logits, y, len_batch=None):
        return {'accuracy': torch.eq(logits.squeeze(-1) >= 0, y).float().mean()}

    @staticmethod
    def metrics_epoch(logits, y, len_batch=None):
        return BinaryClassification.metrics(torch.cat(logits), torch.cat(y), len_batch)


class MulticlassClassification(Task):
    @staticmethod
    def loss(logits, y, len_batch=None):
        return F.cross_entropy(logits, y)

    @staticmethod
    def metrics(logits, y, len_batch=None):
        return {'accuracy': torch.eq(torch.argmax(logits, dim=-1), y).float().mean()}

    @staticmethod
    def metrics_epoch(logits, y, len_batch=None):
        return MulticlassClassification.metrics(torch.cat(logits, dim=0), torch.cat(y, dim=0), len_batch)


class MSERegression(Task):
    @staticmethod
    def loss(outs, y, len_batch=None):
        if len_batch is None:
            return F.mse_loss(outs, y)
        else:
            # Computes the loss of the first `lens` items in the batches
            mask = torch.zeros_like(outs, dtype=torch.bool)
            for i, l in enumerate(len_batch):
                mask[i, :l, :] = 1
            outs_masked = torch.masked_select(outs, mask)
            y_masked = torch.masked_select(y, mask)
            return F.mse_loss(outs_masked, y_masked)
butterfly-master
convolution/tasks.py
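The len_batch masking in MSERegression is easiest to see on a toy batch of padded sequences:

import torch
from tasks import MSERegression

outs = torch.randn(2, 5, 3)  # (batch, seq_len, features)
y = torch.randn(2, 5, 3)
# Only the first 3 steps of sequence 0 and all 5 of sequence 1 count.
loss = MSERegression.loss(outs, y, len_batch=[3, 5])
print(loss)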
from pathlib import Path

import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback


def pl_train(cfg, pl_module_cls, **kwargs):
    trainer_args = dict(
        gpus=1,
        max_epochs=1 if cfg.smoke_test else cfg.train.epochs,
        checkpoint_callback=False,  # Disable checkpointing to save disk space
        progress_bar_refresh_rate=1,
        **cfg.train.pltrainer,
    )
    trainer_args.update(kwargs)
    if cfg.seed is not None:
        pl.seed_everything(cfg.seed)
    model = pl_module_cls(cfg.model, cfg.dataset, cfg.train)
    trainer = pl.Trainer(**trainer_args)
    trainer.fit(model, model.datamodule)
    if 'save_checkpoint_path' in cfg.train:
        path = Path(cfg.train.save_checkpoint_path)
        path.parent.mkdir(parents=True, exist_ok=True)
        trainer.save_checkpoint(str(path))
    if cfg.train.run_test:
        trainer.test(model, model.datamodule)
    return trainer, model
butterfly-master
convolution/pl_runner.py
import torch


def LeNetScheduler(optimizer, nepochs, **kwargs):
    def sched(epoch):
        if epoch < int(nepochs * 0.5):
            return 1.0
        elif epoch < int(nepochs * 0.75):
            return 0.5
        else:
            return 0.1
    # sched returns a multiplicative factor applied to the optimizer's base lr
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=sched)
butterfly-master
convolution/lr_schedulers.py
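Usage sketch for LeNetScheduler: LambdaLR multiplies the optimizer's base learning rate by the returned factor, so with base lr 0.1 the schedule steps through 0.1, 0.05, and 0.01:

import torch
from lr_schedulers import LeNetScheduler

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = LeNetScheduler(opt, nepochs=100)
for epoch in range(100):
    # ... one epoch of training ...
    opt.step()
    sched.step()  # lr: 0.1 for epochs < 50, 0.05 for epochs < 75, then 0.01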
import torch
from omegaconf.dictconfig import DictConfig
from munch import Munch


def remove_postfix(text, postfix):
    if text.endswith(postfix):
        return text[:-len(postfix)]
    return text


# pytorch-lightning returns a pytorch 0-dim tensor instead of a python scalar
def to_scalar(x):
    return x.item() if isinstance(x, torch.Tensor) else x


def dictconfig_to_munch(d):
    """Convert an object of type OmegaConf to Munch so Wandb can log properly.
    Supports nested dictionaries.
    """
    return Munch({k: dictconfig_to_munch(v) if isinstance(v, DictConfig) else v
                  for k, v in d.items()})


def munch_to_dictconfig(m):
    return DictConfig({k: munch_to_dictconfig(v) if isinstance(v, Munch) else v
                       for k, v in m.items()})
butterfly-master
convolution/utils.py
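The two converters above are inverses on nested configs, which is what lets Wandb log Hydra configs. A quick round-trip check:

from omegaconf import OmegaConf
from utils import dictconfig_to_munch, munch_to_dictconfig

cfg = OmegaConf.create({"train": {"lr": 1e-3}, "seed": 0})
m = dictconfig_to_munch(cfg)     # nested Munch, safe to hand to wandb
assert m.train.lr == 1e-3
cfg2 = munch_to_dictconfig(m)    # back to a DictConfig
assert cfg2.train.lr == 1e-3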
from pathlib import Path PROJECT_ROOT = Path(__file__).parent.absolute() import os # Add to $PYTHONPATH so that ray workers can see os.environ['PYTHONPATH'] = str(PROJECT_ROOT) + ":" + os.environ.get('PYTHONPATH', '') import torch import pytorch_lightning as pl import hydra from omegaconf import OmegaConf import models import datamodules import tasks from pl_runner import pl_train from tee import StdoutTee, StderrTee class LightningModel(pl.LightningModule): def __init__(self, model_cfg, dataset_cfg, train_cfg): super().__init__() self.save_hyperparameters() self.dataset_cfg = dataset_cfg self.train_cfg = train_cfg self.model_cfg = model_cfg self.datamodule = hydra.utils.instantiate(dataset_cfg, batch_size=train_cfg.batch_size) self.model = hydra.utils.instantiate(model_cfg, num_classes=self.datamodule.num_classes) self.task = hydra.utils.instantiate(self.train_cfg.task) def forward(self, input): return self.model.forward(input) def training_step(self, batch, batch_idx, prefix='train'): batch_x, batch_y = batch out = self.forward(batch_x) loss = self.task.loss(out, batch_y) metrics = self.task.metrics(out, batch_y) metrics = {f'{prefix}_{k}': v for k, v in metrics.items()} self.log(f'{prefix}_loss', loss, on_epoch=True, prog_bar=False) self.log_dict(metrics, on_epoch=True, prog_bar=True) return loss def validation_step(self, batch, batch_idx): return self.training_step(batch, batch_idx, prefix='val') def test_step(self, batch, batch_idx): return self.training_step(batch, batch_idx, prefix='test') def configure_optimizers(self): # Very important that the twiddle factors shouldn't have weight decay structured_params = filter(lambda p: getattr(p, '_is_structured', False), self.model.parameters()) unstructured_params = filter(lambda p: not getattr(p, '_is_structured', False), self.model.parameters()) params_dict = [{'params': structured_params, 'weight_decay': 0.0}, {'params': unstructured_params}] optimizer = hydra.utils.instantiate(self.train_cfg.optimizer, params_dict) if 'lr_scheduler' not in self.train_cfg: return optimizer else: lr_scheduler = hydra.utils.instantiate(self.train_cfg.lr_scheduler, optimizer) return [optimizer], [lr_scheduler] @hydra.main(config_path="cfg", config_name="config.yaml") def main(cfg: OmegaConf): with StdoutTee('train.stdout'), StderrTee('train.stderr'): print(OmegaConf.to_yaml(cfg)) if cfg.runner.name == 'pl': trainer, model = pl_train(cfg, LightningModel) else: assert cfg.runner.name == 'ray', 'Only pl and ray runners are supported' # Shouldn't need to install ray unless doing distributed training from ray_runner import ray_train ray_train(cfg, LightningModel) if __name__ == "__main__": main()
butterfly-master
convolution/train.py
from .cifar import *
butterfly-master
convolution/datamodules/__init__.py
from pathlib import Path
current_dir = Path(__file__).parent.absolute()

from torchvision import transforms, datasets
from pl_bolts.datamodules import CIFAR10DataModule


class CIFAR10(CIFAR10DataModule):

    def __init__(self, data_dir=current_dir, extra_augment=True, **kwargs):
        super().__init__(data_dir, **kwargs)
        if extra_augment:
            augment_list = [
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
            ]
            # By default it only converts to Tensor and normalizes
            self.train_transforms = transforms.Compose(augment_list + self.default_transforms().transforms)


class CIFAR100(CIFAR10):
    name = 'cifar100'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.DATASET = datasets.CIFAR100

    @property
    def num_classes(self):
        return 100

    def default_transforms(self):
        cf100_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
        ])
        return cf100_transforms
butterfly-master
convolution/datamodules/cifar.py
import torch.nn as nn
import torch.nn.functional as F

from .lenet import LeNetPadded
from .kops import KOP2d
from .lops import LOP2d


class ButterfLeNet(LeNetPadded):
    name = 'butterflenet'

    def __init__(self, num_classes=10, pooling_mode='avg', butterfly=True, **kwargs):
        nn.Module.__init__(self)
        in_size = 32
        self.butterfly = butterfly
        cls = KOP2d if butterfly else LOP2d
        self.conv1 = cls(in_size, 3, 6, 5, **kwargs)
        self.conv2 = cls(in_size // 2, 6, 16, 5, **kwargs)
        self.fc1 = nn.Linear(16 * 8 * 8, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)
        assert pooling_mode in ['avg', 'max']
        self.pool2d = F.avg_pool2d if pooling_mode == 'avg' else F.max_pool2d
butterfly-master
convolution/models/butterflenet.py
'''ResNet in PyTorch. Small variants for CIFAR Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 ''' import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init __all__ = ['ResNet8', 'ResNet14', 'ResNet20', 'ResNet32', 'ResNet44', 'ResNet56', 'ResNet110', 'ResNet1202'] def _weights_init(m): if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight) class LambdaLayer(nn.Module): def __init__(self, lambd): super(LambdaLayer, self).__init__() self.lambd = lambd def forward(self, x): return self.lambd(x) class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, option='A'): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: if option == 'A': """ For CIFAR10 ResNet paper uses option A. """ self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0)) elif option == 'B': self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() self.in_planes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2) self.linear = nn.Linear(64, num_classes) self.apply(_weights_init) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1]*(num_blocks-1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = F.avg_pool2d(out, out.size()[3]) out = out.view(out.size(0), -1) out = self.linear(out) return out class ResNet8(ResNet): name = 'resnet8' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [1, 1, 1], num_classes) class ResNet14(ResNet): name = 'resnet14' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [2, 2, 2], num_classes) class ResNet20(ResNet): name = 'resnet20' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [3, 3, 3], num_classes) class ResNet32(ResNet): name = 'resnet32' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [5, 5, 5], num_classes) class ResNet44(ResNet): name = 'resnet44' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [7, 7, 7], num_classes) class ResNet56(ResNet): name = 'resnet56' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [9, 9, 9], num_classes) class ResNet110(ResNet): name = 'resnet110' def __init__(self, num_classes=10): return 
super().__init__(BasicBlock, [18, 18, 18], num_classes) class ResNet1202(ResNet): name = 'resnet1202' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [200, 200, 200], num_classes) def test(net): import numpy as np total_params = 0 for x in filter(lambda p: p.requires_grad, net.parameters()): total_params += np.prod(x.data.numpy().shape) print("Total number of params", total_params) print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters())))) if __name__ == "__main__": for net_name in __all__: print(net_name) test(globals()[net_name]()) print()
butterfly-master
convolution/models/resnet_cifar.py
'''Baseline CNN in PyTorch.'''
# Adapted from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
import torch.nn as nn

from .cnn5 import CNN5
from .kops import KOP2d


class CNN5Butterfly(CNN5):
    name = 'cnn5butterfly'

    def __init__(self, num_channels=32, num_classes=10, **kwargs):
        nn.Module.__init__(self)
        self.num_channels = num_channels
        in_size = 32
        self.conv1 = KOP2d(in_size, 3, num_channels, 3, **kwargs)
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.conv2 = KOP2d(in_size // 2, num_channels, num_channels * 2, 3, **kwargs)
        self.bn2 = nn.BatchNorm2d(num_channels * 2)
        self.conv3 = KOP2d(in_size // 4, num_channels * 2, num_channels * 4, 3, **kwargs)
        self.bn3 = nn.BatchNorm2d(num_channels * 4)
        # 2 fully connected layers to transform the output of the convolution layers to the final output
        self.fc1 = nn.Linear(4 * 4 * num_channels * 4, num_channels * 4)
        self.fcbn1 = nn.BatchNorm1d(num_channels * 4)
        self.fc2 = nn.Linear(num_channels * 4, num_classes)
butterfly-master
convolution/models/cnn5_butterfly.py
from .lenet import *
from .resnet import *
from .resnet_cifar import *
from .cnn5 import *
from .butterflenet import *
from .cnn5_butterfly import *
butterfly-master
convolution/models/__init__.py
'''Baseline CNN in PyTorch.'''
# Adapted from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py
import torch.nn as nn
import torch.nn.functional as F


class CNN5(nn.Module):
    name = 'cnn5'

    def __init__(self, num_channels=32, num_classes=10):
        super().__init__()
        self.num_channels = num_channels
        self.conv1 = nn.Conv2d(3, num_channels, 3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.conv2 = nn.Conv2d(num_channels, num_channels * 2, 3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(num_channels * 2)
        self.conv3 = nn.Conv2d(num_channels * 2, num_channels * 4, 3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(num_channels * 4)
        # 2 fully connected layers to transform the output of the convolution layers to the final output
        self.fc1 = nn.Linear(4 * 4 * num_channels * 4, num_channels * 4)
        self.fcbn1 = nn.BatchNorm1d(num_channels * 4)
        self.fc2 = nn.Linear(num_channels * 4, num_classes)

    def forward(self, x):
        # -> batch_size x 3 x 32 x 32
        # we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3
        x = self.bn1(self.conv1(x))         # batch_size x num_channels x 32 x 32
        x = F.relu(F.max_pool2d(x, 2))      # batch_size x num_channels x 16 x 16
        x = self.bn2(self.conv2(x))         # batch_size x num_channels*2 x 16 x 16
        x = F.relu(F.max_pool2d(x, 2))      # batch_size x num_channels*2 x 8 x 8
        x = self.bn3(self.conv3(x))         # batch_size x num_channels*4 x 8 x 8
        x = F.relu(F.max_pool2d(x, 2))      # batch_size x num_channels*4 x 4 x 4
        # flatten the output for each image
        x = x.view(-1, 4 * 4 * self.num_channels * 4)
        # apply 2 fully connected layers
        x = F.relu(self.fcbn1(self.fc1(x)))
        x = self.fc2(x)                     # batch_size x num_classes
        return x
butterfly-master
convolution/models/cnn5.py
'''LeNet in PyTorch.''' import torch.nn as nn import torch.nn.functional as F class LeNet(nn.Module): name = 'lenet' def __init__(self, num_classes=10): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16*5*5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, num_classes) def forward(self, x): out = F.relu(self.conv1(x), inplace=True) out = F.max_pool2d(out, 2) out = F.relu(self.conv2(out), inplace=True) out = F.max_pool2d(out, 2) out = out.view(out.size(0), -1) out = F.relu(self.fc1(out), inplace=True) out = F.relu(self.fc2(out), inplace=True) out = self.fc3(out) return out class LeNetPadded(nn.Module): name = 'lenetpadded' def __init__(self, num_classes=10, padding_mode='circular', pooling_mode='avg'): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5, padding=2, padding_mode=padding_mode) self.conv2 = nn.Conv2d(6, 16, 5, padding=2, padding_mode=padding_mode) self.fc1 = nn.Linear(16*8*8, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, num_classes) assert pooling_mode in ['avg', 'max'] self.pool2d = F.avg_pool2d if pooling_mode == 'avg' else F.max_pool2d def forward(self, x): out = F.relu(self.conv1(x), inplace=True) out = self.pool2d(out, 2) out = F.relu(self.conv2(out), inplace=True) out = self.pool2d(out, 2) out = out.view(out.size(0), -1) out = F.relu(self.fc1(out), inplace=True) out = F.relu(self.fc2(out), inplace=True) out = self.fc3(out) return out
butterfly-master
convolution/models/lenet.py
'''ResNet in PyTorch. For Pre-activation ResNet, see 'preact_resnet.py'. Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 ''' import torch import torch.nn as nn import torch.nn.functional as F __all__ = ['ResNet', 'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152'] class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d( in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion*planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.linear = nn.Linear(512*block.expansion, num_classes) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1]*(num_blocks-1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) out = self.linear(out) return out class ResNet18(ResNet): name = 'resnet18' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [2, 2, 2, 2], num_classes) class ResNet34(ResNet): name = 'resnet34' def __init__(self, num_classes=10): return super().__init__(BasicBlock, [3, 4, 6, 3], num_classes) class ResNet50(ResNet): name = 'resnet50' def __init__(self, num_classes=10): return super().__init__(Bottleneck, [3, 4, 6, 3], num_classes) class 
ResNet101(ResNet):
    name = 'resnet101'
    def __init__(self, num_classes=10):
        return super().__init__(Bottleneck, [3, 4, 23, 3], num_classes)

class ResNet152(ResNet):
    name = 'resnet152'
    def __init__(self, num_classes=10):
        return super().__init__(Bottleneck, [3, 8, 36, 3], num_classes)


def test():
    net = ResNet18()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())

# test()
butterfly-master
convolution/models/resnet.py
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.fft import torch_butterfly from torch_butterfly import Butterfly from torch_butterfly.complex_utils import ComplexLinear from torch_butterfly.complex_utils import Real2Complex, Complex2Real from torch_butterfly.complex_utils import complex_matmul from torch_butterfly.combine import TensorProduct class LOP2d(nn.Module): """Similar to KOP2d, but we use nn.Linear instead of Butterfly. """ def __init__(self, in_size, in_ch, out_ch, kernel_size, complex=True, init='random'): super().__init__() self.in_size = in_size self.in_ch = in_ch self.out_ch = out_ch self.kernel_size = kernel_size self.complex = complex assert init in ['random', 'fft'] if init == 'fft': assert self.complex, 'fft init requires complex=True' self.init = init if isinstance(self.in_size, int): self.in_size = (self.in_size, self.in_size) if isinstance(self.kernel_size, int): self.kernel_size = (self.kernel_size, self.kernel_size) self.padding = (self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2 # Just to use nn.Conv2d's initialization self.weight = nn.Parameter( nn.Conv2d(self.in_ch, self.out_ch, self.kernel_size, padding=self.padding, bias=False).weight.flip([-1, -2]) ) linear_cls = nn.Linear if not complex else ComplexLinear self.Kd, self.K1, self.K2 = [ TensorProduct( linear_cls(self.in_size[-1], self.in_size[-1], bias=False), linear_cls(self.in_size[-2], self.in_size[-2], bias=False) ) for _ in range(3) ] if init == 'fft': eye1 = torch.eye(self.in_size[-1], dtype=torch.complex64) eye2 = torch.eye(self.in_size[-2], dtype=torch.complex64) # These are symmetric so we don't have to take transpose fft_mat1 = torch.fft.fft(eye1, norm='ortho') fft_mat2 = torch.fft.fft(eye2, norm='ortho') ifft_mat1 = torch.fft.ifft(eye1, norm='ortho') ifft_mat2 = torch.fft.ifft(eye2, norm='ortho') with torch.no_grad(): self.Kd.map1.weight.copy_(fft_mat1) self.Kd.map2.weight.copy_(fft_mat2) self.K1.map1.weight.copy_(fft_mat1) self.K1.map2.weight.copy_(fft_mat2) self.K2.map1.weight.copy_(ifft_mat1) self.K2.map2.weight.copy_(ifft_mat2) with torch.no_grad(): self.Kd.map1.weight *= math.sqrt(self.in_size[-1]) self.Kd.map2.weight *= math.sqrt(self.in_size[-2]) self.Kd.map1.weight._is_structured = True self.Kd.map2.weight._is_structured = True self.K1.map1.weight._is_structured = True self.K1.map2.weight._is_structured = True self.K2.map1.weight._is_structured = True self.K2.map2.weight._is_structured = True if complex: self.Kd = nn.Sequential(Real2Complex(), self.Kd) self.K1 = nn.Sequential(Real2Complex(), self.K1) self.K2 = nn.Sequential(self.K2, Complex2Real()) def forward(self, x): w = F.pad(self.weight, (0, self.in_size[-1] - self.kernel_size[-1])).roll(-self.padding[-1], dims=-1) w = F.pad(w, (0, 0, 0, self.in_size[-2] - self.kernel_size[-2])).roll(-self.padding[-2], dims=-2) # (batch, in_ch, h, w) x_f = self.K1(x) # (out_ch, in_ch, h, w) w_f = self.Kd(w) # prod = (x_f.unsqueeze(1) * w_f).sum(dim=2) prod = complex_matmul(x_f.permute(2, 3, 0, 1), w_f.permute(2, 3, 1, 0)).permute(2, 3, 0, 1) out = self.K2(prod) return out
butterfly-master
convolution/models/lops.py
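LOP2d's fft init exploits the convolution theorem: pointwise multiplication in the Fourier domain equals circular convolution in the spatial domain. A self-contained numerical check (brute force, small n, our own illustration):

import torch

n = 8
x = torch.randn(n, n)
w = torch.randn(n, n)
direct = torch.zeros(n, n)
for i in range(n):
    for j in range(n):
        for a in range(n):
            for b in range(n):
                direct[i, j] += x[a, b] * w[(i - a) % n, (j - b) % n]
fourier = torch.fft.ifft2(torch.fft.fft2(x) * torch.fft.fft2(w)).real
assert torch.allclose(direct, fourier, atol=1e-4)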
import unittest

import torch
import torch.nn as nn

from kops import KOP2d


class KOP2dTest(unittest.TestCase):

    def setUp(self):
        self.rtol = 1e-4
        self.atol = 1e-5

    def test_fft_init(self):
        batch_size = 10
        in_ch, out_ch = 3, 6
        for in_size in [(32, 32), (16, 16), (32, 16), (16, 32)]:
            for nblocks in [1, 2, 3]:
                for base in [2, 4]:
                    kop = KOP2d(in_size, in_ch, out_ch, 5, init='fft', nblocks=nblocks, base=base)
                    x = torch.randn(batch_size, in_ch, *in_size)
                    conv = nn.Conv2d(in_ch, out_ch, 5, padding=2, padding_mode='circular', bias=False)
                    with torch.no_grad():
                        conv.weight.copy_(kop.weight.flip([-1, -2]))
                    self.assertTrue(torch.allclose(kop(x), conv(x), self.rtol, self.atol))


if __name__ == "__main__":
    unittest.main()
butterfly-master
convolution/models/test_kops.py
import math import torch import torch.nn as nn import torch.nn.functional as F import torch_butterfly from torch_butterfly import Butterfly from torch_butterfly.complex_utils import Real2Complex, Complex2Real from torch_butterfly.complex_utils import complex_matmul from torch_butterfly.combine import TensorProduct from torch_butterfly.permutation import bitreversal_permutation class KOP2d(nn.Module): def __init__(self, in_size, in_ch, out_ch, kernel_size, complex=True, init='ortho', nblocks=1, base=2, zero_pad=True): super().__init__() self.in_size = in_size self.in_ch = in_ch self.out_ch = out_ch self.kernel_size = kernel_size self.complex = complex assert init in ['ortho', 'fft'] if init == 'fft': assert self.complex, 'fft init requires complex=True' self.init = init self.nblocks = nblocks assert base in [2, 4] self.base = base self.zero_pad = zero_pad if isinstance(self.in_size, int): self.in_size = (self.in_size, self.in_size) if isinstance(self.kernel_size, int): self.kernel_size = (self.kernel_size, self.kernel_size) self.padding = (self.kernel_size[0] - 1) // 2, (self.kernel_size[1] - 1) // 2 # Just to use nn.Conv2d's initialization self.weight = nn.Parameter( nn.Conv2d(self.in_ch, self.out_ch, self.kernel_size, padding=self.padding, bias=False).weight.flip([-1, -2]) ) increasing_strides = [False, False, True] inits = ['ortho'] * 3 if self.init == 'ortho' else ['fft_no_br', 'fft_no_br', 'ifft_no_br'] self.Kd, self.K1, self.K2 = [ TensorProduct( Butterfly(self.in_size[-1], self.in_size[-1], bias=False, complex=complex, increasing_stride=incstride, init=i, nblocks=nblocks), Butterfly(self.in_size[-2], self.in_size[-2], bias=False, complex=complex, increasing_stride=incstride, init=i, nblocks=nblocks) ) for incstride, i in zip(increasing_strides, inits) ] with torch.no_grad(): self.Kd.map1 *= math.sqrt(self.in_size[-1]) self.Kd.map2 *= math.sqrt(self.in_size[-2]) if self.zero_pad and self.complex: # Instead of zero-padding and calling weight.roll(-self.padding[-1], dims=-1) and # weight.roll(-self.padding[-2], dims=-2), we multiply self.Kd by complex exponential # instead, using the Shift theorem. # https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Shift_theorem with torch.no_grad(): n1, n2 = self.Kd.map1.n, self.Kd.map2.n device = self.Kd.map1.twiddle.device br1 = bitreversal_permutation(n1, pytorch_format=True).to(device) br2 = bitreversal_permutation(n2, pytorch_format=True).to(device) diagonal1 = torch.exp(1j * 2 * math.pi / n1 * self.padding[-1] * torch.arange(n1, device=device))[br1] diagonal2 = torch.exp(1j * 2 * math.pi / n2 * self.padding[-2] * torch.arange(n2, device=device))[br2] # We multiply the 1st block instead of the last block (only the first block is not # the identity if init=fft). This seems to perform a tiny bit better. # If init=ortho, this won't correspond exactly to rolling the weight. 
self.Kd.map1.twiddle[:, 0, -1, :, 0, :] *= diagonal1[::2].unsqueeze(-1) self.Kd.map1.twiddle[:, 0, -1, :, 1, :] *= diagonal1[1::2].unsqueeze(-1) self.Kd.map2.twiddle[:, 0, -1, :, 0, :] *= diagonal2[::2].unsqueeze(-1) self.Kd.map2.twiddle[:, 0, -1, :, 1, :] *= diagonal2[1::2].unsqueeze(-1) if base == 4: self.Kd.map1, self.Kd.map2 = self.Kd.map1.to_base4(), self.Kd.map2.to_base4() self.K1.map1, self.K1.map2 = self.K1.map1.to_base4(), self.K1.map2.to_base4() self.K2.map1, self.K2.map2 = self.K2.map1.to_base4(), self.K2.map2.to_base4() if complex: self.Kd = nn.Sequential(Real2Complex(), self.Kd) self.K1 = nn.Sequential(Real2Complex(), self.K1) self.K2 = nn.Sequential(self.K2, Complex2Real()) def forward(self, x): # (batch, in_ch, h, w) x_f = self.K1(x) # (out_ch, in_ch, h, w) # w_f = self.Kd(self.weight) * math.sqrt(self.in_size[0] * self.in_size[1]) # w_f = self.Kd(self.weight) w_f = self.Kd(self.weight) # prod = (x_f.unsqueeze(1) * w_f).sum(dim=2) prod = complex_matmul(x_f.permute(2, 3, 0, 1), w_f.permute(2, 3, 1, 0)).permute(2, 3, 0, 1) out = self.K2(prod) return out
butterfly-master
convolution/models/kops.py
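The zero_pad branch in KOP2d uses the DFT shift theorem: multiplying the spectrum by exp(1j·2πks/n) equals rolling the signal by -s. A standalone one-dimensional check of that identity (our own sketch, separate from the bit-reversal bookkeeping in the module):

import math
import torch

n, s = 8, 2
x = torch.randn(n, dtype=torch.complex64)
k = torch.arange(n, dtype=torch.float64)
phase = torch.exp(1j * 2 * math.pi * s * k / n).to(torch.complex64)
lhs = torch.fft.fft(torch.roll(x, -s))   # spectrum of the shifted signal
rhs = torch.fft.fft(x) * phase           # original spectrum times the phase ramp
assert torch.allclose(lhs, rhs, atol=1e-5)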
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. from configs import Config ########### running ########### # torchrun --nproc_per_node=8 main.py <config> def eval_yfcc15m_in1k_mocob16(): return Config( output_dir="yfcc15m_in1k_mocob16", eval=True, resume="checkpoint-best.pth", dataset="yfcc15m_tag", metadata="data/yfcc15m/yfcc15m_w_tag.pkl", root="data/yfcc15m", trainable_weight="head-all", batch_size=1024, max_bert_length=32, max_update=5000, weight_decay=0.2, head_weight_decay=1., eval_steps=500, curate=100, min_ratio=0.003, extra_prompt=True, aug_tag=True, nodes=1, ngpus=1, ) def yfcc15m_in1k_mocob16(): return Config( val_task="imagenet", dataset="yfcc15m_tag", metadata="data/yfcc15m/yfcc15m_w_tag.pkl", root="data/yfcc15m", trainable_weight="head-all", batch_size=1024, max_bert_length=32, max_update=5000, weight_decay=0.2, head_weight_decay=1., eval_steps=500, curate=100, min_ratio=0.003, extra_prompt=True, aug_tag=True, nodes=2, ngpus=8, ) def yfcc100m_in1k_mocob16(): return Config( val_task="imagenet", dataset="yfcc100m_tag", metadata="data/yfcc100m/yfcc100m_image_ids.pkl", root="/datasets01/yfcc100m/090517", trainable_weight="head-all", batch_size=1024, max_bert_length=32, max_update=5000, weight_decay=0.2, head_weight_decay=1., eval_steps=500, curate=100, thres=0.7, sublist=True, min_ratio=0.01, extra_prompt=True, aug_tag=True, nodes=2, ngpus=8, )
CiT-main
run_configs.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. import os import inspect from collections import OrderedDict class Config: dataset = "yfcc15m_tag" root = "data/yfcc15m" metadata = "data/yfcc15m/yfcc15m_w_tag.pkl" # data adaptation val_task = "imagenet" max_sample = None thres = 0.55 num_workers = 6 # model # model = "moco-bert" max_bert_length = 32 trainable_weight = "head-all" vision_backbone = "moco" vision_pretrained = "pretrained_models/moco_hf" text_backbone = "bert" text_pretrained = "princeton-nlp/unsup-simcse-bert-base-uncased" output_root = "runs" # training fp16 = True lr = 5e-4 warmup_div = 25 min_lr = 1e-5 weight_decay = 0.2 head_weight_decay = 1. device = "cuda" dist_eval = False accum_iter = 1 eval = False pin_mem = False resume = None clip_grad = None loss = "CiTCLIPLossGrad" curate = 0 # evaluate use_template = True patience = None eval_steps = 500 seed = 0 dist_on_itp = False log_dir = None def __init__(self, **kwargs): for key in kwargs: setattr(self, key, kwargs[key]) if not hasattr(self, "warmup_steps"): self.warmup_steps = int(self.max_update / self.warmup_div) # TODO move this to main? if not hasattr(self, "output_dir"): self.output_dir = inspect.stack()[1][3] self.output_dir = os.path.join(self.output_root, self.output_dir) print("config.output_dir =", self.output_dir) def add_cmd_args(self, cmd_args): for key, value in vars(cmd_args).items(): if not key.startswith("__") and value is not None: setattr(self, key, value) return self def __str__(self): return "\n".join([f"{k}={v}" for k, v in vars(self).items()]) def build_from_sweep_config(sweep_config): sweep_dict = OrderedDict() key_to_short = OrderedDict() key_to_card = OrderedDict() sweep_name = sweep_config.__name__ cards = 1 for key, value in vars(sweep_config).items(): if not key.startswith("__"): sweep_dict[key] = value[0] if isinstance(value, tuple) else value cards *= len(sweep_dict[key]) key_to_card[key] = len(sweep_dict[key]) key_to_short[key] = value[1] if isinstance(value, tuple) else "" all_update_dicts = [] for sweep_idx in range(cards): key_to_idx = OrderedDict() for key in key_to_card: key_to_idx[key] = sweep_idx % key_to_card[key] sweep_idx = sweep_idx // key_to_card[key] update_dict = OrderedDict() for key, idx in key_to_idx.items(): update_dict[key] = sweep_dict[key][idx] update_dict["output_dir"] = "_".join([value+str(update_dict[key]).replace("/", ".") for key, value in key_to_short.items()]) update_dict["output_dir"] = os.path.join(sweep_name, update_dict["output_dir"]) all_update_dicts.append(update_dict) assert len(all_update_dicts) == cards return all_update_dicts
CiT-main
configs.py
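How build_from_sweep_config enumerates a sweep is easiest to see on a tiny hypothetical sweep class (the field names here are invented; the (values, short_name) tuple convention matches sweeps.py):

from configs import build_from_sweep_config

class demo_sweep:                      # hypothetical 2 x 1 sweep
    lr = [1e-3, 1e-4], "lr"
    max_update = [1000], "s"

for update_dict in build_from_sweep_config(demo_sweep):
    print(update_dict["output_dir"])
# -> demo_sweep/lr0.001_s1000
# -> demo_sweep/lr0.0001_s1000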
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. import torch from transformers import VisionTextDualEncoderModel class CiTCLIPVisionTextDualEncoderModel(VisionTextDualEncoderModel): '''a hf model wrapper to support forward with either or both image/text. note that HF impl. uses an artificial pooler that most pre-trained models (e.g., ViT) don't have. # LiT directly uses [CLS] token for both vision and language. text: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/models_lit.py#L62 vision: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/models_vit.py#L283 configs of LiT: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/configs/models.py#L319 ''' def forward( self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, return_loss=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, skip_text_projection=False, split=1, **kwargs, ): image_embeds, text_embeds = None, None if pixel_values is not None: if split > 1: # TODO: test if can merge these two branch. vision_outputs = [] for splitted_pixel_values in torch.split(pixel_values, pixel_values.size(0) // split): vision_outputs.append( self.vision_model( pixel_values=splitted_pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )[1] ) image_embeds = torch.cat(vision_outputs, dim=0) else: vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] # pooler_output image_embeds = self.visual_projection(image_embeds) if input_ids is not None: text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # SimCSE uses pooler as tanh as in HF. text_embeds = text_outputs[1] # pooler_output if not skip_text_projection: text_embeds = self.text_projection(text_embeds) # cosine similarity as logits logit_scale = self.logit_scale.exp() return {"text_embeds": text_embeds, "image_embeds": image_embeds, "logit_scale": logit_scale} def build_model(args): import os import hfmodels from transformers import AutoTokenizer os.environ["TOKENIZERS_PARALLELISM"] = "false" print(f"creating model: {args.vision_backbone}-{args.text_backbone}") model = CiTCLIPVisionTextDualEncoderModel.from_vision_text_pretrained( # VisionTextDualEncoderModel args.vision_pretrained, # we dump simclr/moco into HF format. args.text_pretrained, # all text models are in HF. # vision_model= ... your own model is not HF format. projection_dim=args.projection_dim if hasattr(args, "projection_dim") else 512 ) tokenizer = AutoTokenizer.from_pretrained(args.text_pretrained, use_fast=True) return model, tokenizer
CiT-main
models_citclip.py
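The wrapper's forward accepts either modality alone. A text-only sketch, assuming model and tokenizer come from build_model(args) with the repo's default pretrained paths (downloads required, so treat this as pseudocode):

# model, tokenizer = build_model(args)
batch = tokenizer(["a photo of a dog"], return_tensors="pt", padding=True)
out = model(input_ids=batch["input_ids"],
            attention_mask=batch["attention_mask"],
            token_type_ids=batch.get("token_type_ids"))
# out["image_embeds"] is None; out["text_embeds"] has shape (1, projection_dim).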
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # A script to run multinode training with submitit. # -------------------------------------------------------- # Copyright (c) Meta Platforms, Inc. All Rights Reserved import argparse import os import uuid from pathlib import Path import submitit def parse_args(): parser = argparse.ArgumentParser("Submitit for adaptation") parser.add_argument("sweep", type=str, help="name of a sweep.") parser.add_argument("--ngpus", default=1, type=int, help="Number of gpus to request on each node") parser.add_argument("--nodes", default=1, type=int, help="Number of nodes to request") parser.add_argument("--resume", default=None, type=str, help="resume a checkpoint.") parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job") parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit") parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler") args = parser.parse_args() return args def get_shared_folder() -> Path: user = os.getenv("USER") if Path("/checkpoint/").is_dir(): p = Path(f"/checkpoint/{user}/adaclip") p.mkdir(exist_ok=True) return p raise RuntimeError("No shared folder available") def get_init_file(): # Init file must not exist, but it's parent dir must exist. os.makedirs(str(get_shared_folder()), exist_ok=True) init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" if init_file.exists(): os.remove(str(init_file)) return init_file class Trainer(object): def __init__(self, args): self.args = args self.args.config.dist_url = get_init_file().as_uri() def __call__(self): self._setup_gpu_args() import main main.main(self.args.config) def checkpoint(self): import os import submitit self.args.config.dist_url = get_init_file().as_uri() checkpoint_file = os.path.join(self.args.config.output_dir, "checkpoint-last.pth") if os.path.exists(checkpoint_file): self.args.config.resume = checkpoint_file print("Requeuing ", self.args) empty_trainer = type(self)(self.args) return submitit.helpers.DelayedSubmission(empty_trainer) def _setup_gpu_args(self): import submitit import os from pathlib import Path job_env = submitit.JobEnvironment() if self.args.ngpus >= 1: # self.args.config.seed += job_env.global_rank # assert 'SLURM_PROCID' in os.environ: self.args.config.local_rank = job_env.local_rank self.args.config.rank = job_env.global_rank self.args.config.world_size = job_env.num_tasks print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") def main(args): if args.job_dir == "": args.job_dir = get_shared_folder() assert args.job_dir != "" args.job_dir = Path(args.job_dir) / "%j" # Note that the folder will depend on the job_id, to easily track experiments executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) num_gpus_per_node = args.ngpus nodes = args.nodes timeout_min = args.timeout partition = args.partition kwargs = {} kwargs['slurm_constraint'] = 'volta32gb' if args.comment: kwargs['slurm_comment'] = args.comment executor.update_parameters( mem_gb= 160 * num_gpus_per_node, # if "yfcccc12m" not in args.config.output_dir else 120 * num_gpus_per_node, gpus_per_node=num_gpus_per_node, 
tasks_per_node=num_gpus_per_node, cpus_per_task=7, nodes=nodes, timeout_min=timeout_min, # Below are cluster dependent parameters slurm_partition=partition, slurm_signal_delay_s=120, **kwargs ) executor.update_parameters(name=os.path.basename(args.config.output_dir)) trainer = Trainer(args) job = executor.submit(trainer) print("Submitted job_id:", job.job_id, "@", str(args.job_dir).replace("%j", job.job_id)) def submit(): args = parse_args() import sweeps import run_configs import configs from copy import deepcopy if hasattr(sweeps, args.sweep): print(f"sweeping {args.sweep} in `sweeps.py`") sweep_config = getattr(sweeps, args.sweep) all_update_dicts = configs.build_from_sweep_config(sweep_config) for update_dict in all_update_dicts: _args = deepcopy(args) config = configs.Config(**update_dict) if args.resume is not None: config.resume = args.resume setattr(_args, "config", config) if hasattr(config, "ngpus"): _args.ngpus = config.ngpus if hasattr(config, "nodes"): _args.nodes = config.nodes _args.job_dir = config.output_dir main(_args) elif hasattr(run_configs, args.sweep): print(f"launch {args.sweep} in `run_configs.py`") config = getattr(run_configs, args.sweep)() _args = deepcopy(args) if args.resume is not None: config.resume = args.resume setattr(_args, "config", config) if hasattr(config, "ngpus"): _args.ngpus = config.ngpus if hasattr(config, "nodes"): _args.nodes = config.nodes _args.job_dir = config.output_dir main(_args) if __name__ == "__main__": submit()
CiT-main
submitit_citclip.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # DeiT: https://github.com/facebookresearch/deit # BEiT: https://github.com/microsoft/unilm/tree/master/beit # -------------------------------------------------------- # Copyright (c) Meta Platforms, Inc. All Rights Reserved import math import sys import json import torch import util.misc as misc import util.lr_sched as lr_sched from typing import Iterable from collections import defaultdict def to_device(samples, device, args): inputs = {} for key in samples: if key not in ["image_ids", "captions", "__key__"]: inputs[key] = samples[key].to(device, non_blocking=True) if key == "pixel_values" and inputs[key].dtype == torch.uint8: from main import get_mean_std # inmem data. normalize it. inputs[key] = inputs[key].to(torch.float32).div_(255.) # b, 3, 224, 224 mean, std = get_mean_std(args) mean = torch.as_tensor(mean, device=inputs[key].device)[None, :, None, None] std = torch.as_tensor(std, device=inputs[key].device)[None, :, None, None] inputs[key] = inputs[key].sub_(mean).div_(std) return inputs @torch.no_grad() def evaluate(args, model, val_transform, tokenizer): from clipeval import datasets, eval_zeroshot catalog, all_templates, all_labels = eval_zeroshot.load_metadata("clipeval") if args.val_task is None or args.val_task in ["mt", "imagenet21k", "imagenet1k"]: # infer val_task for multitasking. val_task = "imagenet" else: val_task = args.val_task metrics = {} for d in catalog: # assume multitask on CLIP suite by default and early stop if IN only. if not args.eval and d != val_task: # training only eval on val_task. continue if args.eval and args.val_task not in ["mt", "imagenet21k", "imagenet1k"] and d != val_task: continue val_dataset = datasets.get_downstream_dataset( catalog, d, is_train=False, transform=val_transform) templates = all_templates[d] labels = all_labels[d] if args.val_task not in ["mt", "imagenet21k", "imagenet1k"] and (hasattr(args, "extra_prompt") and args.extra_prompt) and d == "imagenet": # not eval MT in LiT setup. templates.extend(["A photo of a {}", "{}"]) # see LiT page 16. val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size//2, shuffle=False, num_workers=args.num_workers, pin_memory=False, drop_last=False) if not args.use_template: templates = ["{}"] metric = eval_zeroshot.evaluate(d, val_loader, templates, labels, model, tokenizer, args.max_bert_length, False) metrics[d] = metric if args.eval: json_str = json.dumps({"task": d, "acc": metric}) misc.print_json(args.output_dir, json_str) return metrics if len(metrics) > 1 else metrics[val_task] # be compatible for ImageNet only evaluation. 
def append_dataset(dataset, batch, mask_selector, batch_size): if "pixel_values" in batch: assert batch["pixel_values"].dtype == torch.uint8 if mask_selector.sum().item() == 0: return assert len(dataset[-1]["image_ids"]) <= batch_size if len(dataset[-1]["image_ids"]) == batch_size: dataset.append(defaultdict(list)) batch_len = len(batch["image_ids"]) for key in batch: assert batch_len == len(batch[key]) for ix, selected in enumerate(mask_selector): if selected: dataset[-1][key].append(batch[key][ix]) while len(dataset[-1]["image_ids"]) >= batch_size: last_batch = dataset[-1] new_batch = {} for key in last_batch: value = last_batch[key] if len(value) >= batch_size: last_batch[key] = value[:batch_size] if torch.is_tensor(value[0]): last_batch[key] = torch.stack(last_batch[key]) if len(value) > batch_size: new_batch[key] = value[batch_size:] if new_batch: dataset.append(new_batch) else: return def train_one_epoch(model: torch.nn.Module, model_without_ddp, criterion: torch.nn.Module, tokenizer, data_loader: Iterable, data_loader_val: Iterable, val_transform, best_acc, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, step, loss_scaler, eff_batch_size, max_norm: float = 0, # mixup_fn: Optional[Mixup] = None, log_writer=None, args=None): model.train(True) metric_logger = misc.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) print_freq = 20 accum_iter = args.accum_iter optimizer.zero_grad() # assuming data_loader is either a real dataloader or inmem as a list of batches? for data_iter_step, samples in enumerate(metric_logger.log_every(data_loader, print_freq, header, args.max_update)): if step[0] > args.max_update: break # we use a per iteration (instead of per epoch) lr scheduler if data_iter_step % accum_iter == 0: lr_sched.adjust_step_learning_rate(optimizer, step[0], args.lr, args.min_lr, args.warmup_steps, args.max_update) inputs = to_device(samples, device, args) with torch.cuda.amp.autocast(enabled=args.fp16): outputs = model(**inputs) loss = criterion(**outputs) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) loss /= accum_iter update_grad = (data_iter_step + 1) % accum_iter == 0 loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=False, update_grad=update_grad) if update_grad: step[0] += 1 optimizer.zero_grad() torch.cuda.synchronize() metric_logger.update(loss=loss_value) min_lr = 10. max_lr = 0. 
for group in optimizer.param_groups: min_lr = min(min_lr, group["lr"]) max_lr = max(max_lr, group["lr"]) metric_logger.update(lr=max_lr) loss_value_reduce = misc.all_reduce_mean(loss_value) if log_writer is not None: log_writer.add_scalar('lr', max_lr, step[0]) log_writer.add_scalar('loss', loss_value_reduce, step[0]) if step[0] and step[0] % args.eval_steps == 0: metric = evaluate(args, model, val_transform, tokenizer) json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]}) misc.print_json(args.output_dir, json_str) if log_writer is not None: log_writer.add_scalar('acc', metric, step[0]) if isinstance(data_loader, list) or (hasattr(data_loader, "dataset") and isinstance(data_loader.dataset, torch.utils.data.IterableDataset)): misc.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=0, epoch_name="last", best_acc=best_acc[0], step=step[0]) if metric > best_acc[0]: best_acc[0] = metric misc.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=step[0], epoch_name="best", best_acc=best_acc[0], step=step[0]) model.train(True) if step[0] and curate_condition(step[0], args): break # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} def curate_condition(step, args): if args.curate and step % args.curate == 0: return True else: return False def curate_scheduler(step, args): return args.curate def max_sim(logits, thres): logits, idx = logits.max(dim=-1) return logits > thres, idx ratio = 1.0 thres = None def thres_scheduler(step, args): return args.thres def while_condition(example_ids, step, args): if hasattr(args, "inmem") and args.inmem: return len(example_ids) < curate_scheduler(step, args) or (len(example_ids) == curate_scheduler(step, args) and len(example_ids[-1]["image_ids"]) < args.batch_size) else: return len(example_ids) < (curate_scheduler(step, args) * args.batch_size) @torch.no_grad() def iterative_classcurate(step, device, producer_iter, model, tokenizer, args): model.eval() from clipeval import eval_zeroshot catalog, all_templates, all_labels = eval_zeroshot.load_metadata("clipeval") if args.val_task == "mt": labels = set() for d in catalog: for label in all_labels[d]: if isinstance(label, list): for _label in label: labels.add(_label) else: labels.add(label) labels = list(labels) elif args.val_task == "imagenet21k": labels = set() with open("clipeval/imagenet21k_wordnet_lemmas.txt", "r") as fr: for line in fr: labels.add(line.strip()) labels = list(labels) else: d = args.val_task # infer catalog_subsets labels = all_labels[d] templates = ["{}"] if not (hasattr(args, "templatefilter") and args.templatefilter) else all_templates[args.val_task] # no templates for now. 
labels_emb = [] with torch.cuda.amp.autocast(): labels_emb, _, _ = eval_zeroshot.build_text_features( templates, labels, model, tokenizer, args.max_bert_length, skip_text_projection=True) labels_emb = labels_emb.t().to(torch.float32) if hasattr(args, "sublist") and args.sublist: example_ids = [] else: example_ids = set() total_example = 0 global thres thres = thres_scheduler(step[0], args) while while_condition(example_ids, step[0], args): samples = next(producer_iter) image_ids = samples["image_ids"] total_example += len(image_ids) if hasattr(args, "skip_step") and step[0] < args.skip_step: mask_selector = torch.ones((len(image_ids),), dtype=torch.bool) else: inputs = to_device(samples, device, args) with torch.cuda.amp.autocast(): text_embeds = model(**inputs, skip_text_projection=False if hasattr(args, "project_emb") else True)["text_embeds"] text_embeds = text_embeds.to(torch.float32) text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) logits = torch.matmul(text_embeds, labels_emb).cpu() mask_selector, class_idx = max_sim(logits, thres) batch_ratio = float(mask_selector.sum() / len(mask_selector)) if hasattr(args, "min_ratio") and batch_ratio < args.min_ratio: # use topr logic. max_logits, class_idx = logits.max(dim=-1) _, idx = max_logits.topk(dim=-1, k=int(args.min_ratio * logits.size(0))) mask_selector = torch.zeros_like(max_logits, dtype=torch.bool) mask_selector[idx] = True if mask_selector.sum() > 0: assert len(mask_selector.size()) == 1 and len(image_ids) == mask_selector.size(0) filtered_image_ids = [image_ids[_idx] for _idx in range(len(image_ids)) if mask_selector[_idx]] for image_id_field in filtered_image_ids: if hasattr(args, "sublist") and args.sublist: example_ids.append(image_id_field) else: example_ids.add(image_id_field) global ratio ratio = len(example_ids) / total_example misc.print_json(args.output_dir, json.dumps({"step": step[0], "ratio": ratio, "thres": thres})) model.train() return example_ids
CiT-main
engine.py
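The `append_dataset` helper in engine.py above re-buckets mask-selected examples into fixed-size training batches. Below is a minimal, runnable sketch of that re-bucketing idea; the toy batch layout (an `image_ids` list plus one tensor field) is an assumption for illustration, not the exact CiT batch format.

# Minimal sketch of the rebatching idea used by append_dataset above; the toy
# batch layout is illustrative, not the real training batch schema.
import torch
from collections import defaultdict

def rebatch(dataset, batch, mask, batch_size):
    # Skip empty selections; start a fresh bucket once the last one is full.
    if mask.sum().item() == 0:
        return
    if len(dataset[-1]["image_ids"]) == batch_size:
        dataset.append(defaultdict(list))
    for ix, keep in enumerate(mask):
        if keep:
            for key in batch:
                dataset[-1][key].append(batch[key][ix])

dataset = [defaultdict(list)]
batch = {"image_ids": ["a", "b", "c", "d"],
         "input_ids": torch.arange(8).view(4, 2)}
mask = torch.tensor([True, False, True, True])
rebatch(dataset, batch, mask, batch_size=3)
print(len(dataset[-1]["image_ids"]))  # 3 selected examples buffered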
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
pre-configured sweeps.
"""

import json


class alltask_5k_mr005:
    batch_size = [1536], "bsz"
    max_update = [5000], "s"
    refilter = [100], "refilter"
    prefilter = [0.45], ""
    min_ratio = [0.05], "r"
    sublist = [True], ""
    val_task = [d for d in json.load(open("clipeval/dataset_catalog.json")).keys()], ""
    aug_tag = [True], ""
    nodes = [1], ""
    ngpus = [1], ""
CiT-main
sweeps.py
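Each attribute of a sweep class above is a `(values, shorthand)` pair. The launcher that expands these into runs is not part of this snippet, so the grid expansion below is a hypothetical sketch of how such a class could be turned into tagged run configs.

# Hypothetical expansion of a sweep class (like alltask_5k_mr005 above) into a
# grid of run configs; the real launcher is not shown, so this product logic
# is an illustrative assumption.
import itertools

class demo_sweep:
    batch_size = [512, 1024], "bsz"
    max_update = [5000], "s"

def expand(sweep_cls):
    items = {k: v for k, v in vars(sweep_cls).items() if not k.startswith("_")}
    keys = list(items)
    for combo in itertools.product(*[items[k][0] for k in keys]):
        cfg = dict(zip(keys, combo))
        tag = "_".join(f"{items[k][1]}{cfg[k]}" for k in keys if items[k][1])
        yield tag, cfg

for tag, cfg in expand(demo_sweep):
    print(tag, cfg)  # e.g. bsz512_s5000 {'batch_size': 512, 'max_update': 5000}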
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Meta Platforms, Inc. All Rights Reserved import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed as dist from util import misc class AllGather(torch.autograd.Function): @staticmethod def forward(ctx, tensor): output = [torch.empty_like(tensor) for _ in range(misc.get_world_size())] dist.all_gather(output, tensor) ctx.rank = misc.get_rank() ctx.batch_size = tensor.shape[0] return torch.cat(output, 0) @staticmethod def backward(ctx, grad_output): return ( grad_output[ ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1) ], None, ) class CiTCLIPLossGrad(nn.Module): def forward(self, image_embeds, text_embeds, logit_scale): # normalized features image_embeds = F.normalize(image_embeds, dim=-1, p=2) text_embeds = F.normalize(text_embeds, dim=-1, p=2) if misc.get_world_size() > 1: # gather features from all GPUs image_embeds = AllGather.apply(image_embeds) text_embeds = AllGather.apply(text_embeds) # cosine similarity as logits logits_per_image = logit_scale * image_embeds @ text_embeds.t() labels = torch.arange(logits_per_image.size(0), device=image_embeds.device) loss = F.cross_entropy(logits_per_image, labels) return loss class CLIPLossGrad(nn.Module): def forward(self, image_embeds, text_embeds, logit_scale): image_embeds = F.normalize(image_embeds, dim=-1, p=2) text_embeds = F.normalize(text_embeds, dim=-1, p=2) if misc.get_world_size() > 1: # gather features from all GPUs image_embeds = AllGather.apply(image_embeds) text_embeds = AllGather.apply(text_embeds) # cosine similarity as logits logits_per_image = logit_scale * image_embeds @ text_embeds.t() logits_per_text = logit_scale * text_embeds @ image_embeds.t() labels = torch.arange(logits_per_image.size(0), device=image_embeds.device) loss = (F.cross_entropy(logits_per_image, labels) + \ F.cross_entropy(logits_per_text, labels)) / 2. return loss
CiT-main
losses.py
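With a single process, the `AllGather` branch in the losses above is skipped and `CLIPLossGrad` reduces to plain symmetric InfoNCE. A self-contained sketch on toy embeddings; the sizes and the `logit_scale` value are arbitrary placeholders.

# Single-process version of the symmetric contrastive loss computed by
# CLIPLossGrad above (world_size == 1, so no gather).
import torch
import torch.nn.functional as F

torch.manual_seed(0)
image_embeds = F.normalize(torch.randn(8, 64), dim=-1, p=2)
text_embeds = F.normalize(torch.randn(8, 64), dim=-1, p=2)
logit_scale = 100.0  # stands in for the exp of a learned log-temperature

logits_per_image = logit_scale * image_embeds @ text_embeds.t()
logits_per_text = logits_per_image.t()
labels = torch.arange(logits_per_image.size(0))
loss = (F.cross_entropy(logits_per_image, labels)
        + F.cross_entropy(logits_per_text, labels)) / 2.
print(loss.item())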
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # DeiT: https://github.com/facebookresearch/deit # BEiT: https://github.com/microsoft/unilm/tree/master/beit # -------------------------------------------------------- # Copyright (c) Meta Platforms, Inc. All Rights Reserved import argparse import datetime import numpy as np import os import time import json from pathlib import Path import torch import torch.backends.cudnn as cudnn from collections import defaultdict import losses import util.misc as misc from util.misc import NativeScalerWithGradNormCount as NativeScaler from models_citclip import build_model from engine import train_one_epoch, evaluate, iterative_classcurate from weights import freeze_model def get_mean_std(args): if "augreg" in args.vision_backbone or "augreg" in args.vision_pretrained: mean = [0.5, 0.5, 0.5] std = [0.5, 0.5, 0.5] else: mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] return mean, std def get_val_transform(args): """moved from SLIP's eval_zeroshot.py""" import torchvision.transforms as transforms mean, std = get_mean_std(args) print(args.vision_backbone, "val_normalizer", mean, std) return transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), lambda x: x.convert('RGB'), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ]) def get_train_transform(args): import torchvision.transforms as transforms trans = [transforms.RandomResizedCrop(224, scale=(0.5, 1.0))] if hasattr(args, "inmem") and args.inmem: # use in-mem training / no dataloader for consumer dataset. from torchvision.transforms.functional import pil_to_tensor trans.append(pil_to_tensor) else: trans.append(transforms.ToTensor()) mean, std = get_mean_std(args) print(args.vision_backbone, "train_normalizer", mean, std) trans.append(transforms.Normalize(mean=mean, std=std)) return transforms.Compose(trans) def build_dataset(args, tokenizer): from clipeval import datasets train_transform = get_train_transform(args) train_task_example_ids = None if hasattr(args, "pcurate") or (args.val_task is not None and args.curate == 0): # no validation for full yfcc15m training (same as SLIP/CLIP). 
thres = args.pcurate if hasattr(args, "pcurate") else args.thres if args.dataset in ["yfcc15m_tag"]: task_meta = torch.load(f"data/CLIP/{args.dataset}/{args.val_task}_ub_{args.dataset}_simcse{thres}_{args.max_bert_length}.pt") if hasattr(args, "sublist") and args.sublist: train_task_example_ids = task_meta["example_ids"] else: train_task_example_ids = set(task_meta["example_ids"]) print("train_task_example_ids_key", len(train_task_example_ids)) else: task_meta = torch.load(f"data/CLIP/CLIP_eval/{args.val_task}_ub_{args.dataset}_simcse{thres}.pt") if hasattr(args, "sublist") and args.sublist: train_task_example_ids = task_meta["example_ids"] else: train_task_example_ids = set(task_meta["example_ids"]) print("train_task_example_ids", len(train_task_example_ids)) tar_files = None train_dataset = datasets.ImageCaptionDatasetCLIP( args, args.dataset, args.root, args.metadata, train_task_example_ids, train_transform, tokenizer, args.max_bert_length, max_sample=args.max_sample ) return train_dataset, None, train_transform, tar_files def producer_collator(batch_list): result = defaultdict(list) for item in batch_list: for key in item: if key not in ["__key__"]: result[key].append(item[key]) for key in result: if key not in ["image_ids", "__key__", "captions"]: result[key] = torch.stack(result[key]) return result def main(args): misc.init_distributed_mode(args) print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) print("{}".format(args).replace(', ', ',\n')) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + misc.get_rank() torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True model, tokenizer = build_model(args) model = freeze_model(model, args) model.to(device) dataset_train, dataset_val, train_transform, tar_files = build_dataset(args, tokenizer) val_transform = get_val_transform(args) num_tasks = misc.get_world_size() global_rank = misc.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) if not isinstance(dataset_train, torch.utils.data.IterableDataset) else None if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias else: sampler_val = None if dataset_val is None else torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None and not args.eval: from torch.utils.tensorboard import SummaryWriter os.makedirs(args.log_dir, exist_ok=True) log_writer = SummaryWriter(log_dir=args.log_dir) else: log_writer = None model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Model = %s" % str(model_without_ddp)) print('number of params (M): %.2f' % (n_parameters / 1.e6)) eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() if args.lr is None: # only base_lr is specified args.lr = args.blr * eff_batch_size / 256 print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) print("actual lr: %.2e" % args.lr) print("accumulate grad iterations: %d" % args.accum_iter) print("effective batch size: %d" % eff_batch_size) if not isinstance(dataset_train, torch.utils.data.IterableDataset): print("len(dataset)", len(dataset_train)) else: print("cannot estimate len of torch.utils.data.IterableDataset.") if args.distributed: find_unused = False model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=find_unused) model_without_ddp = model.module # https://github.com/rwightman/pytorch-image-models/blob/fd360ac951a179474917f4b2d21db8669bf87f68/timm/models/vision_transformer.py#L407 no_weight_decay_list = {'pos_embed', 'cls_token', 'dist_token'} # THIS DOESN'T MATTER YET as we frozen all. head_weight_decay_list = {"visual_projection", "text_projection"} p_wd, p_no_wd = [], [] p_head_wd = [] # only apply 1-dim no decay for now. for n, p in model.named_parameters(): if not p.requires_grad: continue # frozen weights if p.ndim == 1 or n in no_weight_decay_list: p_no_wd.append(p) elif hasattr(args, "no_wd_emb") and isinstance(p, torch.nn.Embedding): p_no_wd.append(p) elif hasattr(args, "no_wd_ln") and isinstance(p, torch.nn.LayerNorm): p_no_wd.append(p) elif hasattr(args, "head_weight_decay") and [True for _part in head_weight_decay_list if _part in n]: p_head_wd.append(p) else: p_wd.append(p) param_groups = [{"params": p_wd, "weight_decay": args.weight_decay}, {"params": p_no_wd, "weight_decay": 0.}] if p_head_wd: param_groups.append({"params": p_head_wd, "weight_decay": args.head_weight_decay}) optimizer = torch.optim.AdamW(param_groups, lr=args.lr, eps=1e-8) loss_scaler = NativeScaler(args.fp16) start_epoch, best_acc, step = 0, [0.], [0] if args.resume: if args.resume.endswith(".pth"): # a pytorch checkpoint for resuming training. if args.resume.startswith("checkpoint"): args.resume = os.path.join(args.output_dir, args.resume) start_epoch, _, best_acc, step = misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) best_acc, step = [best_acc], [step if step is not None else 0] if isinstance(dataset_train, torch.utils.data.IterableDataset): # random from step to avoid dupped train. 
dataset_train.start_shard_id = step[0] % dataset_train.num_shards print("resuming", args.resume, "from step", step[0], "with best_acc", best_acc[0]) else: print("assuming a huggingface transformer pretrained model (no optimizer states).") from models_citclip import CiTCLIPVisionTextDualEncoderModel metric = evaluate(args, model, val_transform, tokenizer) model = CiTCLIPVisionTextDualEncoderModel.from_pretrained(args.resume) if args.eval: metric = evaluate(args, model, val_transform, tokenizer) json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]}) print(json_str) exit(0) criterion = getattr(losses, args.loss)().to(device) print("criterion = %s" % str(criterion)) if args.curate is not None and args.curate > 1: curate_batch_size = args.batch_size * 2 dataset_train.with_vision = True if hasattr(args, "inmem") and args.inmem else False data_loader_producer = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=curate_batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, collate_fn=producer_collator, persistent_workers=True ) def producer_fn(epoch): while True: data_loader_producer.sampler.set_epoch(epoch) for batch in data_loader_producer: yield batch epoch += 1 producer_iter = iter(producer_fn(start_epoch)) else: data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) data_loader_val = None if dataset_val is None else torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) import math if not isinstance(dataset_train, torch.utils.data.IterableDataset) and not args.curate: epochs = math.ceil(args.max_update / (len(dataset_train) // eff_batch_size)) print(f"Start training for {args.max_update} steps / {epochs} epochs") else: epochs = 1000000 # a big number to allow infinity run on iterativedataset. print(f"Start training for {args.max_update} steps on torch.utils.data.IterableDataset or curate dataset, the checkpoint is stateless.") start_time = time.time() for epoch in range(start_epoch, epochs): if step[0] >= args.max_update: break if args.curate is not None and (args.curate > 0 and step[0] % args.curate == 0): curate_cls = iterative_classcurate all_example_ids = curate_cls(step, device, producer_iter, model, tokenizer, args) print(len(all_example_ids), "after curate", args.curate * args.batch_size, "expected") if hasattr(args, "inmem") and args.inmem: data_loader_train = all_example_ids else: if hasattr(args, "sublist") and args.sublist: assert isinstance(all_example_ids, list) all_example_ids = all_example_ids[:args.curate * args.batch_size] else: all_example_ids = set(list(all_example_ids)[:args.curate * args.batch_size]) assert len(all_example_ids) == args.curate * args.batch_size from clipeval import datasets dataset_train = datasets.ImageCaptionDatasetCLIP(args, args.dataset, args.root, args.metadata, all_example_ids, train_transform, tokenizer, args.max_bert_length, max_sample=args.max_sample ) data_loader_train = torch.utils.data.DataLoader( dataset_train, shuffle=True, # just a local sampler. 
batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if hasattr(data_loader_train, "sampler") and isinstance(data_loader_train.sampler, torch.utils.data.DistributedSampler): data_loader_train.sampler.set_epoch(epoch) train_stats = train_one_epoch( model, model_without_ddp, criterion, tokenizer, data_loader_train, data_loader_val, val_transform, best_acc, optimizer, device, epoch, step, loss_scaler, eff_batch_size, args.clip_grad, log_writer=log_writer, args=args ) if not isinstance(dataset_train, torch.utils.data.IterableDataset): misc.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, epoch_name="last", best_acc=best_acc[0], step=step[0]) else: misc.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=0, epoch_name="last", best_acc=best_acc[0], step=step[0]) # if log_writer is not None: # log_writer.finish() args.resume = os.path.join(args.output_dir, "checkpoint-best.pth") if os.path.isfile(args.resume): misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) metric = evaluate(args, model, val_transform, tokenizer) json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]}) print(json_str) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('Training time {}'.format(total_time_str)) def parse_args(): '''see configs.py or sweep.py (we only allow pre-defined config).''' parser = argparse.ArgumentParser(description='CiTCLIP', add_help=False) parser.add_argument('config_name', type=str, help='see configs.py') parser.add_argument('--world_size', default=1, type=int) parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://') parser.add_argument('--resume', default=None, type=str) parser.add_argument('--eval', default=None, action='store_true') cmd_args = parser.parse_args() import run_configs config = getattr(run_configs, cmd_args.config_name)().add_cmd_args(cmd_args) return config if __name__ == '__main__': args = parse_args() if args.output_dir: Path(args.output_dir).mkdir(parents=True, exist_ok=True) main(args)
CiT-main
main.py
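When only a base lr is given, `main()` above scales it linearly with the effective batch size (`lr = blr * eff_batch_size / 256`). A worked instance with illustrative numbers:

# Linear lr scaling rule from main() above; the numbers are placeholders.
blr = 5e-4
batch_size, accum_iter, world_size = 1536, 1, 8
eff_batch_size = batch_size * accum_iter * world_size  # 12288
lr = blr * eff_batch_size / 256
print(f"actual lr: {lr:.2e}")  # 2.40e-02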
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
pre-configured trainable weights.
"""

pre_projection_weights = ['logit_scale', 'visual_projection.weight', 'text_projection.weight']

# TODO: unify layer selection for all models.
pre_vision_trainable_weights = {
    "moco": {
        "head": ['moco.head'],
        "none": [],
        "all": ["[ALL]"]
    },
    "augreg": {
        "none": [],
        "all": ["[ALL]"],
    },
    "swag": {
        "none": [],
        "all": ["[ALL]"],
    }
}

pre_text_trainable_weights = {
    "bert": {
        "pool": ['pooler.dense.weight', 'pooler.dense.bias'],
        "all": ["[ALL]"]
    },
}


def _freeze_model(model, trainable_weights):
    '''We assume a pretrained model has unknown freezing status, so every model
    must pass through this function (e.g., a MoCo teacher is frozen after
    pretraining). "[ALL]" indicates fully trainable.
    '''
    for name, parameter in model.named_parameters():
        for param in trainable_weights:
            if name.startswith(param) or param == "[ALL]":
                parameter.requires_grad = True
                break
        else:
            parameter.requires_grad = False

    trainable_parameters = []
    for name, parameter in model.named_parameters():
        if parameter.requires_grad:
            trainable_parameters.append(name)
    print(f"{model.__class__.__name__} trainable weights:", trainable_parameters)


def freeze_model(model, args):
    assert "-" in args.trainable_weight, "trainable_weight needs format <vision_weight_config>-<text_weight_config>."
    vision_config, text_config = args.trainable_weight.split("-")
    vision_trainable_weights = pre_vision_trainable_weights[args.vision_backbone][vision_config]
    text_trainable_weights = pre_text_trainable_weights[args.text_backbone][text_config]
    _freeze_model(model, pre_projection_weights)
    _freeze_model(model.vision_model, vision_trainable_weights)
    _freeze_model(model.text_model, text_trainable_weights)
    return model
CiT-main
weights.py
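`_freeze_model` above enables gradients only for parameters whose names start with a configured prefix (or for everything, via "[ALL]"). The same matching rule on a toy module; the parameter names are made up for illustration.

# Prefix-based freezing, mirroring _freeze_model above on a toy module.
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(4, 4)
        self.pooler = nn.Linear(4, 4)

model = Toy()
trainable = ['pooler']  # analogous to the "pool" config for BERT
for name, p in model.named_parameters():
    p.requires_grad = any(name.startswith(t) or t == "[ALL]" for t in trainable)
print([n for n, p in model.named_parameters() if p.requires_grad])
# ['pooler.weight', 'pooler.bias']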
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # DeiT: https://github.com/facebookresearch/deit # BEiT: https://github.com/microsoft/unilm/tree/master/beit # -------------------------------------------------------- # Copyright (c) Meta Platforms, Inc. All Rights Reserved import builtins import datetime import os import time from collections import defaultdict, deque from pathlib import Path import torch import torch.distributed as dist from torch._six import inf class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if v is None: continue if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None, max_update=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') if hasattr(iterable, "dataset") and isinstance(iterable.dataset, torch.utils.data.IterableDataset): len_iter = max_update else: len_iter = len(iterable) space_fmt = ':' + str(len(str(len_iter))) + 'd' log_msg = [ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}' ] if torch.cuda.is_available(): log_msg.append('max mem: {memory:.0f}') log_msg = self.delimiter.join(log_msg) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj 
iter_time.update(time.time() - end) if i % print_freq == 0 or i == len_iter - 1: eta_seconds = iter_time.global_avg * (len_iter - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len_iter, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len_iter, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format( header, total_time_str, total_time / len_iter)) def setup_for_distributed(is_master): """ This function disables printing when not in master process """ builtin_print = builtins.print def print(*args, **kwargs): force = kwargs.pop('force', False) force = force or (get_world_size() > 8) if is_master or force: now = datetime.datetime.now() builtin_print('[{}] '.format(now), end='') # print with time stamp builtin_print(*args, **kwargs) builtins.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def init_distributed_mode(args): if args.dist_on_itp: args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) os.environ['LOCAL_RANK'] = str(args.gpu) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() else: print('Not using distributed mode') setup_for_distributed(is_master=True) # hack args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}, gpu {}'.format( args.rank, args.dist_url, args.gpu), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0) class NativeScalerWithGradNormCount: state_dict_key = "amp_scaler" def __init__(self, fp16=True): self._scaler = torch.cuda.amp.GradScaler(enabled=fp16) def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): self._scaler.scale(loss).backward(create_graph=create_graph) if update_grad: if clip_grad is not None: assert parameters is not None self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) else: self._scaler.unscale_(optimizer) norm = get_grad_norm_(parameters) 
self._scaler.step(optimizer) self._scaler.update() else: norm = None return norm def state_dict(self): return self._scaler.state_dict() def load_state_dict(self, state_dict): self._scaler.load_state_dict(state_dict) def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = [p for p in parameters if p.grad is not None] norm_type = float(norm_type) if len(parameters) == 0: return torch.tensor(0.) device = parameters[0].grad.device if norm_type == inf: total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) else: total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) return total_norm def save_model(args, epoch, epoch_name, model, model_without_ddp, optimizer, loss_scaler, best_val_loss=None, best_acc=None, step=None): output_dir = Path(args.output_dir) if loss_scaler is not None: checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] for checkpoint_path in checkpoint_paths: to_save = { 'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args, 'best_val_loss': best_val_loss, 'best_acc': best_acc, 'step': step, } save_on_master(to_save, checkpoint_path) else: client_state = {'epoch': epoch, 'best_val_loss': best_val_loss, 'best_acc': best_acc, 'step': step} model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state) def load_model(args, model_without_ddp, optimizer, loss_scaler): if args.resume: start_epoch, best_val_loss = None, None if args.resume.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( args.resume, map_location='cpu', check_hash=True) else: checkpoint = torch.load(args.resume, map_location='cpu') model_without_ddp.load_state_dict(checkpoint['model']) print("Resume checkpoint %s" % args.resume) if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval): optimizer.load_state_dict(checkpoint['optimizer']) start_epoch = checkpoint['epoch'] + 1 if 'scaler' in checkpoint: loss_scaler.load_state_dict(checkpoint['scaler']) print("With optim & sched!") best_val_loss = checkpoint['best_val_loss'] if 'best_val_loss' in checkpoint else None best_acc = checkpoint['best_acc'] if 'best_acc' in checkpoint else 0. if isinstance(best_acc, list): # TODO: be backward compatible; remove this line before release; best_acc = best_acc[0] step = checkpoint['step'] if 'step' in checkpoint else 0 return start_epoch, best_val_loss, best_acc, step def all_reduce_mean(x): world_size = get_world_size() if world_size > 1: x_reduce = torch.tensor(x).cuda() dist.all_reduce(x_reduce) x_reduce /= world_size return x_reduce.item() else: return x def print_json(output_dir, json_str, mode="a"): print(json_str) if output_dir and is_main_process(): with open(os.path.join(output_dir, "log.txt"), mode=mode, encoding="utf-8") as f: f.write(json_str + "\n")
CiT-main
util/misc.py
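`SmoothedValue` above keeps a bounded deque for windowed statistics and a running total/count for the global average; only the latter is synchronized across processes. A dependency-free mirror of that behavior (not an import of the module itself):

# Mirrors SmoothedValue's two-track bookkeeping: a fixed window for the
# smoothed median vs. a running total/count for the global average.
from collections import deque
import statistics

window = deque(maxlen=3)
total, count = 0.0, 0
for v in [1.0, 2.0, 3.0, 10.0]:
    window.append(v)
    total += v
    count += 1
print(statistics.median(window), total / count)  # 3.0 windowed vs 4.0 global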
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved

import math


def adjust_step_learning_rate(optimizer, step, lr, min_lr, warmup_steps, max_update):
    """huxu: adds support for steps instead of epochs.
    Decay the learning rate with half-cycle cosine after warmup."""
    if step < warmup_steps:
        lr = lr * step / warmup_steps
    else:
        lr = min_lr + (lr - min_lr) * 0.5 * \
            (1. + math.cos(math.pi * (step - warmup_steps) / (max_update - warmup_steps)))
    for param_group in optimizer.param_groups:
        if "lr_scale" in param_group:
            param_group["lr"] = lr * param_group["lr_scale"]
        else:
            param_group["lr"] = lr
    return lr


def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup."""
    if epoch < args.warmup_epochs:
        lr = args.lr * epoch / args.warmup_epochs
    else:
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
            (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
    for param_group in optimizer.param_groups:
        if "lr_scale" in param_group:
            param_group["lr"] = lr * param_group["lr_scale"]
        else:
            param_group["lr"] = lr
    return lr
CiT-main
util/lr_sched.py
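Sampled values of the warmup-then-cosine schedule implemented by `adjust_step_learning_rate` above, computed standalone; the hyperparameters below are placeholders, not the repo defaults.

# Standalone trace of the warmup + half-cycle cosine schedule above.
import math

def sched(step, lr=1e-3, min_lr=1e-6, warmup_steps=500, max_update=5000):
    if step < warmup_steps:
        return lr * step / warmup_steps
    return min_lr + (lr - min_lr) * 0.5 * \
        (1. + math.cos(math.pi * (step - warmup_steps) / (max_update - warmup_steps)))

for s in [0, 250, 500, 2750, 5000]:
    print(s, f"{sched(s):.2e}")  # linear ramp to 1e-3, then cosine decay to min_lr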
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. import torch from transformers import ( PreTrainedModel, PretrainedConfig, AutoConfig, AutoModel, ) from transformers.modeling_outputs import BaseModelOutputWithPooling import timm assert timm.__version__ >= "0.4.12", "make sure timm uses augreg checkpoints." class AugRegConfig(PretrainedConfig): """ HF or older timm doesn't load augreg weights. """ model_type = "augreg" def __init__( self, config_name="vit_base_patch32_224_in21k", hidden_size=768, **kwargs ): super().__init__(**kwargs) self.config_name = config_name self.hidden_size = hidden_size AutoConfig.register("augreg", AugRegConfig) class AugRegModel(PreTrainedModel): config_class = AugRegConfig @classmethod def from_orig_pretrained(cls, config_name): augreg = timm.create_model(config_name, pretrained=True) config = AugRegConfig(config_name=config_name, hidden_size=augreg.embed_dim) model = AugRegModel(config) model.augreg = augreg return model @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): import os ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") state_dict = torch.load(os.path.join(ckpt_path)) config = AutoConfig.from_pretrained(pretrained_model_name_or_path) model = AugRegModel(config) model.load_state_dict(state_dict, strict=True) return model def __init__(self, config): super().__init__(config) self.config = config self.augreg = timm.create_model(config.config_name, pretrained=False) self.post_init() def _init_weights(self, module): self.augreg._init_weights(module) def forward( self, pixel_values=None, # attention_mask=None, # head_mask=None, output_attentions=None, output_hidden_states=None, # interpolate_pos_encoding=None, return_dict=None ): # https://github.com/rwightman/pytorch-image-models/blob/e0c4eec4b66dc14ae96097c7b4a7ef2af45ba309/timm/models/vision_transformer.py#L358 # pre_logits is nn.Identity and token means from CLS [:, 0] sequence_output = self.augreg.forward_features(pixel_values) pooled_output = sequence_output if not return_dict: return (sequence_output, pooled_output) return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=None, # encoder_outputs.hidden_states, attentions=None, # encoder_outputs.attentions, ) AutoModel.register(AugRegConfig, AugRegModel) if __name__ == '__main__': # dump this model for AutoModel: `python -m hfmodels.augreg` models = ["vit_base_patch32_224_in21k", "vit_base_patch16_224_in21k", "vit_large_patch16_224_in21k"] for model in models: vision_model = AugRegModel.from_orig_pretrained(model) vision_model.save_pretrained(f"pretrained_models/{model}_augreg_hf")
CiT-main
hfmodels/augreg.py
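A plausible round trip for the AugReg wrapper above: export timm weights into an HF-style folder, then reload through the registered `AutoModel` entry point. This assumes network access for the timm download and that importing `hfmodels.augreg` has run the `AutoConfig`/`AutoModel` registration; treat it as a usage sketch, not a verified recipe.

# Usage sketch: save with from_orig_pretrained, reload via AutoModel.
from transformers import AutoModel
from hfmodels.augreg import AugRegModel  # import side effect registers the config/model

vision_model = AugRegModel.from_orig_pretrained("vit_base_patch32_224_in21k")
vision_model.save_pretrained("pretrained_models/vit_base_patch32_224_in21k_augreg_hf")
reloaded = AutoModel.from_pretrained("pretrained_models/vit_base_patch32_224_in21k_augreg_hf")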
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. import torch from transformers import ( PreTrainedModel, PretrainedConfig, AutoConfig, AutoModel, ) from transformers.modeling_outputs import BaseModelOutputWithPooling class SwagConfig(PretrainedConfig): model_type = "swag" def __init__( self, config_name="vit_b16", hidden_size=768, **kwargs ): super().__init__(**kwargs) self.config_name = config_name self.hidden_size = hidden_size AutoConfig.register("swag", SwagConfig) class SwagModel(PreTrainedModel): config_class = SwagConfig @classmethod def from_orig_pretrained(cls, config_name): swag = torch.hub.load("facebookresearch/swag", model=config_name) config = SwagConfig(config_name=config_name, hidden_size=swag.hidden_dim) model = SwagModel(config) model.swag = swag return model @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): import os ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") state_dict = torch.load(os.path.join(ckpt_path)) config = AutoConfig.from_pretrained(pretrained_model_name_or_path) model = SwagModel(config) model.load_state_dict(state_dict, strict=True) return model def __init__(self, config): super().__init__(config) self.config = config self.swag = torch.hub.load("facebookresearch/swag", model=config.config_name) self.post_init() def _init_weights(self, module): self.swag.init_weights() # check existence. def forward( self, pixel_values=None, # attention_mask=None, # head_mask=None, output_attentions=None, output_hidden_states=None, # interpolate_pos_encoding=None, return_dict=None ): # https://github.com/rwightman/pytorch-image-models/blob/e0c4eec4b66dc14ae96097c7b4a7ef2af45ba309/timm/models/vision_transformer.py#L358 # pre_logits is nn.Identity and token means from CLS [:, 0] sequence_output = self.swag(pixel_values) pooled_output = sequence_output if not return_dict: return (sequence_output, pooled_output) return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=None, # encoder_outputs.hidden_states, attentions=None, # encoder_outputs.attentions, ) AutoModel.register(SwagConfig, SwagModel) if __name__ == '__main__': # dump this model for AutoModel: `python -m hfmodels.swag` models = ["vit_b16", "vit_l16", "vit_h14"] for model in models: vision_model = SwagModel.from_orig_pretrained(model) vision_model.save_pretrained(f"pretrained_models/{model}_swag_hf")
CiT-main
hfmodels/swag.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

from .moco import MoCoModel, MoCoConfig
from .augreg import AugRegModel, AugRegConfig
from .swag import SwagModel, SwagConfig
CiT-main
hfmodels/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. import torch import sys sys.path.append("moco-v3") # repo path to moco-v3 from transformers import ( PreTrainedModel, PretrainedConfig, AutoConfig, AutoModel, ) from torch import nn from transformers.modeling_outputs import BaseModelOutputWithPooling from vits import vit_base from functools import partial from moco.builder import MoCo_ViT from collections import OrderedDict class MoCoConfig(PretrainedConfig): """ refer `https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/vit/configuration_vit.py#L29` `model_type` only has three choices. https://github.com/huggingface/transformers/blob/05fa1a7ac17bb7aa07b9e0c1e138ecb31a28bbfe/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py#L94 how to make sure `hidden_size` match checkpoint ? """ model_type = "moco" def __init__( self, config_name="vit_base_patch16", hidden_size=256, **kwargs ): super().__init__(**kwargs) self.config_name = config_name self.hidden_size = hidden_size AutoConfig.register("moco", MoCoConfig) class MoCoModel(PreTrainedModel): config_class = MoCoConfig @classmethod def from_orig_pretrained(cls, ckpt_dir): """load from original checkpoint; used to save a HF checkpoint, see main.""" config = MoCoConfig(hidden_size=256) model = MoCoModel(config) print("loading weights from", ckpt_dir) ckpt = torch.load(ckpt_dir, map_location='cpu') state_dict = OrderedDict() for k, v in ckpt['state_dict'].items(): k = k.replace('module.', '') for prefix in ["momentum_encoder", "predictor"]: if k.startswith(prefix): break else: state_dict[k.replace("base_encoder.", "")] = v model.moco.load_state_dict(state_dict, strict=True) model.eval() return model @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): import os ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") state_dict = torch.load(os.path.join(ckpt_path)) config = AutoConfig.from_pretrained(pretrained_model_name_or_path) model = MoCoModel(config) model.load_state_dict(state_dict, strict=True) return model def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.moco = MoCo_ViT( partial(vit_base, stop_grad_conv1=True), 256, 4096, 0.2 ).base_encoder self.post_init() def _init_weights(self, m): # borrowed from mae if isinstance(m, nn.Linear): # we use xavier_uniform following official JAX ViT: torch.nn.init.xavier_uniform_(m.weight) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward( self, pixel_values=None, # attention_mask=None, # head_mask=None, output_attentions=None, output_hidden_states=None, # interpolate_pos_encoding=None, return_dict=None ): encoder_outputs = self.moco(pixel_values) sequence_output = encoder_outputs.unsqueeze(1) pooled_output = encoder_outputs if not return_dict: return (sequence_output, pooled_output) # + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=encoder_outputs, hidden_states=None, # encoder_outputs.hidden_states, attentions=None, # encoder_outputs.attentions, ) AutoModel.register(MoCoConfig, MoCoModel) if __name__ == '__main__': # dump this model for AutoModel: `python -m hfmodels.moco` vision_model = MoCoModel.from_orig_pretrained("pretrained_models/moco/vit-b-300ep.pth.tar") 
vision_model.save_pretrained("pretrained_models/moco_hf")
CiT-main
hfmodels/moco.py
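`MoCoModel.from_orig_pretrained` above remaps the original MoCo checkpoint keys: drop DDP's `module.` prefix, skip the momentum encoder and predictor heads, and strip `base_encoder.`. The same filter on a made-up key list:

# Key-remapping pattern from MoCoModel.from_orig_pretrained; keys are invented.
from collections import OrderedDict

ckpt_keys = ["module.base_encoder.patch_embed.proj.weight",
             "module.momentum_encoder.head.weight",
             "module.predictor.0.weight"]
state_dict = OrderedDict()
for k in ckpt_keys:
    k = k.replace('module.', '')
    if any(k.startswith(p) for p in ("momentum_encoder", "predictor")):
        continue  # teacher/predictor weights are not part of the encoder
    state_dict[k.replace("base_encoder.", "")] = None  # weight tensors elided
print(list(state_dict))  # ['patch_embed.proj.weight']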
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Meta Platforms, Inc. All Rights Reserved import numpy as np import pickle import re import time import sqlite3 import webdataset as wds from urllib.parse import unquote from tqdm import tqdm # Borrowed from SLIP but add tag field to be consistent with LiT: https://lbsn.vgiscience.org/yfcc-introduction/ def to_pkl(): cleanhtml = re.compile('<a.*?>|</a>|<b>|</b>|<i>|</i>') cleanurl = re.compile('http\S+|www\S+') print('=> loading YFCC image ids') image_ids = np.load('data/yfcc15m/flickr_unique_ids.npy') image_ids = set(image_ids) print('=> loading CLIP image ids') print('=> collecting and cleaning subset captions') captioned = [] valid_image_ids = [] with open('/datasets01/yfcc100m/090517/yfcc100m_dataset.txt') as f: for l in tqdm(f): row = l.strip().split('\t') if int(row[0]) in image_ids: title = unquote(row[8]).replace('+', ' ') title = re.sub(cleanhtml, '', title) title = re.sub(cleanurl, '', title).strip() desc = unquote(row[9]).replace('+', ' ') desc = re.sub(cleanhtml, '', desc) desc = re.sub(cleanurl, '', desc).strip() tag = ",".join([row[10].strip(), row[11].strip()]) tag = unquote(tag).replace('+', ' ') tag = re.sub(cleanhtml, '', tag) tag = re.sub(cleanurl, '', tag).strip() if any([len(title) > 0, len(desc) > 0, len(tag) > 0]): captioned.append((int(row[0]), title, desc, tag)) valid_image_ids.append(int(row[0])) with open('data/yfcc100m/yfcc100m_captioned_w_tag.pkl', 'wb') as f: pickle.dump(captioned, f) with open('data/yfcc100m/yfcc100m_image_ids.pkl', 'wb') as f: pickle.dump(valid_image_ids, f) print('Total captioned images:', len(captioned)) # 94514285 def write_json(): with open('data/yfcc100m/yfcc100m_captioned_w_tag.pkl', 'rb') as f: captioned = pickle.load(f) from collections import defaultdict repos = defaultdict(dict) for idx, (image_id, title, desc, tag) in enumerate(captioned): index = format(image_id, "0>8d") repo = index[:2] z = index[2: 5] repos[f"{str(repo).zfill(2)}_{str(z).zfill(3)}"][str(image_id).zfill(8)] = {"title": title, "desc": desc, "tag": tag} import json from pathlib import Path for repo in repos: _repo, z = repo.split("_") Path(f"data/yfcc100m/yfcc100m_captioned_w_tag/{_repo}").mkdir(parents=True, exist_ok=True) with open(f"data/yfcc100m/yfcc100m_captioned_w_tag/{_repo}/{z}.json", "w") as fw: json.dump(repos[repo], fw) to_pkl() write_json()
CiT-main
scripts/make_yfcc100m_dataset.py
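What the cleaning regexes above do to one raw YFCC caption field; the sample string is invented.

# One caption through the unquote / '+'-to-space / HTML / URL cleanup above.
import re
from urllib.parse import unquote

cleanhtml = re.compile('<a.*?>|</a>|<b>|</b>|<i>|</i>')
cleanurl = re.compile(r'http\S+|www\S+')

raw = 'A+<b>sunny</b>+day+at+the+beach+http://flic.kr/p/abc'
title = unquote(raw).replace('+', ' ')
title = re.sub(cleanhtml, '', title)
title = re.sub(cleanurl, '', title).strip()
print(title)  # A sunny day at the beach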
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved

import numpy as np
import pickle
import re

from urllib.parse import unquote
from tqdm import tqdm

# Borrowed from SLIP but add tag field to be consistent with LiT: https://lbsn.vgiscience.org/yfcc-introduction/
cleanhtml = re.compile('<a.*?>|</a>|<b>|</b>|<i>|</i>')
cleanurl = re.compile('http\S+|www\S+')

print('=> loading YFCC image ids')
image_ids = np.load('data/yfcc15m/flickr_unique_ids.npy')
image_ids = set(image_ids)

print('=> loading CLIP image ids')
clip_ids = set()
with open('data/yfcc15m/yfcc100m_subset_data.tsv') as f:
    for l in tqdm(f.readlines()):
        row = l.strip().split('\t')
        clip_ids.add(int(row[0]))

print('=> collecting and cleaning subset captions')
captioned = []
with open('/datasets01/yfcc100m/090517/yfcc100m_dataset.txt') as f:
    for l in tqdm(f):
        row = l.strip().split('\t')
        if int(row[0]) in image_ids:
            if int(row[0]) in clip_ids:
                title = unquote(row[8]).replace('+', ' ')
                title = re.sub(cleanhtml, '', title)
                title = re.sub(cleanurl, '', title)

                desc = unquote(row[9]).replace('+', ' ')
                desc = re.sub(cleanhtml, '', desc)
                desc = re.sub(cleanurl, '', desc)

                tag = ",".join([row[10].strip(), row[11].strip()])
                tag = unquote(tag).replace('+', ' ')
                tag = re.sub(cleanhtml, '', tag)
                tag = re.sub(cleanurl, '', tag)

                captioned.append((int(row[0]), title, desc, tag))

with open('data/yfcc15m/yfcc15m_w_tag.pkl', 'wb') as f:
    pickle.dump(captioned, f)

print('Total captioned images:', len(captioned))  # 14689580
CiT-main
scripts/make_yfcc15m_dataset.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Meta Platforms, Inc. All Rights Reserved import json import os import pickle import zipfile import numpy as np import torch import random from PIL import Image, ImageFile from torchvision import datasets as t_datasets ImageFile.LOAD_TRUNCATED_IMAGES = True def pil_loader(path): # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB') def yfcc_loader(root, index): index = format(index, "0>8d") repo = index[:2] z = index[2: 5] file_img = index[5:] + '.jpg' path_zip = os.path.join(root, 'images', repo, z) + '.zip' with zipfile.ZipFile(path_zip, 'r') as myzip: img = Image.open(myzip.open(file_img)) return img.convert('RGB') def aug_tag(tag): delims = [" ", ",", ";", "/", "\n"] delim = random.choice(delims)[0] segs = [seg.strip() for seg in tag.split(",") if len(seg.strip()) > 0] random.shuffle(segs) tag = delim.join(segs) return tag class ImageCaptionDatasetBase(torch.utils.data.Dataset): def __init__(self, args, dataset, root, metadata, task_example_ids=None, with_vision=True, with_text=True, max_sample=None): self.with_vision = with_vision self.with_text = with_text self.dataset = dataset self.root = root if hasattr(args, "aug_tag"): self.aug_tag = args.aug_tag if self.dataset in ["yfcc100m_tag"]: self.json_root = os.path.join(os.path.dirname(metadata), "yfcc100m_captioned_w_tag") self.samples = [] if task_example_ids is not None: if isinstance(task_example_ids, list): self.samples.extend(task_example_ids) else: self.samples.extend(list(task_example_ids)) print(f"apply task filter with {len(self.samples)} examples.") else: with open(metadata, 'rb') as f: samples = pickle.load(f) self.samples.extend(samples) if max_sample is not None and len(self.samples) >= max_sample: self.samples = self.samples[:max_sample] elif self.dataset in ['yfcc15m_tag', 'yfcc15m']: with open(metadata, 'rb') as f: samples = pickle.load(f) self.samples = [] if task_example_ids is not None: if isinstance(task_example_ids, list): # build the index of sample and follow the list order. 
image_id_to_sample = {} for image_id, title, desc, tag in samples: title, desc, tag = title.strip(), desc.strip(), tag.strip() if len(title) > 0: image_id_to_sample["_".join([str(image_id).zfill(8), "title"])] = {"image_id": image_id, "title": title} if len(desc) > 0: image_id_to_sample["_".join([str(image_id).zfill(8), "desc"])] = {"image_id": image_id, "desc": desc} if "tag" in self.dataset and len(tag) > 0: image_id_to_sample["_".join([str(image_id).zfill(8), "tag"])] = {"image_id": image_id, "tag": tag} for image_key in task_example_ids: if max_sample is not None and len(self.samples) >= max_sample: break image_id, field = image_key.split("_") image_id = image_id.zfill(8) image_key = "_".join([image_id, field]) self.samples.append(image_id_to_sample[image_key]) else: for image_id, title, desc, tag in samples: title, desc, tag = title.strip(), desc.strip(), tag.strip() if str(image_id).zfill(8) + "_title" in task_example_ids and len(title) > 0: self.samples.append({"image_id": image_id, "title": title}) if str(image_id).zfill(8) + "_desc" in task_example_ids and len(desc) > 0: self.samples.append({"image_id": image_id, "desc": desc}) if "tag" in self.dataset and str(image_id).zfill(8) + "_tag" in task_example_ids and len(tag) > 0: self.samples.append({"image_id": image_id, "tag": tag}) if max_sample is not None and len(self.samples) >= max_sample: break print(f"apply task filter with {len(self.samples)} examples.") else: for image_id, title, desc, tag in samples: title, desc, tag = title.strip(), desc.strip(), tag.strip() rec = {} if len(title) > 0: rec["title"] = title if len(desc) > 0: rec["desc"] = desc if "tag" in self.dataset and len(tag) > 0: rec["tag"] = tag if len(rec) > 0: rec["image_id"] = image_id self.samples.append(rec) if max_sample is not None and len(self.samples) >= max_sample: break else: raise ValueError(f"unknown dataset {self.dataset}") def get_raw_item(self, i): if self.dataset in ["yfcc100m_tag"]: sample = self.samples[i] if isinstance(sample, str): index, key = sample.split("_") else: index = sample index = format(index, "0>8d") img = yfcc_loader(self.root, int(index)) if self.with_vision else None if self.with_text: repo = index[:2] z = index[2: 5] with open(f"{self.json_root}/{repo}/{z}.json") as fr: repo_z = json.load(fr) rec = repo_z[str(index).zfill(8)] if not isinstance(sample, str): key = random.choice([key for key in rec if len(rec[key]) > 0]) index = "_".join([str(index).zfill(8), key]) if key == "tag" and (hasattr(self, "aug_tag") and self.aug_tag): caption = aug_tag(rec[key]) else: caption = rec[key] elif self.dataset in ['yfcc15m_tag', 'yfcc15m']: rec = self.samples[i] index = rec["image_id"] img = yfcc_loader(self.root, index) if self.with_vision else None if self.with_text: key = random.choice([_key for _key in rec if _key != "image_id"]) index = "_".join([str(index).zfill(8), key]) if key == "tag" and hasattr(self, "aug_tag"): caption = aug_tag(rec[key]) else: caption = rec[key] else: raise ValueError(f"unknown dataset {self.dataset}") return index, img, caption def __getitem__(self, i): raise NotImplementedError def __len__(self): return len(self.samples) class ImageCaptionDatasetCLIP(ImageCaptionDatasetBase): def __init__(self, args, dataset, root, metadata, task_example_ids, transform=None, tokenizer=None, max_bert_length=77, with_vision=True, with_text=True, max_sample=None): super().__init__(args, dataset, root, metadata, task_example_ids, with_vision, with_text, max_sample) self.max_bert_length = max_bert_length self.transform = transform 
self.tokenizer = tokenizer def __getitem__(self, i): index, img, caption = self.get_raw_item(i) result = {"image_ids": index} # apply transformation if img is not None and self.transform is not None: img = self.transform(img) result["pixel_values"] = img # tokenize caption if caption is not None and self.tokenizer is not None: inputs = self.tokenizer(caption, padding="max_length", truncation=True, max_length=self.max_bert_length, return_tensors="pt") for key in inputs: inputs[key] = inputs[key][0] result.update(**inputs) result["captions"] = caption return result class FileListDataset(torch.utils.data.Dataset): def __init__(self, images, labels, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform self.images = np.load(images) self.labels = np.load(labels) def __getitem__(self, index): img = pil_loader(self.images[index]) target = self.labels[index] if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.images) def get_downstream_dataset(catalog, name, is_train, transform): entry = catalog[name] root = entry['path'] if entry['type'] == 'imagefolder': dataset = t_datasets.ImageFolder(os.path.join(root, entry['train'] if is_train else entry['test']), transform=transform) elif entry['type'] == 'special': if name == 'cifar10': dataset = t_datasets.CIFAR10(root, train=is_train, transform=transform, download=True) elif name == 'cifar100': dataset = t_datasets.CIFAR100(root, train=is_train, transform=transform, download=True) elif name == 'stl10': dataset = t_datasets.STL10(root, split='train' if is_train else 'test', transform=transform, download=True) elif name == 'mnist': dataset = t_datasets.MNIST(root, train=is_train, transform=transform, download=True) elif entry['type'] == 'filelist': path = entry['train'] if is_train else entry['test'] val_images = os.path.join(root, path + '_images.npy') val_labels = os.path.join(root, path + '_labels.npy') if name == 'clevr_counts': target_transform = lambda x: ['count_10', 'count_3', 'count_4', 'count_5', 'count_6', 'count_7', 'count_8', 'count_9'].index(x) else: target_transform = None dataset = FileListDataset(val_images, val_labels, transform, target_transform) else: raise Exception('Unknown dataset') return dataset
CiT-main
clipeval/datasets.py
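`yfcc_loader` above locates an image by splitting its zero-padded 8-digit id into a 2-digit repo directory, a 3-digit zip name, and a 3-digit file stem. A standalone trace of that path arithmetic:

# Shard-path arithmetic from yfcc_loader above, without touching the zips.
index = format(1234567, "0>8d")               # '01234567'
repo, z, file_img = index[:2], index[2:5], index[5:] + '.jpg'
path_zip = f"images/{repo}/{z}.zip"           # opened with zipfile.ZipFile in the loader
print(repo, z, file_img, path_zip)            # 01 234 567.jpg images/01/234.zip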
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Meta Platforms, Inc. All Rights Reserved import torch import json import os from sklearn import metrics def load_metadata(metadir="clipeval"): with open(os.path.join(metadir, 'dataset_catalog.json')) as f: catalog = json.load(f) with open(os.path.join(metadir, 'templates.json')) as f: all_templates = json.load(f) with open(os.path.join(metadir, 'labels.json')) as f: all_labels = json.load(f) return catalog, all_templates, all_labels def evaluate(d, val_loader, templates, labels, model, tokenizer, max_bert_length, classnorm=False): print('Evaluating {}'.format(d)) is_acc = d not in ['aircraft', 'pets', 'caltech101', 'flowers', 'kinetics700_frames', 'hateful_memes'] acc_or_outputs = validate_zeroshot(val_loader, templates, labels, model, tokenizer, is_acc, max_bert_length, classnorm) if d in ['aircraft', 'pets', 'caltech101', 'flowers']: metric = mean_per_class(*acc_or_outputs) elif d == 'kinetics700_frames': top1, top5 = accuracy(*acc_or_outputs, topk=(1, 5)) metric = (top1 + top5) / 2 metric = metric.item() elif d == 'hateful_memes': metric = roc_auc(*acc_or_outputs) else: metric = acc_or_outputs return metric @torch.no_grad() def build_text_features(templates, labels, model, tokenizer, max_bert_length=77, skip_text_projection=False, classnorm=False): # (huxu) TODO: add device text_features = [] for label in labels: if isinstance(label, list): texts = [t.format(l) for t in templates for l in label] else: texts = [t.format(label) for t in templates] texts = tokenizer(texts, padding=True, truncation=True, max_length=max_bert_length, return_tensors="pt") for key in texts: texts[key] = texts[key].to(next(model.parameters()).device, non_blocking=True) # texts = texts.view(-1, max_bert_length).contiguous() class_embeddings = model(**texts, skip_text_projection=skip_text_projection)["text_embeds"] class_embeddings = class_embeddings / class_embeddings.norm(dim=-1, keepdim=True) class_embeddings = class_embeddings.mean(dim=0) text_features.append(class_embeddings) text_features = torch.stack(text_features, dim=0) mean, std = None, None if classnorm: mean, std = text_features.mean(dim=0)[None, :], text_features.std(dim=0)[None, :] text_features = (text_features - mean) / std text_features = text_features / text_features.norm(dim=-1, keepdim=True) return text_features, mean, std @torch.no_grad() def validate_zeroshot(val_loader, templates, labels, model, tokenizer, is_acc, max_bert_length, classnorm=False): # switch to evaluate mode model.cuda() model.eval() total_top1 = 0 total_images = 0 all_outputs = [] all_targets = [] text_features = None for samples in val_loader: if text_features is None: print('=> encoding captions') text_features, mean, std = build_text_features(templates, labels, model, tokenizer, max_bert_length, classnorm=classnorm) if isinstance(samples, tuple) or isinstance(samples, list): images, target = samples[0], samples[1] elif isinstance(samples, dict): images, target = samples["pixel_values"], samples["targets"] else: raise ValueError("unknown sample type", type(samples)) images = images.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # encode images image_features = model(pixel_values=images)["image_embeds"] if classnorm: image_features = (image_features - mean) / std image_features = image_features / image_features.norm(dim=-1, keepdim=True) # cosine 
similarity as logits logits_per_image = image_features @ text_features.t() logits_per_image = logits_per_image.cpu() target = target.cpu() if is_acc: # measure accuracy and record loss pred = logits_per_image.argmax(dim=1) correct = pred.eq(target).sum() total_top1 += correct.item() total_images += images.size(0) else: all_outputs.append(logits_per_image) all_targets.append(target) if is_acc: return 100 * total_top1 / total_images else: return torch.cat(all_outputs), torch.cat(all_targets) def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def mean_per_class(outputs, targets): pred = outputs.argmax(1) confusion_matrix = metrics.confusion_matrix(targets, pred) per_classes = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1) return 100 * per_classes.mean() def roc_auc(outputs, targets): pos_score = outputs[:, 1] - outputs[:, 0] metric = metrics.roc_auc_score(targets, pos_score) return 100 * metric if __name__ == '__main__': logits = torch.randn(128, 10) targets = torch.randint(size=(128,), low=0, high=10) evaluate("imagenet", logits, targets)
CiT-main
clipeval/eval_zeroshot.py
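For reference, a minimal self-contained sketch of the zero-shot classification rule the file above implements: class scores are cosine similarities between L2-normalized image and text embeddings. The shapes and random features here are placeholders, not outputs of the real model.

import torch
import torch.nn.functional as F

# hypothetical setup: 8 classes, 512-d embedding space
text_features = F.normalize(torch.randn(8, 512), dim=-1)    # from build_text_features()
image_features = F.normalize(torch.randn(32, 512), dim=-1)  # from the image tower

logits = image_features @ text_features.t()  # cosine similarity as logits
targets = torch.randint(0, 8, (32,))
pred = logits.argmax(dim=1)
print('top-1 acc: {:.2f}%'.format(100 * pred.eq(targets).float().mean().item()))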
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
import time
import yaml
import torch

import utils.logger
from utils import main_utils, eval_utils
import torch.multiprocessing as mp

parser = argparse.ArgumentParser(description='Evaluation on action recognition')
parser.add_argument('cfg', metavar='CFG', help='config file')
parser.add_argument('model_cfg', metavar='CFG', help='config file')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--test-only', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--port', default='1234')


def main():
    ngpus = torch.cuda.device_count()
    args = parser.parse_args()
    cfg = yaml.safe_load(open(args.cfg))

    if args.test_only:
        cfg['test_only'] = True
    if args.resume:
        cfg['resume'] = True
    if args.debug:
        cfg['num_workers'] = 1
        cfg['dataset']['batch_size'] = 4

    if args.distributed:
        mp.spawn(main_worker, nprocs=ngpus, args=(ngpus, cfg['dataset']['fold'], args, cfg))
    else:
        main_worker(None, ngpus, cfg['dataset']['fold'], args, cfg)


def main_worker(gpu, ngpus, fold, args, cfg):
    args.gpu = gpu
    args.world_size = ngpus

    # Prepare folder and logger
    eval_dir, model_cfg, logger = eval_utils.prepare_environment(args, cfg, fold)

    # Model
    model, ckp_manager = eval_utils.build_model(model_cfg, cfg, eval_dir, args, logger)

    # Optimizer
    optimizer, scheduler = main_utils.build_optimizer(model.parameters(), cfg['optimizer'], logger)

    # Datasets
    train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(
        cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)

    ################################ Train ################################
    start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
    if cfg['resume'] and ckp_manager.checkpoint_exists(last=True):
        start_epoch = ckp_manager.restore(model, optimizer, scheduler, restore_last=True)
        logger.add_line("Loaded checkpoint '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))

    if not cfg['test_only']:
        logger.add_line("=" * 30 + " Training " + "=" * 30)

        # Warmup. Train classifier for a few epochs.
        if start_epoch == 0 and 'warmup_classifier' in cfg['optimizer'] and cfg['optimizer']['warmup_classifier']:
            n_wu_epochs = cfg['optimizer']['warmup_epochs'] if 'warmup_epochs' in cfg['optimizer'] else 5
            cls_opt, _ = main_utils.build_optimizer(
                params=[p for n, p in model.named_parameters() if 'feature_extractor' not in n],
                cfg={'lr': {'base_lr': cfg['optimizer']['lr']['base_lr'],
                            'milestones': [n_wu_epochs, ],
                            'gamma': 1.},
                     'weight_decay': cfg['optimizer']['weight_decay'],
                     'name': cfg['optimizer']['name']}
            )
            for epoch in range(n_wu_epochs):
                run_phase('train', train_loader, model, cls_opt, epoch, args, cfg, logger)
                top1, _ = run_phase('test', test_loader, model, None, epoch, args, cfg, logger)

        # Main training loop
        for epoch in range(start_epoch, end_epoch):
            scheduler.step(epoch=epoch)
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
                test_loader.sampler.set_epoch(epoch)

            logger.add_line('=' * 30 + ' Epoch {} '.format(epoch) + '=' * 30)
            logger.add_line('LR: {}'.format(scheduler.get_lr()))
            run_phase('train', train_loader, model, optimizer, epoch, args, cfg, logger)
            top1, _ = run_phase('test', test_loader, model, None, epoch, args, cfg, logger)
            ckp_manager.save(model, optimizer, scheduler, epoch, eval_metric=top1)

    ################################ Eval ################################
    logger.add_line('\n' + '=' * 30 + ' Final evaluation ' + '=' * 30)
    cfg['dataset']['test']['clips_per_video'] = 25  # Evaluate with 25 clips per video for metric stability
    train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
    top1, top5 = run_phase('test', test_loader, model, None, end_epoch, args, cfg, logger)
    top1_dense, top5_dense = run_phase('test_dense', dense_loader, model, None, end_epoch, args, cfg, logger)

    logger.add_line('\n' + '=' * 30 + ' Evaluation done ' + '=' * 30)
    logger.add_line('Clip@1: {:6.2f}'.format(top1))
    logger.add_line('Clip@5: {:6.2f}'.format(top5))
    logger.add_line('Video@1: {:6.2f}'.format(top1_dense))
    logger.add_line('Video@5: {:6.2f}'.format(top5_dense))


def run_phase(phase, loader, model, optimizer, epoch, args, cfg, logger):
    from utils import metrics_utils
    batch_time = metrics_utils.AverageMeter('Time', ':6.3f', window_size=100)
    data_time = metrics_utils.AverageMeter('Data', ':6.3f', window_size=100)
    loss_meter = metrics_utils.AverageMeter('Loss', ':.4e')
    top1_meter = metrics_utils.AverageMeter('Acc@1', ':6.2f')
    top5_meter = metrics_utils.AverageMeter('Acc@5', ':6.2f')
    progress = utils.logger.ProgressMeter(len(loader), meters=[batch_time, data_time, loss_meter, top1_meter, top5_meter],
                                          phase=phase, epoch=epoch, logger=logger)

    # switch to train/test mode
    model.train(phase == 'train')

    if phase in {'test_dense', 'test'}:
        model = eval_utils.BatchWrapper(model, cfg['dataset']['batch_size'])

    criterion = torch.nn.CrossEntropyLoss()
    softmax = torch.nn.Softmax(dim=1)

    end = time.time()
    logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
    for it, sample in enumerate(loader):
        data_time.update(time.time() - end)

        video = sample['frames']
        target = sample['label'].cuda()
        if args.gpu is not None:
            video = video.cuda(args.gpu, non_blocking=True)
        if torch.cuda.device_count() == 1 and args.gpu is None:
            video = video.cuda()

        # compute outputs
        if phase == 'test_dense':
            batch_size, clips_per_sample = video.shape[0], video.shape[1]
            video = video.flatten(0, 1).contiguous()

        if phase == 'train':
            logits = model(video)
        else:
            with torch.no_grad():
                logits = model(video)

        # compute loss and accuracy
        if phase == 'test_dense':
            confidence = softmax(logits).view(batch_size, clips_per_sample, -1).mean(1)
            labels_tiled = target.unsqueeze(1).repeat(1, clips_per_sample).view(-1)
            loss = criterion(logits, labels_tiled)
        else:
            confidence = softmax(logits)
            loss = criterion(logits, target)

        with torch.no_grad():
            acc1, acc5 = metrics_utils.accuracy(confidence, target, topk=(1, 5))
            loss_meter.update(loss.item(), target.size(0))
            top1_meter.update(acc1[0], target.size(0))
            top5_meter.update(acc5[0], target.size(0))

        # compute gradient and do SGD step
        if phase == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if (it + 1) % 100 == 0 or it == 0 or it + 1 == len(loader):
            progress.display(it + 1)

    if args.distributed:
        progress.synchronize_meters(args.gpu)
        progress.display(len(loader) * args.world_size)

    return top1_meter.avg, top5_meter.avg


if __name__ == '__main__':
    main()
AVID-CMA-main
eval-action-recg.py
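The `test_dense` branch above turns clip-level predictions into a video-level prediction by averaging the per-clip softmax scores. A self-contained sketch of that reduction, with hypothetical shapes:

import torch

batch_size, clips_per_sample, num_classes = 2, 5, 10
# logits for every clip of every video, flattened exactly as in the loop above
logits = torch.randn(batch_size * clips_per_sample, num_classes)

confidence = torch.softmax(logits, dim=1).view(batch_size, clips_per_sample, -1).mean(1)
video_pred = confidence.argmax(dim=1)  # one prediction per video
print(confidence.shape, video_pred)    # torch.Size([2, 10]) tensor([...])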
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
import os
import random
import time
import warnings
import yaml

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.multiprocessing as mp

import utils.logger
from utils import main_utils

parser = argparse.ArgumentParser(description='AVID training')
parser.add_argument('cfg', help='config file')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:15475', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')


def main():
    args = parser.parse_args()
    cfg = yaml.safe_load(open(args.cfg))

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, cfg))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args, cfg)


def main_worker(gpu, ngpus_per_node, args, cfg):
    args.gpu = gpu

    # Setup environment
    args = main_utils.initialize_distributed_backend(args, ngpus_per_node)
    logger, tb_writter, model_dir = main_utils.prep_environment(args, cfg)

    # Define model
    model = main_utils.build_model(cfg['model'], logger)
    model, args, cfg['dataset']['batch_size'], cfg['num_workers'] = main_utils.distribute_model_to_cuda(
        model, args, cfg['dataset']['batch_size'], cfg['num_workers'], ngpus_per_node)

    # Define dataloaders
    train_loader = main_utils.build_dataloaders(cfg['dataset'], cfg['num_workers'], args.distributed, logger)

    # Define criterion
    device = args.gpu if args.gpu is not None else 0
    cfg['loss']['args']['embedding_dim'] = model.module.out_dim
    cfg['loss']['args']['device'] = device
    train_criterion = main_utils.build_criterion(cfg['loss'], logger=logger)

    # Define optimizer
    optimizer, scheduler = main_utils.build_optimizer(
        params=list(model.parameters()) + list(train_criterion.parameters()),
        cfg=cfg['optimizer'],
        logger=logger)
    ckp_manager = main_utils.CheckpointManager(model_dir, rank=args.rank)

    # Optionally resume from a checkpoint
    start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
    if cfg['resume']:
        if ckp_manager.checkpoint_exists(last=True):
            start_epoch = ckp_manager.restore(restore_last=True, model=model, optimizer=optimizer, train_criterion=train_criterion)
            scheduler.step(start_epoch)
            logger.add_line("Checkpoint loaded: '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))
        else:
            logger.add_line("No checkpoint found at '{}'".format(ckp_manager.last_checkpoint_fn()))

    cudnn.benchmark = True

    ############################ TRAIN #########################################
    test_freq = cfg['test_freq'] if 'test_freq' in cfg else 1
    for epoch in range(start_epoch, end_epoch):
        if epoch in cfg['optimizer']['lr']['milestones']:
            ckp_manager.save(epoch, model=model, train_criterion=train_criterion, optimizer=optimizer,
                             filename='checkpoint-ep{}.pth.tar'.format(epoch))
        if args.distributed:
            train_loader.sampler.set_epoch(epoch)
        scheduler.step(epoch)
        train_criterion.set_epoch(epoch)

        # Train for one epoch
        logger.add_line('=' * 30 + ' Epoch {} '.format(epoch) + '=' * 30)
        logger.add_line('LR: {}'.format(scheduler.get_lr()))
        run_phase('train', train_loader, model, optimizer, train_criterion, epoch, args, cfg, logger, tb_writter)
        if epoch % test_freq == 0 or epoch == end_epoch - 1:
            ckp_manager.save(epoch + 1, model=model, optimizer=optimizer, train_criterion=train_criterion)


def run_phase(phase, loader, model, optimizer, criterion, epoch, args, cfg, logger, tb_writter):
    from utils import metrics_utils
    logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
    batch_time = metrics_utils.AverageMeter('Time', ':6.3f', window_size=100)
    data_time = metrics_utils.AverageMeter('Data', ':6.3f', window_size=100)
    loss_meter = metrics_utils.AverageMeter('Loss', ':.3e')
    progress = utils.logger.ProgressMeter(len(loader), [batch_time, data_time, loss_meter],
                                          phase=phase, epoch=epoch, logger=logger, tb_writter=tb_writter)

    # switch to train mode
    model.train(phase == 'train')

    end = time.time()
    device = args.gpu if args.gpu is not None else 0
    for i, sample in enumerate(loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # Prepare batch
        video, audio, index = sample['frames'], sample['audio'], sample['index']
        video = video.cuda(device, non_blocking=True)
        audio = audio.cuda(device, non_blocking=True)
        index = index.cuda(device, non_blocking=True)

        # compute audio and video embeddings
        if phase == 'train':
            video_emb, audio_emb = model(video, audio)
        else:
            with torch.no_grad():
                video_emb, audio_emb = model(video, audio)

        # compute loss
        loss, loss_debug = criterion(video_emb, audio_emb, index)
        loss_meter.update(loss.item(), video.size(0))

        # compute gradient and do SGD step during training
        if phase == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print to terminal and tensorboard
        step = epoch * len(loader) + i
        if (i + 1) % cfg['print_freq'] == 0 or i == 0 or i + 1 == len(loader):
            progress.display(i + 1)
            if tb_writter is not None:
                for key in loss_debug:
                    tb_writter.add_scalar('{}-batch/{}'.format(phase, key), loss_debug[key].item(), step)

    # Sync metrics across all GPUs and print final averages
    if args.distributed:
        progress.synchronize_meters(args.gpu)
        progress.display(len(loader) * args.world_size)

    if tb_writter is not None:
        for meter in progress.meters:
            tb_writter.add_scalar('{}-epoch/{}'.format(phase, meter.name), meter.avg, epoch)


if __name__ == '__main__':
    main()
AVID-CMA-main
main-avid.py
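The training loop above drives a MultiStepLR schedule, dropping the learning rate by `gamma` at each milestone epoch. A minimal sketch of that decay with hypothetical milestones and rates (using the current no-argument `step()` API rather than the older `step(epoch)` call seen above):

import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
sched = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[2, 4], gamma=0.1)

for epoch in range(6):
    print(epoch, opt.param_groups[0]['lr'])  # 0.1, 0.1, 0.01, 0.01, 0.001, 0.001
    opt.step()
    sched.step()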
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
import time
import yaml
import torch

from utils import main_utils, eval_utils
import utils.logger
import torch.multiprocessing as mp

parser = argparse.ArgumentParser(description='Linear evaluation on action recognition')
parser.add_argument('cfg', metavar='CFG', help='config file')
parser.add_argument('model_cfg', metavar='CFG', help='config file')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--test-only', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--port', default='1234')


def main():
    args = parser.parse_args()
    cfg = yaml.safe_load(open(args.cfg))

    if args.test_only:
        cfg['test_only'] = True
    if args.resume:
        cfg['resume'] = True
    if args.debug:
        cfg['num_workers'] = 1
        cfg['dataset']['batch_size'] = 4

    ngpus = torch.cuda.device_count()
    for fold in range(1, cfg['dataset']['num_folds'] + 1):
        if args.distributed:
            mp.spawn(main_worker, nprocs=ngpus, args=(ngpus, fold, args, cfg))
        else:
            main_worker(None, ngpus, fold, args, cfg)


def main_worker(gpu, ngpus, fold, args, cfg):
    args.gpu = gpu
    args.world_size = ngpus

    # Prepare folder and logger
    eval_dir, model_cfg, logger = eval_utils.prepare_environment(args, cfg, fold)

    # Model
    model, ckp_manager = eval_utils.build_model(model_cfg, cfg, eval_dir, args, logger)

    # Optimizer
    optimizer, scheduler = main_utils.build_optimizer(model.parameters(), cfg['optimizer'], logger)

    # Datasets
    train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(
        cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)

    ################################ Train ################################
    start_epoch, end_epoch = 0, cfg['optimizer']['num_epochs']
    if (cfg['resume'] or args.test_only) and ckp_manager.checkpoint_exists(last=True):
        start_epoch = ckp_manager.restore(model, optimizer, scheduler, restore_last=True)
        logger.add_line("Loaded checkpoint '{}' (epoch {})".format(ckp_manager.last_checkpoint_fn(), start_epoch))

    if not cfg['test_only']:
        logger.add_line("=" * 30 + " Training " + "=" * 30)
        for epoch in range(start_epoch, end_epoch):
            scheduler.step(epoch=epoch)
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
                test_loader.sampler.set_epoch(epoch)

            logger.add_line('=' * 30 + ' Epoch {} '.format(epoch) + '=' * 30)
            logger.add_line('LR: {}'.format(scheduler.get_lr()))
            run_phase('train', train_loader, model, optimizer, epoch, args, cfg, logger)
            run_phase('test', test_loader, model, None, epoch, args, cfg, logger)
            ckp_manager.save(model, optimizer, scheduler, epoch)

    ################################ Eval ################################
    logger.add_line('\n' + '=' * 30 + ' Final evaluation ' + '=' * 30)
    cfg['dataset']['test']['clips_per_video'] = 25
    train_loader, test_loader, dense_loader = eval_utils.build_dataloaders(cfg['dataset'], fold, cfg['num_workers'], args.distributed, logger)
    top1_dense, top5_dense = run_phase('test_dense', dense_loader, model, None, end_epoch, args, cfg, logger)
    top1, top5 = run_phase('test', test_loader, model, None, end_epoch, args, cfg, logger)

    logger.add_line('\n' + '=' * 30 + ' Evaluation done ' + '=' * 30)
    for ft in top1:
        logger.add_line('')
        logger.add_line('[{}] Clip@1: {:6.2f}'.format(ft, top1[ft]))
        logger.add_line('[{}] Clip@5: {:6.2f}'.format(ft, top5[ft]))
        logger.add_line('[{}] Video@1: {:6.2f}'.format(ft, top1_dense[ft]))
        logger.add_line('[{}] Video@5: {:6.2f}'.format(ft, top5_dense[ft]))


def run_phase(phase, loader, model, optimizer, epoch, args, cfg, logger):
    from utils import metrics_utils
    logger.add_line('\n{}: Epoch {}'.format(phase, epoch))
    feature_names = cfg['model']['args']['feat_names']
    batch_time = metrics_utils.AverageMeter('Time', ':6.3f', 100)
    data_time = metrics_utils.AverageMeter('Data', ':6.3f', 100)
    loss_meters = {ft: metrics_utils.AverageMeter('Loss', ':.4e', 0) for ft in feature_names}
    top1_meters = {ft: metrics_utils.AverageMeter('Acc@1', ':6.2f', 0) for ft in feature_names}
    top5_meters = {ft: metrics_utils.AverageMeter('Acc@5', ':6.2f', 0) for ft in feature_names}
    progress = {'timers': utils.logger.ProgressMeter(len(loader), meters=[batch_time, data_time],
                                                     phase=phase, epoch=epoch, logger=logger)}
    progress.update({ft: utils.logger.ProgressMeter(len(loader), meters=[loss_meters[ft], top1_meters[ft], top5_meters[ft]],
                                                    phase=phase, epoch=epoch, logger=logger) for ft in feature_names})

    # switch to train/test mode
    model.train(phase == 'train')

    if phase in {'test_dense', 'test'}:
        model = BatchWrapper(model, cfg['dataset']['batch_size'])

    end = time.time()
    criterion = torch.nn.CrossEntropyLoss()
    softmax = torch.nn.Softmax(dim=1)
    for it, sample in enumerate(loader):
        data_time.update(time.time() - end)

        video = sample['frames']
        target = sample['label'].cuda()
        if args.gpu is not None:
            video = video.cuda(args.gpu, non_blocking=True)

        if phase == 'test_dense':
            batch_size, clips_per_sample = video.shape[0], video.shape[1]
            video = video.flatten(0, 1).contiguous()

        # compute outputs
        if phase == 'train':
            logits = model(video)
        else:
            with torch.no_grad():
                logits = model(video)

        # compute loss and measure accuracy
        total_loss = 0.
        for ft in feature_names:
            if phase == 'test_dense':
                confidence = softmax(logits[ft]).view(batch_size, clips_per_sample, -1).mean(1)
                target_tiled = target.unsqueeze(1).repeat(1, clips_per_sample).view(-1)
                loss = criterion(logits[ft], target_tiled)
            else:
                confidence = softmax(logits[ft])
                loss = criterion(logits[ft], target)
            total_loss += loss

            with torch.no_grad():
                acc1, acc5 = metrics_utils.accuracy(confidence, target, topk=(1, 5))
                loss_meters[ft].update(loss.item(), target.size(0))
                top1_meters[ft].update(acc1[0].item(), target.size(0))
                top5_meters[ft].update(acc5[0].item(), target.size(0))

        # compute gradient and do SGD step
        if phase == 'train':
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if (it + 1) % 100 == 0 or it == 0 or it + 1 == len(loader):
            for ft in progress:
                progress[ft].display(it + 1)

    if args.distributed:
        for ft in progress:
            progress[ft].synchronize_meters(args.gpu)
            progress[ft].display(len(loader) * args.world_size)

    return {ft: top1_meters[ft].avg for ft in feature_names}, {ft: top5_meters[ft].avg for ft in feature_names}


class BatchWrapper:
    def __init__(self, model, batch_size):
        self.model = model
        self.batch_size = batch_size

    def __call__(self, x):
        from collections import defaultdict
        outs = defaultdict(list)
        for i in range(0, x.shape[0], self.batch_size):
            odict = self.model(x[i:i + self.batch_size])
            for k in odict:
                outs[k] += [odict[k]]
        for k in outs:
            outs[k] = torch.cat(outs[k], 0)
        return outs


if __name__ == '__main__':
    main()
AVID-CMA-main
eval-action-recg-linear.py
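`BatchWrapper` above chunks an oversized dense-evaluation batch so it fits in memory, then concatenates the per-chunk outputs key by key. A small self-contained sketch of the same idea with a toy dict-returning model:

import torch
from collections import defaultdict

class ToyModel(torch.nn.Module):
    def forward(self, x):
        # return a dict, as the wrapped models here do
        return {'logits': x.sum(dim=1, keepdim=True)}

def batched_forward(model, x, batch_size):
    outs = defaultdict(list)
    for i in range(0, x.shape[0], batch_size):
        odict = model(x[i:i + batch_size])
        for k in odict:
            outs[k].append(odict[k])
    return {k: torch.cat(v, 0) for k, v in outs.items()}

x = torch.randn(10, 4)
print(batched_forward(ToyModel(), x, batch_size=3)['logits'].shape)  # torch.Size([10, 1])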
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import csv
import numpy as np
import glob

from datasets.video_db import VideoDataset

DATA_PATH = '/data/datasets/AS240/data/'
CACHE_PATH = 'datasets/cache/audioset'


class AudiosetClasses:
    def __init__(self):
        ann_list = list(csv.DictReader(open(CACHE_PATH + '/class_labels_indices.csv')))
        self.classes = [ann['mid'] for ann in ann_list]
        self.class_label = {ann['mid']: int(ann['index']) for ann in ann_list}
        self.display_name = {ann['mid']: ann['display_name'] for ann in ann_list}

    def __getitem__(self, index):
        return self.display_name[self.classes[index]]

    def __len__(self):
        return len(self.classes)

    def class2index(self, class_string):
        return self.class_label[class_string]


class AudioSet(VideoDataset):
    def __init__(self, subset,
                 return_video=True,
                 video_clip_duration=1.,
                 video_fps=25.,
                 video_transform=None,
                 return_audio=False,
                 audio_clip_duration=1.,
                 audio_fps=None,
                 audio_fps_out=64,
                 audio_transform=None,
                 return_labels=False,
                 return_index=False,
                 max_offsync_augm=0,
                 mode='clip',
                 clips_per_video=1,
                 ):
        root = f"{DATA_PATH}/{subset.split('-')[0]}_segments/video"
        classes = AudiosetClasses()
        filenames = [f"{ln.strip().split()[0]}" for ln in open(f"{CACHE_PATH}/{subset}.txt")]
        available = set([fn.split('/')[-1].split('.')[0] for fn in glob.glob(f"{root}/*")])
        filenames = [fn for fn in filenames if fn.split('.')[0] in available]
        assert return_labels is False
        labels = None

        super(AudioSet, self).__init__(
            return_video=return_video,
            video_clip_duration=video_clip_duration,
            video_root=root,
            video_fns=filenames,
            video_fps=video_fps,
            video_transform=video_transform,
            return_audio=return_audio,
            audio_clip_duration=audio_clip_duration,
            audio_root=root,
            audio_fns=filenames,
            audio_fps=audio_fps,
            audio_fps_out=audio_fps_out,
            audio_transform=audio_transform,
            return_labels=return_labels,
            labels=labels,
            return_index=return_index,
            max_offsync_augm=max_offsync_augm,
            mode=mode,
            clips_per_video=clips_per_video,
        )

        self.name = 'AudioSet dataset'
        self.root = root
        self.subset = subset
        self.num_videos = len(filenames)
        self.num_classes = len(classes)
        self.sample_id = np.array([fn.split('.')[0].encode('utf-8') for fn in filenames])
AVID-CMA-main
datasets/audioset.py
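The constructor above keeps only the cached filenames whose video actually exists under `root`. The same intersection, sketched with in-memory lists in place of the cache file and glob:

filenames = ['abc123.mp4', 'def456.mp4', 'ghi789.mp4']
on_disk = {'abc123', 'ghi789'}  # stems that glob would find under root
kept = [fn for fn in filenames if fn.split('.')[0] in on_disk]
print(kept)  # ['abc123.mp4', 'ghi789.mp4']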
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from .audioset import AudioSet
from .kinetics import Kinetics
from .ucf import UCF
from .hmdb import HMDB
AVID-CMA-main
datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import torch
import numpy as np
import random
import librosa

from utils.videotransforms import video_transforms, volume_transforms, tensor_transforms


class VideoPrep_MSC_CJ(object):
    def __init__(self,
                 crop=(224, 224),
                 color=(0.4, 0.4, 0.4, 0.2),
                 min_area=0.08,
                 augment=True,
                 normalize=True,
                 totensor=True,
                 num_frames=8,
                 pad_missing=False,
                 ):
        self.crop = crop
        self.augment = augment
        self.num_frames = num_frames
        self.pad_missing = pad_missing
        if normalize:
            assert totensor

        if augment:
            transforms = [
                video_transforms.RandomResizedCrop(crop, scale=(min_area, 1.)),
                video_transforms.RandomHorizontalFlip(),
                video_transforms.ColorJitter(*color),
            ]
        else:
            transforms = [
                video_transforms.Resize(int(crop[0] / 0.875)),
                video_transforms.CenterCrop(crop),
            ]

        if totensor:
            transforms += [volume_transforms.ClipToTensor()]
            if normalize:
                transforms += [tensor_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
        self.transform = video_transforms.Compose(transforms)

    def __call__(self, frames):
        frames = self.transform(frames)
        if self.pad_missing:
            while True:
                n_missing = self.num_frames - frames.shape[1]
                if n_missing > 0:
                    frames = torch.cat((frames, frames[:, :n_missing]), 1)
                else:
                    break
        return frames


class VideoPrep_Crop_CJ(object):
    def __init__(self,
                 resize=(256, 256),
                 crop=(224, 224),
                 color=(0.4, 0.4, 0.4, 0.2),
                 num_frames=8,
                 pad_missing=False,
                 augment=True,
                 normalize=True,
                 totensor=True,
                 ):
        self.resize = resize
        self.crop = crop
        self.augment = augment
        self.num_frames = num_frames
        self.pad_missing = pad_missing
        if normalize:
            assert totensor

        if augment:
            transforms = [
                video_transforms.Resize(resize),
                video_transforms.RandomCrop(crop),
                video_transforms.RandomHorizontalFlip(),
                video_transforms.ColorJitter(*color),
            ]
        else:
            transforms = [
                video_transforms.Resize(resize),
                video_transforms.CenterCrop(crop),
            ]

        if totensor:
            transforms += [volume_transforms.ClipToTensor()]
            if normalize:
                transforms += [tensor_transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
        self.transform = video_transforms.Compose(transforms)

    def __call__(self, frames):
        if isinstance(frames[0], list):
            return torch.stack([self(f) for f in frames])
        frames = self.transform(frames)
        if self.pad_missing:
            while True:
                n_missing = self.num_frames - frames.shape[1]
                if n_missing > 0:
                    frames = torch.cat((frames, frames[:, :n_missing]), 1)
                else:
                    break
        return frames


class AudioPrep(object):
    def __init__(self, trim_pad=True, duration=None, missing_as_zero=False, augment=False, to_tensor=False, volume=0.1):
        self.trim_pad = trim_pad
        self.missing_as_zero = missing_as_zero
        self.augment = augment
        self.to_tensor = to_tensor
        self.volume = volume
        if trim_pad:
            assert duration is not None
            self.duration = duration

    def __call__(self, sig, sr, duration=None):
        if duration is None:
            duration = self.duration
        num_frames = int(duration * sr)

        # Check if audio is missing
        if self.missing_as_zero and sig is None:
            sig = np.zeros((1, num_frames), dtype=np.float32)

        # Downmix to mono
        sig = sig.mean(0).astype(np.float32)

        # Trim or pad to constant shape
        if self.trim_pad:
            if sig.shape[0] > num_frames:
                sig = sig[:num_frames]
            elif sig.shape[0] < num_frames:
                n_pad = num_frames - sig.shape[0]
                sig = np.pad(sig, (0, n_pad), mode='constant', constant_values=(0., 0.))

        # Augment by changing volume +/- 10%
        if self.augment:
            sig *= random.uniform(1. - self.volume, 1. + self.volume)

        sig = sig[np.newaxis]
        if self.to_tensor:
            sig = torch.from_numpy(sig)
        return sig, sr


class LogSpectrogram(object):
    def __init__(self, fps, n_fft=512, hop_size=0.005, normalize=False):
        self.inp_fps = fps
        self.n_fft = n_fft
        self.hop_size = hop_size
        self.rate = 1. / hop_size
        self.normalize = normalize
        if self.normalize:
            if n_fft == 512 and fps == 24000:
                stats = np.load('datasets/assets/audio-spectDB-24k-513-norm-stats.npz')
            elif n_fft == 256 and fps == 24000:
                stats = np.load('datasets/assets/audio-spectDB-24k-257-norm-stats.npz')
            self.mean, self.std = stats['mean'], stats['std']

    def __call__(self, sig, sr, duration=None):
        hop_length = int(self.hop_size * sr)
        spect = np.abs(librosa.stft(sig[0], n_fft=self.n_fft * 2, hop_length=hop_length)) ** 2.
        spect = np.concatenate([spect[:1], spect[1:].reshape(self.n_fft // 2, 2, -1).mean(1)], 0)
        if duration is not None:
            num_frames = int(duration * self.rate)
            spect = spect[:, :num_frames]
        spect = librosa.core.power_to_db(spect, top_db=100)
        if self.normalize:
            spect = (spect - self.mean[:, np.newaxis]) / (self.std[:, np.newaxis] + 1e-5)
        spect_tensor = torch.from_numpy(spect)
        spect_tensor = torch.transpose(spect_tensor, 0, 1).unsqueeze(0)
        return spect_tensor, self.rate
AVID-CMA-main
datasets/preprocessing.py
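A simplified sketch of the audio path these two transforms implement: downmix to mono, then a power spectrogram converted to dB. The 24 kHz rate and 1-second signal are hypothetical, and the frequency-bin folding and normalization stats from the real class are skipped here.

import numpy as np
import torch
import librosa

sr = 24000
sig = np.random.randn(2, sr).astype(np.float32)  # 1 s of fake stereo audio

# AudioPrep: downmix to mono (trim/pad omitted since the length already matches)
mono = sig.mean(0)[np.newaxis]

# LogSpectrogram: power spectrogram -> dB, ~200 frames/s at hop_size=0.005
spect = np.abs(librosa.stft(mono[0], n_fft=512, hop_length=int(0.005 * sr))) ** 2
spect_db = librosa.power_to_db(spect, top_db=100)
print(torch.from_numpy(spect_db).t().unsqueeze(0).shape)  # (1, time frames, freq bins)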
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import random
import torch
import numpy as np
import torch.utils.data as data

from utils.ioutils import av_wrappers
from collections import defaultdict


def chararray(fn_list):
    charr = np.chararray(len(fn_list), itemsize=max([len(fn) for fn in fn_list]))
    for i in range(len(fn_list)):
        charr[i] = fn_list[i]
    return charr


class VideoDataset(data.Dataset):
    def __init__(self,
                 return_video=True,
                 video_root=None,
                 video_fns=None,
                 video_clip_duration=1.,
                 video_fps=25,
                 video_transform=None,
                 return_audio=True,
                 audio_root=None,
                 audio_fns=None,
                 audio_clip_duration=1.,
                 audio_fps=None,
                 audio_fps_out=None,
                 audio_transform=None,
                 return_labels=False,
                 labels=None,
                 return_index=False,
                 mode='clip',
                 clips_per_video=1,
                 max_offsync_augm=0,
                 ):
        super(VideoDataset, self).__init__()

        self.num_samples = 0

        self.return_video = return_video
        self.video_root = video_root
        if return_video:
            self.video_fns = chararray(video_fns)
            self.num_samples = self.video_fns.shape[0]
        self.video_fps = video_fps

        if video_transform is not None:
            if not isinstance(video_transform, list):
                video_transform = [video_transform]
        self.video_transform = video_transform

        self.return_audio = return_audio
        self.audio_root = audio_root
        if return_audio:
            self.audio_fns = chararray(audio_fns)
            self.num_samples = self.audio_fns.shape[0]
        self.audio_fps = audio_fps
        self.audio_fps_out = audio_fps_out
        self.audio_transform = audio_transform

        self.return_labels = return_labels
        if return_labels:
            self.labels = np.array(labels)
        self.return_index = return_index

        self.video_clip_duration = video_clip_duration
        self.audio_clip_duration = audio_clip_duration
        self.max_offsync_augm = max_offsync_augm
        self.clips_per_video = clips_per_video
        self.mode = mode

    def _load_sample(self, sample_idx):
        video_ctr = None
        if self.return_video:
            video_fn = '{}/{}'.format(self.video_root, self.video_fns[sample_idx].decode())
            video_ctr = av_wrappers.av_open(video_fn)

        audio_ctr = None
        if self.return_audio:
            audio_fn = '{}/{}'.format(self.audio_root, self.audio_fns[sample_idx].decode())
            if self.return_video and audio_fn == video_fn:
                audio_ctr = video_ctr
            else:
                audio_ctr = av_wrappers.av_open(audio_fn)

        return video_ctr, audio_ctr

    def __getitem__(self, index):
        if self.mode == 'clip':
            try:
                sample_idx = index % self.num_samples
                video_ctr, audio_ctr = self._load_sample(sample_idx)
                v_ss, v_dur, a_ss, a_dur = self._sample_snippet(video_ctr, audio_ctr)
                sample = self._get_clip(sample_idx, video_ctr, audio_ctr, v_ss, a_ss,
                                        video_clip_duration=v_dur, audio_clip_duration=a_dur)
                if sample is None:
                    return self[(index + 1) % len(self)]
                return sample
            except Exception:
                return self[(index + 1) % len(self)]
        else:
            video_ctr, audio_ctr = self._load_sample(index)

            # Load entire video
            vs, vf, ss, sf = self._get_time_lims(video_ctr, audio_ctr)
            start_time = vs
            final_time = vf
            if self.return_audio:
                start_time = max(vs, ss) if ss < 0 else vs
                final_time = min(vf, sf) if ss < 0 else vf
            if final_time <= start_time:
                final_time = start_time + max(self.video_clip_duration, self.audio_clip_duration)
            video_dur = final_time - start_time
            sample = self._get_clip(index, video_ctr, audio_ctr, start_time, start_time,
                                    video_clip_duration=video_dur, audio_clip_duration=video_dur)

            # Split video into overlapping chunks
            chunks = defaultdict(list)
            if self.return_video:
                nf = sample['frames'].shape[1]
                chunk_size = int(self.video_clip_duration * self.video_fps)
                if chunk_size >= nf:
                    chunks['frames'] = torch.stack([sample['frames'] for _ in range(self.clips_per_video)])
                else:
                    timestamps = np.linspace(0, max(nf - chunk_size, 1), self.clips_per_video).astype(int)
                    chunks['frames'] = torch.stack([sample['frames'][:, ss:ss + chunk_size] for ss in timestamps])

            if self.return_audio:
                nf = sample['audio'].shape[1]
                chunk_size = int(self.audio_clip_duration * self.audio_fps_out)
                if chunk_size >= nf:
                    chunks['audio'] = torch.stack([sample['audio'] for _ in range(self.clips_per_video)])
                else:
                    timestamps = np.linspace(0, max(nf - chunk_size, 1), self.clips_per_video).astype(int)
                    chunks['audio'] = torch.stack([sample['audio'][:, ss:ss + chunk_size] for ss in timestamps])

            if self.return_labels:
                chunks['label'] = sample['label']

            if self.return_index:
                ts = torch.from_numpy(np.linspace(start_time, final_time - self.video_clip_duration, self.clips_per_video))
                chunks['index'] = torch.stack([sample['index'][:1].repeat(self.clips_per_video), ts.float()], dim=1)

            return chunks

    def __len__(self):
        if self.mode == 'clip':
            return self.num_samples * self.clips_per_video
        else:
            return self.num_samples

    def __repr__(self):
        desc = "{}\n - Root: {}\n - Subset: {}\n - Num videos: {}\n - Num samples: {}\n".format(
            self.name, self.root, self.subset, self.num_videos, self.num_videos * self.clips_per_video)
        if self.return_video:
            desc += " - Example video: {}/{}\n".format(self.video_root, self.video_fns[0].decode())
        if self.return_audio:
            desc += " - Example audio: {}/{}\n".format(self.audio_root, self.audio_fns[0].decode())
        return desc

    def _get_time_lims(self, video_ctr, audio_ctr):
        video_st, video_ft, audio_st, audio_ft = None, None, None, None
        if video_ctr is not None:
            video_stream = video_ctr.streams.video[0]
            tbase = video_stream.time_base
            video_st = video_stream.start_time * tbase
            video_dur = video_stream.duration * tbase
            video_ft = video_st + video_dur

        if audio_ctr is not None:
            audio_stream = audio_ctr.streams.audio[0]
            tbase = audio_stream.time_base
            audio_st = audio_stream.start_time * tbase
            audio_dur = audio_stream.duration * tbase
            audio_ft = audio_st + audio_dur

        return video_st, video_ft, audio_st, audio_ft

    def _sample_snippet(self, video_ctr, audio_ctr):
        video_st, video_ft, audio_st, audio_ft = self._get_time_lims(video_ctr, audio_ctr)
        if not self.return_audio:
            video_duration = video_ft - video_st
            if self.video_clip_duration > video_duration:
                return 0., video_duration, 0., video_duration
            else:
                min_d, max_d = self.video_clip_duration, min(self.video_clip_duration, video_duration)
                duration = random.uniform(min_d, max_d)
                sample_ss_v = random.uniform(video_st, video_ft - duration)
                return sample_ss_v, duration, sample_ss_v, duration
        else:
            min_ss = max(audio_st, video_st)
            max_ss = min(audio_ft - self.audio_clip_duration, video_ft - self.video_clip_duration)
            assert max_ss > min_ss
            if self.audio_clip_duration > self.video_clip_duration:
                sample_ss_a = random.uniform(min_ss, max_ss)
                sample_tt_a = sample_ss_a + self.audio_clip_duration
                win_min = max(sample_ss_a - self.max_offsync_augm, video_st)
                win_max = min(sample_tt_a + self.max_offsync_augm - self.video_clip_duration, video_ft)
                sample_ss_v = random.uniform(win_min, win_max)
                return sample_ss_v, self.video_clip_duration, sample_ss_a, self.audio_clip_duration
            else:
                sample_ss_v = random.uniform(min_ss, max_ss)
                sample_tt_v = sample_ss_v + self.video_clip_duration
                win_min = max(sample_ss_v - self.max_offsync_augm, audio_st)
                win_max = min(sample_tt_v + self.max_offsync_augm - self.audio_clip_duration, audio_ft)
                sample_ss_a = random.uniform(win_min, win_max)
                return sample_ss_v, self.video_clip_duration, sample_ss_a, self.audio_clip_duration

    def _get_clip(self, clip_idx, video_ctr, audio_ctr, video_start_time, audio_start_time,
                  video_clip_duration=None, audio_clip_duration=None):
        if video_clip_duration is None:
            video_clip_duration = self.video_clip_duration
        if audio_clip_duration is None:
            audio_clip_duration = self.audio_clip_duration

        sample = {}
        if self.return_video:
            frames, fps, start_time = av_wrappers.av_load_video(
                video_ctr,
                video_fps=self.video_fps,
                start_time=video_start_time,
                duration=video_clip_duration,
            )
            if self.video_transform is not None:
                for t in self.video_transform:
                    frames = t(frames)

            sample['frames'] = frames
            audio_start_time = audio_start_time - (video_start_time - start_time)

        if self.return_audio:
            samples, rate = av_wrappers.av_laod_audio(
                audio_ctr,
                audio_fps=self.audio_fps,
                start_time=audio_start_time,
                duration=audio_clip_duration,
            )
            if self.audio_transform is not None:
                if isinstance(self.audio_transform, list):
                    for t in self.audio_transform:
                        samples, rate = t(samples, rate, audio_clip_duration)
                else:
                    samples, rate = self.audio_transform(samples, rate)
            sample['audio'] = samples

        if self.return_labels:
            lbl = self.labels[clip_idx]
            if isinstance(lbl, np.ndarray):
                sample['label'] = torch.from_numpy(lbl)
            else:
                sample['label'] = lbl

        if self.return_index:
            sample['index'] = clip_idx

        return sample
AVID-CMA-main
datasets/video_db.py
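In video mode above, a decoded video is split into `clips_per_video` chunks whose start frames are spread evenly over the whole clip via `np.linspace`. That start-frame computation in isolation, with hypothetical frame counts:

import numpy as np

nf, chunk_size, clips_per_video = 250, 32, 8  # hypothetical values
starts = np.linspace(0, max(nf - chunk_size, 1), clips_per_video).astype(int)
print(starts)  # evenly spaced clip start frames covering the whole video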
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import os

from datasets.video_db import VideoDataset

DATA_PATH = '/data/datasets/hmdb/videos'
ANNO_PATH = '/data/datasets/hmdb/splits/'


class HMDB(VideoDataset):
    def __init__(self, subset,
                 return_video=True,
                 video_clip_duration=1.,
                 video_fps=25.,
                 video_transform=None,
                 return_audio=False,
                 return_labels=False,
                 max_offsync_augm=0,
                 mode='clip',
                 clips_per_video=20,
                 ):
        assert return_audio is False
        self.name = 'HMDB-51'
        self.root = DATA_PATH
        self.subset = subset

        # Get filenames
        classes = sorted(os.listdir(DATA_PATH))
        subset, split = subset.split('-')
        subset_id = {'train': '1', 'test': '2'}[subset]
        filenames, labels = [], []
        for cls in classes:
            for ln in open(f'{ANNO_PATH}/{cls}_test_{split}.txt'):
                fn, ss = ln.strip().split()
                if ss == subset_id:
                    filenames += [f"{cls}/{fn}"]
                    labels += [classes.index(cls)]

        self.classes = classes
        self.num_classes = len(self.classes)
        self.num_videos = len(filenames)

        super(HMDB, self).__init__(
            return_video=return_video,
            video_clip_duration=video_clip_duration,
            video_root=DATA_PATH,
            video_fns=filenames,
            video_fps=video_fps,
            video_transform=video_transform,
            return_audio=False,
            return_labels=return_labels,
            labels=labels,
            max_offsync_augm=max_offsync_augm,
            mode=mode,
            clips_per_video=clips_per_video,
        )
AVID-CMA-main
datasets/hmdb.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

from datasets.video_db import VideoDataset

DATA_PATH = '/data/datasets/UCF101/data'
ANNO_PATH = '/data/datasets/UCF101/ucfTrainTestlist/'


class UCF(VideoDataset):
    def __init__(self, subset,
                 video_clip_duration=0.5,
                 return_video=True,
                 video_fps=16.,
                 video_transform=None,
                 return_audio=False,
                 return_labels=False,
                 max_offsync_augm=0,
                 mode='clip',
                 clips_per_video=20,
                 ):
        assert return_audio is False
        self.name = 'UCF-101'
        self.root = DATA_PATH
        self.subset = subset

        classes_fn = f'{ANNO_PATH}/classInd.txt'
        self.classes = [l.strip().split()[1] for l in open(classes_fn)]

        filenames = [ln.strip().split()[0] for ln in open(f'{ANNO_PATH}/{subset}.txt')]
        labels = [fn.split('/')[0] for fn in filenames]
        labels = [self.classes.index(cls) for cls in labels]

        self.num_classes = len(self.classes)
        self.num_videos = len(filenames)

        super(UCF, self).__init__(
            return_video=return_video,
            video_root=DATA_PATH,
            video_clip_duration=video_clip_duration,
            video_fns=filenames,
            video_fps=video_fps,
            video_transform=video_transform,
            return_audio=False,
            return_labels=return_labels,
            labels=labels,
            max_offsync_augm=max_offsync_augm,
            mode=mode,
            clips_per_video=clips_per_video,
        )
AVID-CMA-main
datasets/ucf.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import os
import glob
import numpy as np

DATA_PATH = '/data/datasets/kinetics/'

from datasets.video_db import VideoDataset


class Kinetics(VideoDataset):
    def __init__(self, subset,
                 return_video=True,
                 video_clip_duration=1.,
                 video_fps=25.,
                 video_transform=None,
                 return_audio=False,
                 audio_clip_duration=1.,
                 audio_fps=None,
                 audio_fps_out=64,
                 audio_transform=None,
                 return_labels=False,
                 return_index=False,
                 max_offsync_augm=0,
                 mode='clip',
                 clips_per_video=1,
                 ):
        classes = sorted(os.listdir(f"{DATA_PATH}/{subset}"))
        filenames = ['/'.join(fn.split('/')[-2:]) for fn in glob.glob(f"{DATA_PATH}/{subset}/*/*.mp4")]
        labels = [classes.index(fn.split('/')[-2]) for fn in filenames]

        super(Kinetics, self).__init__(
            return_video=return_video,
            video_root=f"{DATA_PATH}/{subset}",
            video_fns=filenames,
            video_clip_duration=video_clip_duration,
            video_fps=video_fps,
            video_transform=video_transform,
            return_audio=return_audio,
            audio_root=f"{DATA_PATH}/{subset}",
            audio_fns=filenames,
            audio_clip_duration=audio_clip_duration,
            audio_fps=audio_fps,
            audio_fps_out=audio_fps_out,
            audio_transform=audio_transform,
            return_labels=return_labels,
            labels=labels,
            return_index=return_index,
            mode=mode,
            clips_per_video=clips_per_video,
            max_offsync_augm=max_offsync_augm,
        )

        self.name = 'Kinetics dataset'
        self.root = f"{DATA_PATH}/{subset}"
        self.subset = subset
        self.classes = classes
        self.num_videos = len(filenames)
        self.num_classes = len(classes)
        self.sample_id = np.array([fn.split('/')[-1].split('.')[0].encode('utf-8') for fn in filenames])
AVID-CMA-main
datasets/kinetics.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
AVID-CMA-main
utils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import datetime
import sys
import torch
from torch import distributed as dist


class Logger(object):
    def __init__(self, quiet=False, log_fn=None, rank=0, prefix=""):
        self.rank = rank if rank is not None else 0
        self.quiet = quiet
        self.log_fn = log_fn

        self.prefix = ""
        if prefix:
            self.prefix = prefix + ' | '

        self.file_pointers = []
        if self.rank == 0:
            if self.quiet:
                open(log_fn, 'w').close()

    def add_line(self, content):
        if self.rank == 0:
            msg = self.prefix + content
            if self.quiet:
                fp = open(self.log_fn, 'a')
                fp.write(msg + '\n')
                fp.flush()
                fp.close()
            else:
                print(msg)
                sys.stdout.flush()


class ProgressMeter(object):
    def __init__(self, num_batches, meters, phase, epoch=None, logger=None, tb_writter=None):
        self.batches_per_epoch = num_batches
        self.batch_fmtstr = self._get_batch_fmtstr(epoch, num_batches)
        self.meters = meters
        self.phase = phase
        self.epoch = epoch
        self.logger = logger
        self.tb_writter = tb_writter

    def display(self, batch):
        step = self.epoch * self.batches_per_epoch + batch
        date = str(datetime.datetime.now())
        entries = ['{} | {} {}'.format(date, self.phase, self.batch_fmtstr.format(batch))]
        entries += [str(meter) for meter in self.meters]
        if self.logger is None:
            print('\t'.join(entries))
        else:
            self.logger.add_line('\t'.join(entries))
        if self.tb_writter is not None:
            for meter in self.meters:
                self.tb_writter.add_scalar('{}-batch/{}'.format(self.phase, meter.name), meter.val, step)

    def _get_batch_fmtstr(self, epoch, num_batches):
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        epoch_str = '[{}]'.format(epoch) if epoch is not None else ''
        return epoch_str + '[' + fmt + '/' + fmt.format(num_batches) + ']'

    def synchronize_meters(self, cur_gpu):
        # average each meter across all workers
        metrics = torch.tensor([m.avg for m in self.meters]).cuda(cur_gpu)
        metrics_gather = [torch.ones_like(metrics) for _ in range(dist.get_world_size())]
        dist.all_gather(metrics_gather, metrics)
        metrics = torch.stack(metrics_gather).mean(0).cpu().numpy()
        for meter, m in zip(self.meters, metrics):
            meter.avg = m
AVID-CMA-main
utils/logger.py
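A minimal sketch of how `AverageMeter` and `ProgressMeter` are driven inside a loop. It imports the repo's own modules, so it only runs from the repository root; the loss values and batch size are fake.

from utils.metrics_utils import AverageMeter
from utils.logger import ProgressMeter

loss = AverageMeter('Loss', ':.4e')
batch_time = AverageMeter('Time', ':6.3f', window_size=100)
progress = ProgressMeter(num_batches=50, meters=[batch_time, loss], phase='train', epoch=0)

for it in range(50):
    loss.update(1.0 / (it + 1), n=16)  # fake per-batch loss, batch size 16
    batch_time.update(0.05)
    if (it + 1) % 10 == 0:
        progress.display(it + 1)       # no logger set, so this prints to stdout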
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import torch
from torch import distributed as dist


def _gather_from_all(tensor):
    """
    Gather tensors from all gpus
    """
    gathered_tensor = [torch.zeros_like(tensor) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered_tensor, tensor)
    gathered_tensor = torch.cat(gathered_tensor, 0)
    return gathered_tensor
AVID-CMA-main
utils/distributed_utils.py
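A single-process sketch of the all-gather pattern `_gather_from_all` wraps. The gloo backend and port are hypothetical stand-ins so it runs on CPU without GPUs; with `world_size=1` the gathered result simply equals the input.

import torch
import torch.distributed as dist

dist.init_process_group(backend='gloo', init_method='tcp://localhost:23456',
                        world_size=1, rank=0)
x = torch.arange(4).float()
gathered = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(gathered, x)
print(torch.cat(gathered, 0))  # identical to x when world_size == 1
dist.destroy_process_group()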
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import torch
from collections import deque


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, name, fmt=':f', window_size=0):
        self.name = name
        self.fmt = fmt
        self.window_size = window_size
        self.reset()

    def reset(self):
        if self.window_size > 0:
            self.q = deque(maxlen=self.window_size)
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        if self.window_size > 0:
            self.q.append((val, n))
            self.count = sum([n for v, n in self.q])
            self.sum = sum([v * n for v, n in self.q])
        else:
            self.sum += val * n
            self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
AVID-CMA-main
utils/metrics_utils.py
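A tiny worked example of the `accuracy` helper above (run from the repository root so the import resolves): two of the three predictions match the targets, giving 66.67% top-1.

import torch
from utils.metrics_utils import accuracy

logits = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targets = torch.tensor([1, 0, 0])
top1, = accuracy(logits, targets, topk=(1,))
print(top1)  # tensor([66.6667]) -- 2 of 3 correct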
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import torch


class AliasMethod(object):
    """
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    """
    def __init__(self, probs):
        if probs.sum() > 1:
            probs.div_(probs.sum())
        K = len(probs)
        self.prob = torch.zeros(K)
        self.alias = torch.LongTensor([0] * K)

        # Sort the data into the outcomes with probabilities
        # that are larger and smaller than 1/K.
        smaller = []
        larger = []
        for kk, prob in enumerate(probs):
            self.prob[kk] = K * prob
            if self.prob[kk] < 1.0:
                smaller.append(kk)
            else:
                larger.append(kk)

        # Loop though and create little binary mixtures that
        # appropriately allocate the larger outcomes over the
        # overall uniform mixture.
        while len(smaller) > 0 and len(larger) > 0:
            small = smaller.pop()
            large = larger.pop()

            self.alias[small] = large
            self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]

            if self.prob[large] < 1.0:
                smaller.append(large)
            else:
                larger.append(large)

        for last_one in smaller + larger:
            self.prob[last_one] = 1

    def to(self, device):
        self.prob = self.prob.to(device)
        self.alias = self.alias.to(device)

    def draw(self, N):
        """
        Draw N samples from multinomial
        :param N: number of samples
        :return: samples
        """
        K = self.alias.size(0)
        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        # b is whether a random number is greater than q
        b = torch.bernoulli(prob)
        oq = kk.mul(b.long())
        oj = alias.mul((1 - b).long())
        return oq + oj
AVID-CMA-main
utils/alias_method.py
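A quick sanity check of the alias-method sampler above (run from the repository root): with enough draws, the empirical frequencies converge to the target distribution.

import torch
from utils.alias_method import AliasMethod

probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
sampler = AliasMethod(probs)
draws = sampler.draw(100000)
print(torch.bincount(draws, minlength=4).float() / 100000)  # ~ [0.1, 0.2, 0.3, 0.4]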
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import os
import shutil
import torch
import numpy as np
import torch.distributed as dist
import datetime

from utils.logger import Logger


def initialize_distributed_backend(args, ngpus_per_node):
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + args.gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)

    if args.rank == -1:
        args.rank = 0
    return args


def prep_environment(args, cfg):
    from torch.utils.tensorboard import SummaryWriter

    # Prepare loggers (must be configured after initialize_distributed_backend())
    model_dir = '{}/{}'.format(cfg['model']['model_dir'], cfg['model']['name'])
    if args.rank == 0:
        prep_output_folder(model_dir, False)
    log_fn = '{}/train.log'.format(model_dir)
    logger = Logger(quiet=args.quiet, log_fn=log_fn, rank=args.rank)

    logger.add_line(str(datetime.datetime.now()))
    if any(['SLURM' in env for env in list(os.environ.keys())]):
        logger.add_line("=" * 30 + " SLURM " + "=" * 30)
        for env in os.environ.keys():
            if 'SLURM' in env:
                logger.add_line('{:30}: {}'.format(env, os.environ[env]))
    logger.add_line("=" * 30 + " Config " + "=" * 30)

    def print_dict(d, ident=''):
        for k in d:
            if isinstance(d[k], dict):
                logger.add_line("{}{}".format(ident, k))
                print_dict(d[k], ident='  ' + ident)
            else:
                logger.add_line("{}{}: {}".format(ident, k, str(d[k])))

    print_dict(cfg)

    logger.add_line("=" * 30 + " Args " + "=" * 30)
    for k in args.__dict__:
        logger.add_line('{:30} {}'.format(k, args.__dict__[k]))

    tb_writter = None
    if cfg['log2tb'] and args.rank == 0:
        tb_dir = '{}/tensorboard'.format(model_dir)
        os.system('mkdir -p {}'.format(tb_dir))
        tb_writter = SummaryWriter(tb_dir)

    return logger, tb_writter, model_dir


def build_model(cfg, logger=None):
    import models
    assert cfg['arch'] in models.__dict__, 'Unknown model architecture'
    model = models.__dict__[cfg['arch']](**cfg['args'])

    if logger is not None:
        if isinstance(model, (list, tuple)):
            logger.add_line("=" * 30 + " Model " + "=" * 30)
            for m in model:
                logger.add_line(str(m))
            logger.add_line("=" * 30 + " Parameters " + "=" * 30)
            for m in model:
                logger.add_line(parameter_description(m))
        else:
            logger.add_line("=" * 30 + " Model " + "=" * 30)
            logger.add_line(str(model))
            logger.add_line("=" * 30 + " Parameters " + "=" * 30)
            logger.add_line(parameter_description(model))

    return model


def distribute_model_to_cuda(models, args, batch_size, num_workers, ngpus_per_node):
    if ngpus_per_node == 0:
        return models, args, batch_size, num_workers

    squeeze = False
    if not isinstance(models, list):
        models = [models]
        squeeze = True

    for i in range(len(models)):
        if args.distributed:
            # For multiprocessing distributed, DistributedDataParallel constructor
            # should always set the single device scope, otherwise,
            # DistributedDataParallel will use all available devices.
            if args.gpu is not None:
                torch.cuda.set_device(args.gpu)
                models[i].cuda(args.gpu)
                models[i] = torch.nn.parallel.DistributedDataParallel(models[i], device_ids=[args.gpu])
            else:
                models[i].cuda()
                # DistributedDataParallel will divide and allocate batch_size to all
                # available GPUs if device_ids are not set
                models[i] = torch.nn.parallel.DistributedDataParallel(models[i])
        elif args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            models[i] = models[i].cuda(args.gpu)
        else:
            # DataParallel will divide and allocate batch_size to all available GPUs
            models[i] = torch.nn.DataParallel(models[i]).cuda()

    if squeeze:
        models = models[0]

    if args.distributed and args.gpu is not None:
        # When using a single GPU per process and per
        # DistributedDataParallel, we need to divide the batch size
        # ourselves based on the total number of GPUs we have
        batch_size = int(batch_size / ngpus_per_node)
        num_workers = int((num_workers + ngpus_per_node - 1) / ngpus_per_node)

    return models, args, batch_size, num_workers


def build_dataloaders(cfg, num_workers, distributed, logger):
    train_loader = build_dataloader(cfg, cfg['train'], num_workers, distributed)
    logger.add_line("\n" + "=" * 30 + " Train data " + "=" * 30)
    logger.add_line(str(train_loader.dataset))
    return train_loader


def build_dataloader(db_cfg, split_cfg, num_workers, distributed):
    import torch.utils.data as data
    import torch.utils.data.distributed
    from datasets import preprocessing
    import datasets

    # Video transforms
    num_frames = int(db_cfg['video_clip_duration'] * db_cfg['video_fps'])
    if db_cfg['transforms'] == 'crop+color':
        video_transform = preprocessing.VideoPrep_Crop_CJ(
            resize=db_cfg['frame_size'],
            crop=(db_cfg['crop_size'], db_cfg['crop_size']),
            augment=split_cfg['use_augmentation'],
            num_frames=num_frames,
            pad_missing=True,
        )
    elif db_cfg['transforms'] == 'msc+color':
        video_transform = preprocessing.VideoPrep_MSC_CJ(
            crop=(db_cfg['crop_size'], db_cfg['crop_size']),
            augment=split_cfg['use_augmentation'],
            num_frames=num_frames,
            pad_missing=True,
        )
    else:
        raise ValueError('Unknown transform')

    # Audio transforms
    audio_transforms = [
        preprocessing.AudioPrep(
            trim_pad=True,
            duration=db_cfg['audio_clip_duration'],
            augment=split_cfg['use_augmentation'],
            missing_as_zero=True),
        preprocessing.LogSpectrogram(
            db_cfg['audio_fps'],
            n_fft=db_cfg['n_fft'],
            hop_size=1. / db_cfg['spectrogram_fps'],
            normalize=True)
    ]
    audio_fps_out = db_cfg['spectrogram_fps']

    if db_cfg['name'] == 'audioset':
        dataset = datasets.AudioSet
    elif db_cfg['name'] == 'kinetics':
        dataset = datasets.Kinetics
    else:
        raise ValueError('Unknown dataset')

    clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
    db = dataset(
        subset=split_cfg['split'],
        return_video=True,
        video_clip_duration=db_cfg['video_clip_duration'],
        video_fps=db_cfg['video_fps'],
        video_transform=video_transform,
        return_audio=True,
        audio_clip_duration=db_cfg['audio_clip_duration'],
        audio_fps=db_cfg['audio_fps'],
        audio_fps_out=audio_fps_out,
        audio_transform=audio_transforms,
        max_offsync_augm=0.5 if split_cfg['use_augmentation'] else 0,
        return_labels=False,
        return_index=True,
        mode='clip',
        clips_per_video=clips_per_video,
    )

    if distributed:
        sampler = torch.utils.data.distributed.DistributedSampler(db)
    else:
        sampler = None

    loader = torch.utils.data.DataLoader(
        db,
        batch_size=db_cfg['batch_size'],
        shuffle=(sampler is None),
        drop_last=split_cfg['drop_last'],
        num_workers=num_workers,
        pin_memory=True,
        sampler=sampler)

    return loader


def build_criterion(cfg, logger=None):
    import criterions
    criterion = criterions.__dict__[cfg['name']](**cfg['args'])
    if logger is not None:
        logger.add_line(str(criterion))
    return criterion


def build_optimizer(params, cfg, logger=None):
    if cfg['name'] == 'sgd':
        optimizer = torch.optim.SGD(
            params=params,
            lr=cfg['lr']['base_lr'],
            momentum=cfg['momentum'],
            weight_decay=cfg['weight_decay'],
            nesterov=cfg['nesterov']
        )
    elif cfg['name'] == 'adam':
        optimizer = torch.optim.Adam(
            params=params,
            lr=cfg['lr']['base_lr'],
            weight_decay=cfg['weight_decay'],
            betas=cfg['betas'] if 'betas' in cfg else [0.9, 0.999]
        )
    else:
        raise ValueError('Unknown optimizer.')

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['lr']['milestones'], gamma=cfg['lr']['gamma'])
    return optimizer, scheduler


class CheckpointManager(object):
    def __init__(self, checkpoint_dir, rank=0):
        self.checkpoint_dir = checkpoint_dir
        self.rank = rank
        self.best_metric = 0.

    def save(self, epoch, filename=None, eval_metric=0., **kwargs):
        if self.rank != 0:
            return

        is_best = False
        if eval_metric > self.best_metric:
            self.best_metric = eval_metric
            is_best = True

        state = {'epoch': epoch}
        for k in kwargs:
            state[k] = kwargs[k].state_dict()

        if filename is None:
            save_checkpoint(state=state, is_best=is_best, model_dir=self.checkpoint_dir)
        else:
            save_checkpoint(state=state, is_best=False, filename='{}/{}'.format(self.checkpoint_dir, filename))

    def last_checkpoint_fn(self):
        return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)

    def best_checkpoint_fn(self):
        return '{}/model_best.pth.tar'.format(self.checkpoint_dir)

    def checkpoint_fn(self, last=False, best=False):
        assert best or last
        assert not (last and best)
        if last:
            return self.last_checkpoint_fn()
        if best:
            return self.best_checkpoint_fn()

    def checkpoint_exists(self, last=False, best=False):
        return os.path.isfile(self.checkpoint_fn(last, best))

    def restore(self, fn=None, restore_last=False, restore_best=False, **kwargs):
        checkpoint_fn = fn if fn is not None else self.checkpoint_fn(restore_last, restore_best)
        ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
        start_epoch = ckp['epoch']
        for k in kwargs:
            if k == 'train_criterion':
                kwargs[k].load_state_dict(ckp[k], strict=False)
            else:
                kwargs[k].load_state_dict(ckp[k])
        return start_epoch


def save_checkpoint(state, is_best, model_dir='.', filename=None):
    if filename is None:
        filename = '{}/checkpoint.pth.tar'.format(model_dir)
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, '{}/model_best.pth.tar'.format(model_dir))


def prep_output_folder(model_dir, evaluate):
    if evaluate:
        assert os.path.isdir(model_dir)
    else:
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)


def parameter_description(model):
    desc = ''
    for n, p in model.named_parameters():
        desc += "{:70} | {:10} | {:30} | {}\n".format(
            n, 'Trainable' if p.requires_grad else 'Frozen',
            ' x '.join([str(s) for s in p.size()]), str(np.prod(p.size())))
    return desc
AVID-CMA-main
utils/main_utils.py
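For reference, a minimal standalone sketch of the config-driven optimizer/scheduler pattern that `build_optimizer` above implements; the toy model and config values are illustrative, not taken from the repo's configs.

import torch

# Toy stand-in for a real model; the cfg values below are made up for illustration.
model = torch.nn.Linear(128, 10)
cfg = {'name': 'sgd', 'momentum': 0.9, 'weight_decay': 1e-5, 'nesterov': False,
       'lr': {'base_lr': 0.1, 'milestones': [30, 60], 'gamma': 0.1}}

optimizer = torch.optim.SGD(model.parameters(), lr=cfg['lr']['base_lr'],
                            momentum=cfg['momentum'], weight_decay=cfg['weight_decay'],
                            nesterov=cfg['nesterov'])
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=cfg['lr']['milestones'], gamma=cfg['lr']['gamma'])

for epoch in range(90):
    optimizer.step()   # stand-in for a real training epoch
    scheduler.step()   # decays the lr by gamma at epochs 30 and 60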
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch from torch import nn import torch.distributed as dist import utils.logger from utils import main_utils import yaml import os def prepare_environment(args, cfg, fold): if args.distributed: while True: try: dist.init_process_group(backend='nccl', init_method='tcp://localhost:{}'.format(args.port), world_size=args.world_size, rank=args.gpu) break except RuntimeError: args.port = str(int(args.port) + 1) model_cfg = yaml.safe_load(open(args.model_cfg))['model'] eval_dir = '{}/{}/eval-{}/fold-{:02d}'.format(model_cfg['model_dir'], model_cfg['name'], cfg['benchmark']['name'], fold) os.makedirs(eval_dir, exist_ok=True) yaml.safe_dump(cfg, open('{}/config.yaml'.format(eval_dir), 'w')) logger = utils.logger.Logger(quiet=args.quiet, log_fn='{}/eval.log'.format(eval_dir), rank=args.gpu) if any(['SLURM' in env for env in list(os.environ.keys())]): logger.add_line("=" * 30 + " SLURM " + "=" * 30) for env in os.environ.keys(): if 'SLURM' in env: logger.add_line('{:30}: {}'.format(env, os.environ[env])) logger.add_line("=" * 30 + " Config " + "=" * 30) def print_dict(d, ident=''): for k in d: if isinstance(d[k], dict): logger.add_line("{}{}".format(ident, k)) print_dict(d[k], ident=' '+ident) else: logger.add_line("{}{}: {}".format(ident, k, str(d[k]))) print_dict(cfg) logger.add_line("=" * 30 + " Model Config " + "=" * 30) print_dict(model_cfg) return eval_dir, model_cfg, logger def distribute_model_to_cuda(model, args, cfg): if torch.cuda.device_count() == 1: model = model.cuda() elif args.distributed: torch.cuda.set_device(args.gpu) model.cuda(args.gpu) cfg['dataset']['batch_size'] = max(cfg['dataset']['batch_size'] // args.world_size, 1) cfg['num_workers'] = max(cfg['num_workers'] // args.world_size, 1) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) else: model = torch.nn.DataParallel(model).cuda() return model def build_dataloader(db_cfg, split_cfg, fold, num_workers, distributed): import torch.utils.data as data from datasets import preprocessing if db_cfg['transform'] == 'msc+color': video_transform = preprocessing.VideoPrep_MSC_CJ( crop=(db_cfg['crop_size'], db_cfg['crop_size']), num_frames=int(db_cfg['video_fps'] * db_cfg['clip_duration']), pad_missing=True, augment=split_cfg['use_augmentation'], min_area=db_cfg['min_area'], color=db_cfg['color'], ) elif db_cfg['transform'] == 'crop+color': video_transform = preprocessing.VideoPrep_Crop_CJ( crop=(db_cfg['crop_size'], db_cfg['crop_size']), num_frames=int(db_cfg['video_fps'] * db_cfg['clip_duration']), pad_missing=True, augment=split_cfg['use_augmentation'], ) else: raise ValueError import datasets if db_cfg['name'] == 'ucf101': dataset = datasets.UCF elif db_cfg['name'] == 'hmdb51': dataset = datasets.HMDB elif db_cfg['name'] == 'kinetics': dataset = datasets.Kinetics else: raise ValueError('Unknown dataset') db = dataset( subset=split_cfg['split'].format(fold=fold), return_video=True, video_clip_duration=db_cfg['clip_duration'], video_fps=db_cfg['video_fps'], video_transform=video_transform, return_audio=False, return_labels=True, mode=split_cfg['mode'], clips_per_video=split_cfg['clips_per_video'], ) if distributed: sampler = torch.utils.data.distributed.DistributedSampler(db) else: sampler = None drop_last = split_cfg['drop_last'] if 'drop_last' in split_cfg else True loader = data.DataLoader( db, 
batch_size=db_cfg['batch_size'] if split_cfg['mode'] == 'clip' else max(1, db_cfg['batch_size']//split_cfg['clips_per_video']), num_workers=num_workers, pin_memory=True, shuffle=(sampler is None) and split_cfg['use_shuffle'], sampler=sampler, drop_last=drop_last ) return loader def build_dataloaders(cfg, fold, num_workers, distributed, logger): logger.add_line("=" * 30 + " Train DB " + "=" * 30) train_loader = build_dataloader(cfg, cfg['train'], fold, num_workers, distributed) logger.add_line(str(train_loader.dataset)) logger.add_line("=" * 30 + " Test DB " + "=" * 30) test_loader = build_dataloader(cfg, cfg['test'], fold, num_workers, distributed) logger.add_line(str(test_loader.dataset)) logger.add_line("=" * 30 + " Dense DB " + "=" * 30) dense_loader = build_dataloader(cfg, cfg['test_dense'], fold, num_workers, distributed) logger.add_line(str(dense_loader.dataset)) return train_loader, test_loader, dense_loader class CheckpointManager(object): def __init__(self, checkpoint_dir, rank=0): self.checkpoint_dir = checkpoint_dir self.best_metric = 0. self.rank = rank def save(self, model, optimizer, scheduler, epoch, eval_metric=0.): if self.rank is not None and self.rank != 0: return is_best = False if eval_metric > self.best_metric: self.best_metric = eval_metric is_best = True main_utils.save_checkpoint(state={ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), }, is_best=is_best, model_dir=self.checkpoint_dir) def last_checkpoint_fn(self): return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir) def best_checkpoint_fn(self): return '{}/model_best.pth.tar'.format(self.checkpoint_dir) def checkpoint_fn(self, last=False, best=False): assert best or last assert not (last and best) if last: return self.last_checkpoint_fn() if best: return self.best_checkpoint_fn() def checkpoint_exists(self, last=False, best=False): return os.path.isfile(self.checkpoint_fn(last, best)) def restore(self, model, optimizer, scheduler, restore_last=False, restore_best=False): checkpoint_fn = self.checkpoint_fn(restore_last, restore_best) ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'}) start_epoch = ckp['epoch'] model.load_state_dict(ckp['state_dict']) optimizer.load_state_dict(ckp['optimizer']) scheduler.load_state_dict(ckp['scheduler']) return start_epoch class ClassificationWrapper(torch.nn.Module): def __init__(self, feature_extractor, n_classes, feat_name, feat_dim, pooling_op=None, use_dropout=False, dropout=0.5): super(ClassificationWrapper, self).__init__() self.feature_extractor = feature_extractor self.feat_name = feat_name self.use_dropout = use_dropout if pooling_op is not None: self.pooling = eval('torch.nn.'+pooling_op) else: self.pooling = None if use_dropout: self.dropout = torch.nn.Dropout(dropout) self.classifier = torch.nn.Linear(feat_dim, n_classes) def forward(self, *inputs): emb = self.feature_extractor(*inputs, return_embs=True)[self.feat_name] emb_pool = self.pooling(emb) if self.pooling is not None else emb emb_pool = emb_pool.view(inputs[0].shape[0], -1) if self.use_dropout: emb_pool = self.dropout(emb_pool) logit = self.classifier(emb_pool) return logit class Classifier(nn.Module): def __init__(self, n_classes, feat_name, feat_dim, pooling, l2_norm=False, use_bn=True, use_dropout=False): super(Classifier, self).__init__() self.use_bn = use_bn self.feat_name = feat_name self.pooling = eval('nn.'+pooling) if pooling is not None else None self.l2_norm = l2_norm if use_bn: self.bn = 
nn.BatchNorm1d(feat_dim) self.use_dropout = use_dropout if use_dropout: self.dropout = nn.Dropout() self.classifier = nn.Linear(feat_dim, n_classes) def forward(self, x): with torch.no_grad(): if self.use_dropout: x = self.dropout(x) if self.l2_norm: x = nn.functional.normalize(x, p=2, dim=-1) if self.pooling is not None and len(x.shape) > 2: x = self.pooling(x) x = x.view(x.shape[0], -1).contiguous().detach() if self.use_bn: x = self.bn(x) return self.classifier(x) class MOSTCheckpointManager(object): def __init__(self, checkpoint_dir, rank=0): self.rank = rank self.checkpoint_dir = checkpoint_dir self.best_metric = 0. def save(self, model, optimizer, epoch, eval_metric=0.): if self.rank != 0: return is_best = False if eval_metric > self.best_metric: self.best_metric = eval_metric is_best = True try: state_dict = model.classifiers.state_dict() except AttributeError: state_dict = model.module.classifiers.state_dict() main_utils.save_checkpoint(state={ 'epoch': epoch, 'state_dict': state_dict, 'optimizer': optimizer.state_dict(), }, is_best=is_best, model_dir=self.checkpoint_dir) def last_checkpoint_fn(self): return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir) def best_checkpoint_fn(self): return '{}/model_best.pth.tar'.format(self.checkpoint_dir) def checkpoint_fn(self, last=False, best=False): assert best or last # assert not (last and best) if last: return self.last_checkpoint_fn() elif best: return self.best_checkpoint_fn() def checkpoint_exists(self, last=False, best=False): return os.path.isfile(self.checkpoint_fn(last, best)) def restore(self, model, optimizer, restore_last=False, restore_best=False): checkpoint_fn = self.checkpoint_fn(restore_last, restore_best) ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'}) start_epoch = ckp['epoch'] try: model.classifiers.load_state_dict(ckp['state_dict']) except AttributeError: model.module.classifiers.load_state_dict(ckp['state_dict']) optimizer.load_state_dict(ckp['optimizer']) return start_epoch class MOSTModel(nn.Module): def __init__(self, feature_extractor, n_classes, feat_names, feat_dims, pooling_ops, l2_norm=None, use_bn=False, use_dropout=False): super(MOSTModel, self).__init__() assert len(feat_dims) == len(pooling_ops) == len(feat_names) n_outputs = len(feat_dims) self.feat_names = feat_names self.feat_dims = feat_dims self.pooling_ops = pooling_ops if l2_norm is None: l2_norm = [False] * len(feat_names) if not isinstance(l2_norm, list): l2_norm = [l2_norm] * len(feat_names) self.l2_norm = l2_norm feature_extractor.train(False) self.feature_extractor = feature_extractor self.classifiers = nn.ModuleList([ Classifier(n_classes, feat_name=feat_names[i], feat_dim=feat_dims[i], pooling=pooling_ops[i], l2_norm=l2_norm[i], use_bn=use_bn, use_dropout=use_dropout) for i in range(n_outputs) ]) for p in self.feature_extractor.parameters(): p.requires_grad = False def forward(self, *x): with torch.no_grad(): embs = self.feature_extractor(*x, return_embs=self.feat_names) embs = {ft: embs[ft] for ft in self.feat_names} for classifier, ft in zip(self.classifiers, self.feat_names): embs[ft] = classifier(embs[ft]) return embs def build_model(feat_cfg, eval_cfg, eval_dir, args, logger): import models pretrained_net = models.__dict__[feat_cfg['arch']](**feat_cfg['args']) # Load from checkpoint checkpoint_fn = '{}/{}/checkpoint.pth.tar'.format(feat_cfg['model_dir'], feat_cfg['name']) ckp = torch.load(checkpoint_fn, map_location='cpu') pretrained_net.load_state_dict({k.replace('module.', ''): ckp['model'][k] for k in ckp['model']}) 
# Wrap with linear-head classifiers if eval_cfg['model']['name'] == 'ClassificationWrapper': model = ClassificationWrapper(feature_extractor=pretrained_net.video_model, **eval_cfg['model']['args']) ckp_manager = CheckpointManager(eval_dir, rank=args.gpu) elif eval_cfg['model']['name'] == 'MOSTWrapper': model = MOSTModel(feature_extractor=pretrained_net.video_model, **eval_cfg['model']['args']) ckp_manager = MOSTCheckpointManager(eval_dir, rank=args.gpu) else: raise ValueError # Log model description logger.add_line("=" * 30 + " Model " + "=" * 30) logger.add_line(str(model)) logger.add_line("=" * 30 + " Parameters " + "=" * 30) logger.add_line(main_utils.parameter_description(model)) logger.add_line("=" * 30 + " Pretrained model " + "=" * 30) logger.add_line("File: {}\nEpoch: {}".format(checkpoint_fn, ckp['epoch'])) # Distribute model = distribute_model_to_cuda(model, args, eval_cfg) return model, ckp_manager class BatchWrapper: def __init__(self, model, batch_size): self.model = model self.batch_size = batch_size def __call__(self, x): outs = [] for i in range(0, x.shape[0], self.batch_size): outs += [self.model(x[i:i + self.batch_size])] return torch.cat(outs, 0)
AVID-CMA-main
utils/eval_utils.py
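The `Classifier`/`MOSTModel` wrappers above follow a freeze-and-probe pattern: the pretrained feature extractor is frozen and only small heads receive gradients. A minimal standalone sketch with a toy backbone (not the repo's video model):

import torch
import torch.nn as nn

# Frozen backbone + trainable linear head, mirroring the Classifier/MOSTModel pattern.
backbone = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1),
                         nn.AdaptiveAvgPool2d(1), nn.Flatten())
backbone.train(False)
for p in backbone.parameters():
    p.requires_grad = False

head = nn.Linear(16, 101)      # e.g. 101 classes for UCF-101

x = torch.randn(4, 3, 112, 112)
with torch.no_grad():          # features are never backpropagated through
    feats = backbone(x)
logits = head(feats)           # only the head receives gradients
print(logits.shape)            # torch.Size([4, 101])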
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
AVID-CMA-main
utils/ioutils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import av
import numpy as np
from fractions import Fraction

av.logging.set_level(0)


def av_open(inpt):
    return av.open(inpt)


def av_load_video(container, video_fps=None, start_time=0, duration=None):
    video_stream = container.streams.video[0]
    _ss = video_stream.start_time * video_stream.time_base
    _dur = video_stream.duration * video_stream.time_base
    _ff = _ss + _dur
    _fps = video_stream.average_rate

    if video_fps is None:
        video_fps = _fps

    if duration is None:
        duration = _ff - start_time

    # Figure out which frames to decode
    outp_times = [t for t in np.arange(start_time, min(start_time + duration - 0.5/_fps, _ff), 1./video_fps)][:int(duration*video_fps)]
    outp_vframes = [int((t - _ss) * _fps) for t in outp_times]
    start_time = outp_vframes[0] / float(_fps)

    # Fast forward
    container.seek(int(start_time * av.time_base))

    # Decode snippet
    frames = []
    for frame in container.decode(video=0):
        if len(frames) == len(outp_vframes):
            break   # All frames have been decoded
        frame_no = frame.pts * frame.time_base * _fps
        if frame_no < outp_vframes[len(frames)]:
            continue    # Not the frame we want

        # Decode
        pil_img = frame.to_image()
        while frame_no >= outp_vframes[len(frames)]:
            frames += [pil_img]
            if len(frames) == len(outp_vframes):
                break   # All frames have been decoded
    return frames, video_fps, start_time


def av_load_audio(container, audio_fps=None, start_time=0, duration=None):
    # NOTE: renamed from the original (misspelled) `av_laod_audio`;
    # an alias is kept below for call sites that still use the old name.
    audio_stream = container.streams.audio[0]
    _ss = audio_stream.start_time * audio_stream.time_base if audio_stream.start_time is not None else 0.
    _dur = audio_stream.duration * audio_stream.time_base
    _ff = _ss + _dur
    _fps = audio_stream.rate

    if audio_fps is None:
        resample = False
        audio_fps = _fps
    else:
        resample = True
        audio_resampler = av.audio.resampler.AudioResampler(format="s16p", layout="mono", rate=audio_fps)

    if duration is None:
        duration = _ff - start_time
    duration = min(duration, _ff - start_time)
    end_time = start_time + duration

    # Fast forward
    container.seek(int(start_time * av.time_base))

    # Decode snippet
    data, timestamps = [], []
    for frame in container.decode(audio=0):
        frame_pts = frame.pts * frame.time_base
        frame_end_pts = frame_pts + Fraction(frame.samples, frame.rate)
        if frame_end_pts < start_time:   # Skip until start time
            continue
        if frame_pts > end_time:         # Exit if clip has been extracted
            break

        try:
            frame.pts = None
            if resample:
                np_snd = audio_resampler.resample(frame).to_ndarray()
            else:
                np_snd = frame.to_ndarray()
            data += [np_snd]
            timestamps += [frame_pts]
        except AttributeError:
            break
    data = np.concatenate(data, 1)

    # Trim audio
    start_decoded_time = timestamps[0]
    ss = int((start_time - start_decoded_time) * audio_fps)
    t = int(duration * audio_fps)
    if ss < 0:
        data = np.pad(data, ((0, 0), (-ss, 0)), 'constant', constant_values=0)
        ss = 0
    if t > data.shape[1]:
        data = np.pad(data, ((0, 0), (0, t-data.shape[1])), 'constant', constant_values=0)
    data = data[:, ss: ss+t]
    data = data / np.iinfo(data.dtype).max

    return data, audio_fps


# Backwards-compatible alias for the original (misspelled) function name.
av_laod_audio = av_load_audio
AVID-CMA-main
utils/ioutils/av_wrappers.py
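A standalone illustration of how `av_load_video` above maps requested output timestamps to source frame indices before seeking and decoding; all numbers are made up for the example:

import numpy as np

start_time, duration = 2.0, 1.0   # seconds
_ss, _ff = 0.0, 10.0              # stream start/end times
_fps, video_fps = 30.0, 8.0       # source fps, requested output fps

outp_times = [t for t in np.arange(start_time,
                                   min(start_time + duration - 0.5/_fps, _ff),
                                   1./video_fps)][:int(duration*video_fps)]
outp_vframes = [int((t - _ss) * _fps) for t in outp_times]
print(outp_vframes)  # [60, 63, 67, 71, 75, 78, 82, 86] -> source frames to keep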
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import random
import torch
from utils.videotransforms.utils import functional as F


class Normalize(object):
    """Normalize a tensor image with mean and standard deviation.

    Given mean: m and std: s, will normalize each channel as
    channel = (channel - mean) / std

    Args:
        mean (sequence): per-channel mean values
        std (sequence): per-channel std values
    """

    def __init__(self, mean, std):
        self.mean = torch.tensor(mean).view(3, 1, 1, 1)
        self.std = torch.tensor(std).view(3, 1, 1, 1)

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of stacked images of size (C, T, H, W)
                to be normalized

        Returns:
            Tensor: Normalized stack of images
        """
        return F.normalize(tensor, self.mean, self.std)


class SpatialRandomCrop(object):
    """Crops a random spatial crop in a spatio-temporal
    numpy or tensor input [Channel, Time, Height, Width]
    """

    def __init__(self, size):
        """
        Args:
            size (tuple): in format (height, width)
        """
        self.size = size

    def __call__(self, tensor):
        h, w = self.size
        _, _, tensor_h, tensor_w = tensor.shape

        if w > tensor_w or h > tensor_h:
            error_msg = ('Initial tensor spatial size should be larger than '
                         'cropped size but got cropped sizes : ({w}, {h}) while '
                         'initial tensor is ({t_w}, {t_h})'.format(
                             t_w=tensor_w, t_h=tensor_h, w=w, h=h))
            raise ValueError(error_msg)
        x1 = random.randint(0, tensor_w - w)
        y1 = random.randint(0, tensor_h - h)
        # Fixed: the width slice previously used `x1:x1 + h`
        cropped = tensor[:, :, y1:y1 + h, x1:x1 + w]
        return cropped
AVID-CMA-main
utils/videotransforms/tensor_transforms.py
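A short usage sketch for the transforms above, assuming the AVID-CMA repo root is on PYTHONPATH; the mean/std values are illustrative:

import torch
from utils.videotransforms.tensor_transforms import Normalize, SpatialRandomCrop

clip = torch.rand(3, 8, 128, 176)                 # (C, T, H, W) toy clip
crop = SpatialRandomCrop((112, 112))
norm = Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225])
out = norm(crop(clip))
print(out.shape)  # torch.Size([3, 8, 112, 112])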
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from PIL import Image
import torch

from utils.videotransforms.utils import images as imageutils


class ClipToTensor(object):
    """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
    to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
    """

    def __init__(self, channel_nb=3, div_255=True, numpy=False):
        self.channel_nb = channel_nb
        self.div_255 = div_255
        self.numpy = numpy

    def __call__(self, clip):
        """
        Args:
            clip (list of numpy.ndarray): clip (list of images)
                to be converted to tensor.
        """
        # Retrieve shape
        if isinstance(clip[0], np.ndarray):
            h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(ch)
        elif isinstance(clip[0], Image.Image):
            w, h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))

        np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])

        # Convert
        for img_idx, img in enumerate(clip):
            if isinstance(img, np.ndarray):
                pass
            elif isinstance(img, Image.Image):
                img = np.array(img, copy=False)
            else:
                # Fixed: the original reported type(clip[0]) here instead of type(img)
                raise TypeError('Expected numpy.ndarray or PIL.Image '
                                'but got list of {0}'.format(type(img)))
            img = imageutils.convert_img(img)
            np_clip[:, img_idx, :, :] = img

        if self.numpy:
            if self.div_255:
                np_clip = np_clip / 255
            return np_clip
        else:
            tensor_clip = torch.from_numpy(np_clip)
            if not isinstance(tensor_clip, torch.FloatTensor):
                tensor_clip = tensor_clip.float()
            if self.div_255:
                tensor_clip = tensor_clip.div(255)
            return tensor_clip


class ToTensor(object):
    """Converts numpy array to tensor
    """

    def __call__(self, array):
        tensor = torch.from_numpy(array)
        return tensor
AVID-CMA-main
utils/videotransforms/volume_transforms.py
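A usage sketch for `ClipToTensor` above, again assuming the repo root is on PYTHONPATH; frame count and resolution are arbitrary:

import numpy as np
from PIL import Image
from utils.videotransforms.volume_transforms import ClipToTensor

# A toy 8-frame clip of 120x160 RGB images.
clip = [Image.fromarray(np.random.randint(0, 255, (120, 160, 3), dtype=np.uint8))
        for _ in range(8)]
tensor = ClipToTensor()(clip)
print(tensor.shape)  # torch.Size([3, 8, 120, 160]), values scaled to [0, 1]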
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numbers
import cv2
import numpy as np
import PIL


def crop_clip(clip, min_h, min_w, h, w):
    if isinstance(clip[0], np.ndarray):
        cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
    elif isinstance(clip[0], PIL.Image.Image):
        cropped = [img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip]
    else:
        raise TypeError('Expected numpy.ndarray or PIL.Image '
                        'but got list of {0}'.format(type(clip[0])))
    return cropped


def resize_clip(clip, size, interpolation='bilinear'):
    if isinstance(clip[0], np.ndarray):
        if isinstance(size, numbers.Number):
            im_h, im_w, im_c = clip[0].shape
            # Min spatial dim already matches minimal size
            if (im_w <= im_h and im_w == size) or (im_h <= im_w and im_h == size):
                return clip
            new_h, new_w = get_resize_sizes(im_h, im_w, size)
            size = (new_w, new_h)
        else:
            size = size[1], size[0]
        if interpolation == 'bilinear':
            np_inter = cv2.INTER_LINEAR
        else:
            np_inter = cv2.INTER_NEAREST
        scaled = [cv2.resize(img, size, interpolation=np_inter) for img in clip]
    elif isinstance(clip[0], PIL.Image.Image):
        if isinstance(size, numbers.Number):
            im_w, im_h = clip[0].size
            # Min spatial dim already matches minimal size
            if (im_w <= im_h and im_w == size) or (im_h <= im_w and im_h == size):
                return clip
            new_h, new_w = get_resize_sizes(im_h, im_w, size)
            size = (new_w, new_h)
        else:
            size = size[1], size[0]
        # Fixed: the bilinear/nearest mapping was inverted in the original
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
        scaled = [img.resize(size, pil_inter) for img in clip]
    else:
        raise TypeError('Expected numpy.ndarray or PIL.Image '
                        'but got list of {0}'.format(type(clip[0])))
    return scaled


def get_resize_sizes(im_h, im_w, size):
    if im_w < im_h:
        ow = size
        oh = int(size * im_h / im_w)
    else:
        oh = size
        ow = int(size * im_w / im_h)
    return oh, ow
AVID-CMA-main
utils/videotransforms/functional.py
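A standalone check of the aspect-preserving resize math in `get_resize_sizes` above (the function body is copied verbatim so the snippet runs on its own): the short side is mapped to `size` and the long side is scaled to preserve aspect ratio.

def get_resize_sizes(im_h, im_w, size):
    if im_w < im_h:
        ow = size
        oh = int(size * im_h / im_w)
    else:
        oh = size
        ow = int(size * im_w / im_h)
    return oh, ow

print(get_resize_sizes(240, 320, 128))  # (128, 170): height is the short side
print(get_resize_sizes(320, 240, 128))  # (170, 128): width is the short side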
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import PIL
import torch

from utils.videotransforms.utils import images as imageutils


class ToStackedTensor(object):
    """Converts a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
    or PIL Images to a torch.FloatTensor of shape (m*C x H x W)
    in the range [0, 1.0]
    """

    def __init__(self, channel_nb=3):
        self.channel_nb = channel_nb

    def __call__(self, clip):
        """
        Args:
            clip (list of numpy.ndarray or PIL.Image.Image): clip
                (list of images) to be converted to tensor.
        """
        # Retrieve shape
        if isinstance(clip[0], np.ndarray):
            h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'got {} channels instead of 3'.format(ch)
        elif isinstance(clip[0], PIL.Image.Image):
            w, h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))

        np_clip = np.zeros([self.channel_nb * len(clip), int(h), int(w)])

        # Convert
        for img_idx, img in enumerate(clip):
            if isinstance(img, np.ndarray):
                pass
            elif isinstance(img, PIL.Image.Image):
                img = np.array(img, copy=False)
            else:
                # Fixed: the original reported type(clip[0]) here instead of type(img)
                raise TypeError('Expected numpy.ndarray or PIL.Image '
                                'but got list of {0}'.format(type(img)))
            img = imageutils.convert_img(img)
            np_clip[img_idx * self.channel_nb:(img_idx + 1) * self.channel_nb, :, :] = img
        tensor_clip = torch.from_numpy(np_clip)
        return tensor_clip.float().div(255)
AVID-CMA-main
utils/videotransforms/stack_transforms.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numbers
import random
import numpy as np
import PIL
import torchvision
import warnings
import math

from utils.videotransforms import functional as F
from torchvision.transforms import functional as vF


class Compose(object):
    """Composes several transforms

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, clip):
        for t in self.transforms:
            clip = t(clip)
        return clip


class RandomDrop(object):
    """Randomly drops frames in order to keep a fixed number of frames
    """

    def __init__(self, num_frames):
        self.num_frames = num_frames

    def __call__(self, clip):
        assert len(clip) >= self.num_frames
        if len(clip) == self.num_frames:
            return clip
        idx = sorted(random.sample(range(len(clip)), k=self.num_frames))
        return [clip[i] for i in idx]


class UniformDrop(object):
    """Uniformly subsamples frames in order to keep either a fixed number
    of frames (num_frames) or a fixed subsampling ratio (ss_ratio)
    """

    def __init__(self, num_frames=None, ss_ratio=None):
        self.num_frames = num_frames
        self.ss_ratio = ss_ratio

    def __call__(self, clip):
        if self.num_frames is not None:
            if len(clip) <= self.num_frames:
                return clip
            idx = np.linspace(0, len(clip)-1, self.num_frames, endpoint=True).astype(int)
            return [clip[i] for i in idx]
        elif self.ss_ratio is not None:
            if self.ss_ratio == 1:
                return clip
            idx = np.arange(0, len(clip), self.ss_ratio).astype(int)
            return [clip[i] for i in idx]


class RandomHorizontalFlip(object):
    """Horizontally flip the list of given images randomly
    with a probability 0.5
    """

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be flipped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Randomly flipped clip
        """
        if random.random() < 0.5:
            if isinstance(clip[0], np.ndarray):
                return [np.fliplr(img) for img in clip]
            elif isinstance(clip[0], PIL.Image.Image):
                return [img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip]
            else:
                raise TypeError('Expected numpy.ndarray or PIL.Image'
                                ' but got list of {0}'.format(type(clip[0])))
        return clip


class RandomGray(object):
    """Randomly converts each frame of the clip to grayscale
    with a probability p
    """

    def __init__(self, p):
        self.p = p

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image): list of images

        Returns:
            list of PIL.Image: clip with frames randomly converted to grayscale
        """
        rand_gray_clip = []
        for img in clip:
            if random.random() < self.p:
                img = vF.to_grayscale(img)
            rand_gray_clip.append(img)
        return rand_gray_clip


class RandomResize(object):
    """Resizes a list of (H x W x C) numpy.ndarray to a randomly scaled size.
    The larger the original image is, the longer the interpolation takes.

    Args:
        ratio (tuple): range of scaling factors
        interpolation (str): Can be one of 'nearest', 'bilinear'; defaults to nearest
    """

    def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
        self.ratio = ratio
        self.interpolation = interpolation

    def __call__(self, clip):
        scaling_factor = random.uniform(self.ratio[0], self.ratio[1])

        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size

        new_w = int(im_w * scaling_factor)
        new_h = int(im_h * scaling_factor)
        new_size = (new_w, new_h)
        resized = F.resize_clip(clip, new_size, interpolation=self.interpolation)
        return resized


class Resize(object):
    """Resizes a list of (H x W x C) numpy.ndarray to the final size.
    The larger the original image is, the longer the interpolation takes.

    Args:
        size (tuple): (width, height)
        interpolation (str): Can be one of 'nearest', 'bilinear'; defaults to nearest
    """

    def __init__(self, size, interpolation='nearest'):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, clip):
        resized = F.resize_clip(clip, self.size, interpolation=self.interpolation)
        return resized


class RandomCrop(object):
    """Extract random crop at the same location for a list of images

    Args:
        size (sequence or int): Desired output size for the crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be cropped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        h, w = self.size
        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = ('Initial image size should be larger than '
                         'cropped size but got cropped sizes : ({w}, {h}) while '
                         'initial image is ({im_w}, {im_h})'.format(
                             im_w=im_w, im_h=im_h, w=w, h=h))
            raise ValueError(error_msg)

        x1 = random.randint(0, im_w - w)
        y1 = random.randint(0, im_h - h)
        cropped = F.crop_clip(clip, y1, x1, h, w)
        return cropped


class CenterCrop(object):
    """Extract center crop at the same location for a list of images

    Args:
        size (sequence or int): Desired output size for the crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be cropped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        h, w = self.size
        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = ('Initial image size should be larger than '
                         'cropped size but got cropped sizes : ({w}, {h}) while '
                         'initial image is ({im_w}, {im_h})'.format(
                             im_w=im_w, im_h=im_h, w=w, h=h))
            raise ValueError(error_msg)

        x1 = int(round((im_w - w) / 2.))
        y1 = int(round((im_h - h) / 2.))
        cropped = F.crop_clip(clip, y1, x1, h, w)
        return cropped


class TenCrop(object):
    """Extract ten crops from each image in a list of images
    (the four corners, the center, and their horizontal flips)

    Args:
        size (sequence or int): Desired output size for the crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be cropped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        cropped = []
        for frame in clip:
            cropped += list(vF.ten_crop(frame, self.size))
        return cropped


class RandomResizedCrop(object):
    """Crops a series of PIL Images to random size and aspect ratio.

    A crop of random size (default: of 0.08 to 1.0) of the original size and a
    random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio
    is made. This crop is finally resized to the given size.
    This is popularly used to train the Inception networks.

    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=PIL.Image.BILINEAR):
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("range should be of kind (min, max)")

        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        area = img.size[0] * img.size[1]

        for attempt in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w

    def __call__(self, clip):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.

        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(clip[0], self.scale, self.ratio)
        return [vF.resized_crop(img, i, j, h, w, self.size, self.interpolation) for img in clip]

    def __repr__(self):
        interpolate_str = '?'
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
        format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
        format_string += ', interpolation={0})'.format(interpolate_str)
        return format_string


class ColorJitter(object):
    """Randomly change the brightness, contrast, saturation and hue of the clip

    Args:
        brightness (float): How much to jitter brightness. brightness_factor
            is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        contrast (float): How much to jitter contrast. contrast_factor
            is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        saturation (float): How much to jitter saturation. saturation_factor
            is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        hue (float): How much to jitter hue. hue_factor is chosen uniformly
            from [-hue, hue]. Should be >= 0 and <= 0.5.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    def get_params(self, brightness, contrast, saturation, hue):
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
        else:
            brightness_factor = None

        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
        else:
            contrast_factor = None

        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
        else:
            saturation_factor = None

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
        else:
            hue_factor = None
        return brightness_factor, contrast_factor, saturation_factor, hue_factor

    def __call__(self, clip):
        """
        Args:
            clip (list): list of PIL.Image

        Returns:
            list PIL.Image: list of transformed PIL.Image
        """
        if isinstance(clip[0], np.ndarray):
            raise TypeError('Color jitter not yet implemented for numpy arrays')
        elif isinstance(clip[0], PIL.Image.Image):
            brightness, contrast, saturation, hue = self.get_params(
                self.brightness, self.contrast, self.saturation, self.hue)

            # Create img transform function sequence
            img_transforms = []
            if brightness is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
            if saturation is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
            if hue is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
            if contrast is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
            random.shuffle(img_transforms)

            # Apply to all images
            jittered_clip = []
            for img in clip:
                for func in img_transforms:
                    img = func(img)
                jittered_clip.append(img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))
        return jittered_clip


class TemporalJitter(object):
    """Crop video sequence temporally with jitter

    Args:
        n_frames (int): number of output frames
        time_scale (tuple): (min, max) range of subsample rates (default (1., 1.))
    """

    def __init__(self, n_frames, time_scale=(1., 1.)):
        self.n_frames = n_frames
        self.time_scale = time_scale

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be cropped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        rate = self.time_scale[0] if self.time_scale[0] == self.time_scale[1] else \
            random.uniform(self.time_scale[0],
                           min(self.time_scale[1], float(len(clip))/float(self.n_frames)))
        clip_ss = [clip[int(t)] for t in np.arange(0, len(clip), rate)]
        if len(clip_ss) == self.n_frames:
            clip_out = clip_ss
        elif len(clip_ss) < self.n_frames:
            # Wrap to fill frames
            clip_out = [clip_ss[t % len(clip_ss)] for t in range(self.n_frames)]
        else:
            # Extract random crop
            max_init_t = len(clip_ss) - self.n_frames
            init_t = random.randint(0, max_init_t)
            clip_out = clip_ss[init_t:init_t + self.n_frames]
        return clip_out


class TemporalCenterCrop(object):
    """Crop video sequence temporally at the center

    Args:
        n_frames (int): number of output frames
        time_scale (float): subsample rate (default 1.)
    """

    def __init__(self, n_frames, time_scale=1.):
        self.n_frames = n_frames
        self.time_scale = time_scale

    def __call__(self, clip):
        """
        Args:
            img (PIL.Image or numpy.ndarray): List of images to be cropped
                in format (h, w, c) in numpy.ndarray

        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        clip_ss = [clip[int(t)] for t in np.arange(0, len(clip), self.time_scale)]
        if len(clip_ss) == self.n_frames:
            clip_out = clip_ss
        elif len(clip_ss) < self.n_frames:
            # Wrap to fill frames
            clip_out = [clip_ss[t % len(clip_ss)] for t in range(self.n_frames)]
        else:
            # Extract center crop
            init_t = (len(clip_ss) - self.n_frames) // 2
            clip_out = clip_ss[init_t:init_t + self.n_frames]
        return clip_out
AVID-CMA-main
utils/videotransforms/video_transforms.py
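A typical training-time pipeline built from the classes above, assuming the repo root is on PYTHONPATH; the crop sizes and jitter strengths are illustrative choices, not the repo's defaults:

import numpy as np
from PIL import Image
from utils.videotransforms import video_transforms as vt

transform = vt.Compose([
    vt.Resize(128),             # short side -> 128, aspect preserved
    vt.RandomCrop(112),
    vt.RandomHorizontalFlip(),
    vt.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
])
clip = [Image.fromarray(np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8))
        for _ in range(8)]
out = transform(clip)
print(len(out), out[0].size)  # 8 (112, 112)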
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#


def normalize(tensor, mean, std):
    """
    Args:
        tensor (Tensor): Tensor to normalize

    Returns:
        Tensor: Normalized tensor
    """
    tensor.sub_(mean).div_(std)
    return tensor
AVID-CMA-main
utils/videotransforms/utils/functional.py
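The in-place `normalize` above relies on broadcasting: a (3, 1, 1, 1) mean/std applies per channel across a (C, T, H, W) clip. A standalone sketch with illustrative statistics:

import torch

clip = torch.rand(3, 8, 112, 112)
mean = torch.tensor([0.45, 0.45, 0.45]).view(3, 1, 1, 1)
std = torch.tensor([0.225, 0.225, 0.225]).view(3, 1, 1, 1)
clip.sub_(mean).div_(std)   # same ops as normalize(tensor, mean, std)
print(clip.mean().item())   # (0.5 - 0.45) / 0.225 ~ 0.22 for uniform input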
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np


def convert_img(img):
    """Converts (H, W, C) numpy.ndarray to (C, H, W) format
    """
    if len(img.shape) == 3:
        img = img.transpose(2, 0, 1)
    if len(img.shape) == 2:
        img = np.expand_dims(img, 0)
    return img
AVID-CMA-main
utils/videotransforms/utils/images.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .video import *
from .audio import *
from .av_wrapper import *
AVID-CMA-main
models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch import torch.nn as nn import numpy as np class Basic2DBlock(nn.Module): def __init__(self, in_planes, out_planes, stride=(1, 1)): self.__dict__.update(locals()) super(Basic2DBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), padding=(1, 1), stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(out_planes) self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=(3, 3), padding=(1, 1), bias=False) self.bn2 = nn.BatchNorm2d(out_planes) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) return x class BasicR2P1DBlock(nn.Module): def __init__(self, in_planes, out_planes, stride=(1, 1, 1)): super(BasicR2P1DBlock, self).__init__() spt_stride = (1, stride[1], stride[2]) tmp_stride = (stride[0], 1, 1) self.spt_conv1 = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=spt_stride, padding=(0, 1, 1), bias=False) self.spt_bn1 = nn.BatchNorm3d(out_planes) self.tmp_conv1 = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1), stride=tmp_stride, padding=(1, 0, 0), bias=False) self.tmp_bn1 = nn.BatchNorm3d(out_planes) self.spt_conv2 = nn.Conv3d(out_planes, out_planes, kernel_size=(1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=False) self.spt_bn2 = nn.BatchNorm3d(out_planes) self.tmp_conv2 = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False) self.out_bn = nn.BatchNorm3d(out_planes) self.relu = nn.ReLU(inplace=True) if in_planes != out_planes or any([s!=1 for s in stride]): self.res = True self.res_conv = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 1, 1), stride=stride, padding=(0, 0, 0), bias=False) else: self.res = False def forward(self, x): x_main = self.tmp_conv1(self.relu(self.spt_bn1(self.spt_conv1(x)))) x_main = self.relu(self.tmp_bn1(x_main)) x_main = self.tmp_conv2(self.relu(self.spt_bn2(self.spt_conv2(x_main)))) x_res = self.res_conv(x) if self.res else x x_out = self.relu(self.out_bn(x_main + x_res)) return x_out
AVID-CMA-main
models/network_blocks.py
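A quick shape check for `BasicR2P1DBlock` above, assuming the repo root is on PYTHONPATH: a (2, 2, 2) stride halves the temporal and both spatial dimensions, and the 1x1x1 residual conv matches the changed channel count.

import torch
from models.network_blocks import BasicR2P1DBlock

block = BasicR2P1DBlock(64, 128, stride=(2, 2, 2))
x = torch.randn(1, 64, 8, 56, 56)   # (N, C, T, H, W)
print(block(x).shape)               # torch.Size([1, 128, 4, 28, 28])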
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch import torch.nn as nn __all__ = [ 'av_wrapper' ] class Head(nn.Module): def __init__(self, input_dim, proj_dims): super(Head, self).__init__() if not isinstance(proj_dims, list): proj_dims = [proj_dims] projection = [] for i, d in enumerate(proj_dims): projection += [nn.Linear(input_dim, d)] input_dim = d if i < len(proj_dims)-1: projection += [nn.ReLU(inplace=True)] self.projection = nn.Sequential(*projection) self.out_dim = proj_dims[-1] def forward(self, x): return self.projection(x) class AV_Wrapper(nn.Module): def __init__(self, video_model, audio_model, proj_dim=128): super(AV_Wrapper, self).__init__() self.video_model = video_model self.audio_model = audio_model self.use_linear_proj = proj_dim is not None if proj_dim is not None: self.video_proj = Head(video_model.out_dim, proj_dim) self.audio_proj = Head(audio_model.out_dim, proj_dim) self.out_dim = self.video_proj.out_dim else: self.out_dim = video_model.out_dim def forward(self, video, audio): video_emb = self.video_model(video) video_emb = video_emb.view(video_emb.shape[0], video_emb.shape[1]) if self.use_linear_proj: video_emb = self.video_proj(video_emb) audio_emb = self.audio_model(audio) audio_emb = audio_emb.view(audio_emb.shape[0], audio_emb.shape[1]) if self.use_linear_proj: audio_emb = self.audio_proj(audio_emb) return video_emb, audio_emb def av_wrapper(video_backbone, video_backbone_args, audio_backbone, audio_backbone_args, proj_dim=128, checkpoint=None): import models assert video_backbone in models.__dict__, 'Unknown model architecture' assert audio_backbone in models.__dict__, 'Unknown model architecture' video_model = models.__dict__[video_backbone](**video_backbone_args) audio_model = models.__dict__[audio_backbone](**audio_backbone_args) model = AV_Wrapper(video_model, audio_model, proj_dim=proj_dim) if checkpoint is not None: ckp = torch.load(checkpoint, map_location='cpu') nn.DataParallel(model).load_state_dict(ckp['model']) return model
AVID-CMA-main
models/av_wrapper.py
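A small sketch of the `Head` projection above, assuming the repo root is on PYTHONPATH. `AV_Wrapper` passes a single `proj_dim` by default, but `Head` also accepts a list for a deeper MLP:

import torch
from models.av_wrapper import Head

# 512 -> 512 -> 128 projection with a ReLU between the two linear layers.
head = Head(input_dim=512, proj_dims=[512, 128])
emb = torch.randn(4, 512)
print(head(emb).shape, head.out_dim)  # torch.Size([4, 128]) 128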
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch.nn as nn from .network_blocks import Basic2DBlock __all__ = [ 'Conv2D' ] class Conv2D(nn.Module): def __init__(self, depth=10): super(Conv2D, self).__init__() assert depth==10 self.conv1 = nn.Sequential( nn.Conv2d(1, 64, kernel_size=7, padding=3, stride=2, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True), ) self.block1 = Basic2DBlock(64, 64, stride=(2, 2)) self.block2 = Basic2DBlock(64, 128, stride=(2, 2)) self.block3 = Basic2DBlock(128, 256, stride=(2, 2)) self.block4 = Basic2DBlock(256, 512) self.pool = nn.AdaptiveMaxPool2d((1, 1)) self.out_dim = 512 def forward(self, x, return_embs=False): x_c1 = self.conv1(x) x_b1 = self.block1(x_c1) x_b2 = self.block2(x_b1) x_b3 = self.block3(x_b2) x_b4 = self.block4(x_b3) x_pool = self.pool(x_b4) if return_embs: return {'conv2x': x_b1, 'conv3x': x_b2, 'conv4x': x_b3, 'conv5x': x_b4, 'pool': x_pool} else: return x_pool
AVID-CMA-main
models/audio.py
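A forward-pass sketch for the `Conv2D` audio encoder above, assuming the repo root is on PYTHONPATH; the spectrogram shape is illustrative:

import torch
from models.audio import Conv2D

model = Conv2D(depth=10)
spec = torch.randn(2, 1, 100, 128)   # (N, 1, freq, time) toy spectrogram
embs = model(spec, return_embs=True)
print({k: tuple(v.shape) for k, v in embs.items()})
# 'pool' comes out as (2, 512, 1, 1) after adaptive max pooling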
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch.nn as nn from models.network_blocks import BasicR2P1DBlock class R2Plus1D(nn.Module): """ Adapted from https://github.com/facebookresearch/VMZ/blob/4c14ee6f8eae8e2ac97fc4c05713b8a112eb1f28/lib/models/video_model.py Adaptation has a full Conv3D stem, and does not adjust for the number of dimensions between the spatial and temporal convolution. """ def __init__(self, depth=18): super(R2Plus1D, self).__init__() self.conv1 = nn.Sequential( nn.Conv3d(3, 64, kernel_size=(3, 7, 7), padding=(1, 3, 3), stride=(1, 2, 2), bias=False), nn.BatchNorm3d(64), nn.ReLU(inplace=True), nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)) ) if depth == 10: self.conv2x = BasicR2P1DBlock(64, 64) self.conv3x = BasicR2P1DBlock(64, 128, stride=(2, 2, 2)) self.conv4x = BasicR2P1DBlock(128, 256, stride=(2, 2, 2)) self.conv5x = BasicR2P1DBlock(256, 512, stride=(2, 2, 2)) elif depth == 18: self.conv2x = nn.Sequential(BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64)) self.conv3x = nn.Sequential(BasicR2P1DBlock(64, 128, stride=(2, 2, 2)), BasicR2P1DBlock(128, 128)) self.conv4x = nn.Sequential(BasicR2P1DBlock(128, 256, stride=(2, 2, 2)), BasicR2P1DBlock(256, 256)) self.conv5x = nn.Sequential(BasicR2P1DBlock(256, 512, stride=(2, 2, 2)), BasicR2P1DBlock(512, 512)) elif depth == 34: self.conv2x = nn.Sequential(BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64), BasicR2P1DBlock(64, 64)) self.conv3x = nn.Sequential(BasicR2P1DBlock(64, 128, stride=(2, 2, 2)), BasicR2P1DBlock(128, 128), BasicR2P1DBlock(128, 128), BasicR2P1DBlock(128, 128)) self.conv4x = nn.Sequential(BasicR2P1DBlock(128, 256, stride=(2, 2, 2)), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256), BasicR2P1DBlock(256, 256)) self.conv5x = nn.Sequential(BasicR2P1DBlock(256, 512, stride=(2, 2, 2)), BasicR2P1DBlock(512, 512), BasicR2P1DBlock(512, 512)) self.pool = nn.AdaptiveMaxPool3d((1, 1, 1)) self.out_dim = 512 def forward(self, x, return_embs=False): x_c1 = self.conv1(x) x_b1 = self.conv2x(x_c1) x_b2 = self.conv3x(x_b1) x_b3 = self.conv4x(x_b2) x_b4 = self.conv5x(x_b3) x_pool = self.pool(x_b4) if return_embs: return {'conv1': x_c1, 'conv2x': x_b1, 'conv3x': x_b2, 'conv4x': x_b3, 'conv5x': x_b4, 'pool': x_pool} else: return x_pool
AVID-CMA-main
models/video.py
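A forward-pass sketch for `R2Plus1D` above, assuming the repo root is on PYTHONPATH; the clip shape is illustrative:

import torch
from models.video import R2Plus1D

# An 8-frame 112x112 clip; with the strides above, conv5x comes out at
# (N, 512, 1, 4, 4) and the adaptive pool reduces it to (N, 512, 1, 1, 1).
model = R2Plus1D(depth=18)
clip = torch.randn(2, 3, 8, 112, 112)   # (N, C, T, H, W)
print(model(clip).shape)                # torch.Size([2, 512, 1, 1, 1])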
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .avid import *
from .avid_cma import *
AVID-CMA-main
criterions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch from torch import nn from torch.nn import functional as F import torch.distributed as dist import pprint from utils.distributed_utils import _gather_from_all from utils.alias_method import AliasMethod from criterions.nce import NCECriterion __all__ = ['AVID'] class AVIDSimilarityMemoryBank(nn.Module): def __init__(self, memory_size, embedding_dim, xModal=True, wModal=False, num_negatives=1024, momentum=0.5, device=0 ): super(AVIDSimilarityMemoryBank, self).__init__() self.num_negatives = num_negatives self.temperature = 0.07 if not isinstance(momentum, (list, tuple)): momentum = [momentum]*2 self.momentum = momentum self.device = device self.multinomial = AliasMethod(torch.ones(memory_size-1)) self.xModal = xModal self.wModal = wModal self.distributed = dist.is_available() and dist.is_initialized() self.rank = dist.get_rank() if self.distributed else 0 self.init_memory(memory_size, embedding_dim) def forward(self, video_emb, audio_emb, y): K = int(self.num_negatives) # Normalize embeddings bs, dim = video_emb.shape video_emb = F.normalize(video_emb, p=2, dim=1).view(bs, dim, 1) audio_emb = F.normalize(audio_emb, p=2, dim=1).view(bs, dim, 1) # Sample memories with torch.no_grad(): video_pos_mem = self.view1_mem[y].view(bs, 1, dim) audio_pos_mem = self.view2_mem[y].view(bs, 1, dim) idx = self.sample_negatives(y, K).to(video_emb.device) video_neg_mem = self.view1_mem[idx].view(bs, K, dim) audio_neg_mem = self.view2_mem[idx].view(bs, K, dim) # Compute scores def compute_scores(context_emb, target_embs, T): return [torch.bmm(trg, context_emb).squeeze(-1) / T for trg in target_embs] scores = {} if self.xModal: scores['v2a'] = compute_scores(video_emb, [audio_pos_mem, audio_neg_mem], self.temperature) scores['a2v'] = compute_scores(audio_emb, [video_pos_mem, video_neg_mem], self.temperature) if self.wModal: scores['v2v'] = compute_scores(video_emb, [video_pos_mem, video_neg_mem], self.temperature) scores['a2a'] = compute_scores(audio_emb, [audio_pos_mem, audio_neg_mem], self.temperature) # Update memory bank self.update_memory(video_emb.squeeze(-1), audio_emb.squeeze(-1), y) return scores def sample_negatives(self, y, K): bs = y.shape[0] idx = self.multinomial.draw(bs * K).view(bs, -1).to(y.device) idx = idx + (idx >= y.unsqueeze(1)).long() # Avoid self return idx def init_memory(self, num_items, embedding_dim): self.register_buffer('view1_mem', torch.randn(num_items, embedding_dim)) self.register_buffer('view2_mem', torch.randn(num_items, embedding_dim)) self.view1_mem = F.normalize(self.view1_mem, p=2, dim=1) self.view1_mem = self.view1_mem.cuda(self.device) self.view2_mem = F.normalize(self.view2_mem, p=2, dim=1) self.view2_mem = self.view2_mem.cuda(self.device) if self.distributed: dist.broadcast(self.view1_mem, 0) dist.broadcast(self.view2_mem, 0) dist.barrier() def update_memory(self, video_emb, audio_emb, y): video_mom = float(self.momentum[0]) audio_mom = float(self.momentum[1]) # gather embeddings from all gpus if self.distributed: video_emb_gathered = _gather_from_all(video_emb) audio_emb_gathered = _gather_from_all(audio_emb) y_gathered = _gather_from_all(y) else: video_emb_gathered = video_emb audio_emb_gathered = audio_emb y_gathered = y # update audio and video memories with torch.no_grad(): l1_pos = self.view1_mem.index_select(0, y_gathered.view(-1)) l1_pos.mul_(video_mom) 
            l1_pos.add_(torch.mul(video_emb_gathered, 1 - video_mom))
            updated_l1 = F.normalize(l1_pos, p=2, dim=1)
            self.view1_mem.index_copy_(0, y_gathered, updated_l1)

            l2_pos = self.view2_mem.index_select(0, y_gathered.view(-1))
            l2_pos.mul_(audio_mom)
            l2_pos.add_(torch.mul(audio_emb_gathered, 1 - audio_mom))
            updated_l2 = F.normalize(l2_pos, p=2, dim=1)
            self.view2_mem.index_copy_(0, y_gathered, updated_l2)

    def __repr__(self):
        num_negatives = int(self.num_negatives)
        view1_mom = float(self.momentum[0])
        view2_mom = float(self.momentum[1])
        repr_dict = {
            'name': self._get_name(),
            'num_negatives': num_negatives,
            'momentum': [view1_mom, view2_mom],
            'view1_buffer_size': self.view1_mem.shape,
            'view2_buffer_size': self.view2_mem.shape,
        }
        return pprint.pformat(repr_dict, indent=2)


class AVID(nn.Module):
    def __init__(self,
                 num_data,
                 embedding_dim,
                 num_negatives=4096,
                 momentum=0.9,
                 xModal_coeff=1.,
                 wModal_coeff=0.,
                 checkpoint=None,
                 device=0):
        super(AVID, self).__init__()
        '''
        AVID criterion.
        This module receives the output embeddings of the video and audio models,
        computes their non-linear projections, manages the memory bank and computes the final loss.

        Args:
        - num_data: number of instances in the training set.
        - embedding_dim: output dimension of the non-linear projection.
        - num_negatives: number of negatives to draw from memory bank to compute the NCE loss.
        - momentum: memory bank EMA momentum parameter.
        - xModal_coeff: coefficient for the cross modal loss. (Cross-AVID: 1.0 | Self-AVID: 0.0 | Joint-AVID: 1.0)
        - wModal_coeff: coefficient for the within modal loss. (Cross-AVID: 0.0 | Self-AVID: 1.0 | Joint-AVID: 1.0)
        - checkpoint: optionally specify a checkpoint path to restore the memory bank and partition function
        '''
        self.nce_average = AVIDSimilarityMemoryBank(
            memory_size=num_data,
            embedding_dim=embedding_dim,
            num_negatives=num_negatives,
            momentum=momentum,
            xModal=xModal_coeff > 0.,
            wModal=wModal_coeff > 0.,
            device=device
        )
        self.nce_average = self.nce_average.cuda(device)

        sum_coeff = (xModal_coeff + wModal_coeff)
        self.xModal_coeff = xModal_coeff / sum_coeff
        self.wModal_coeff = wModal_coeff / sum_coeff
        self.criterion = NCECriterion(num_data)

        # Restore memory bank and partition function if necessary
        if checkpoint is not None:
            ckp = torch.load(checkpoint, map_location='cpu')['train_criterion']
            state_dict = self.state_dict()
            # Restore memory banks
            state_dict['nce_average.view1_mem'] = ckp['nce_average.view1_mem']
            state_dict['nce_average.view2_mem'] = ckp['nce_average.view2_mem']
            # Restore partition function
            Z = torch.stack([ckp[k] for k in ckp if 'avg_exp_score' in k]).mean()
            for k in state_dict:
                if 'avg_exp_score' in k:
                    state_dict[k] = Z
            self.load_state_dict(state_dict)

    def forward(self, emb1, emb2, target):
        '''
        Args
        - emb1: Video embeddings `(N, D)`
        - emb2: Audio embeddings `(N, D)`
        - target: Instance labels `(N)`
        '''
        tb_log = {}

        # Compare output embeddings to memory bank embeddings and get scores
        # scores given as: {task: [scores_positives, scores_negatives]}
        scores = self.nce_average(emb1, emb2, target)

        # Compute loss
        xModal_loss, wModal_loss = 0., 0.
        for k in scores:
            loss = self.criterion(*scores[k])
            if k in {'v2a', 'a2v'}:
                xModal_loss += loss / 2.
            elif k in {'v2v', 'a2a'}:
                wModal_loss += loss / 2.
            # Tensorboard metrics
            tb_log[f'Loss/{k}'] = loss

        # Tensorboard metrics
        tb_log['Loss/xModal'] = xModal_loss
        tb_log['Loss/wModal'] = wModal_loss

        # Final loss
        total_loss = xModal_loss * self.xModal_coeff + wModal_loss * self.wModal_coeff

        return total_loss, tb_log

    def set_epoch(self, epoch):
        pass
AVID-CMA-main
criterions/avid.py
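A standalone illustration of the self-exclusion trick used by `sample_negatives` above: indices are drawn from a table of size `memory_size - 1` and shifted up by one wherever they land at or above the query's own index, so an instance never draws itself as a negative. Here `torch.randint` stands in for `AliasMethod.draw`:

import torch

memory_size, K = 10, 5
y = torch.tensor([3, 7])                                  # query indices
idx = torch.randint(memory_size - 1, (y.shape[0], K))     # stand-in for AliasMethod.draw
idx = idx + (idx >= y.unsqueeze(1)).long()                # skip over the query itself
assert not (idx == y.unsqueeze(1)).any()                  # self never sampled
print(idx)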
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import torch from torch import nn import torch.distributed as dist from utils.distributed_utils import _gather_from_all class NCECriterion(nn.Module): def __init__(self, nLem): super(NCECriterion, self).__init__() self.nLem = nLem self.register_buffer('avg_exp_score', torch.tensor(-1.)) self.distributed = dist.is_available() and dist.is_initialized() def compute_partition_function(self, out): if self.avg_exp_score > 0: # Use precomputed value return self.avg_exp_score with torch.no_grad(): batch_mean = out.mean().unsqueeze(0) if self.distributed: batch_mean_gathered = _gather_from_all(batch_mean) all_batch_mean = batch_mean_gathered.mean().squeeze() else: all_batch_mean = batch_mean Z = all_batch_mean self.avg_exp_score = Z return self.avg_exp_score def forward(self, scores_pos, scores_neg): K = scores_neg.size(1) # Compute unnormalized distributions exp_scores_pos = torch.exp(scores_pos) exp_scores_neg = torch.exp(scores_neg) # Compute partition function and normalize with torch.no_grad(): avg_exp_score = self.compute_partition_function(exp_scores_neg) # eq 5.1 : P(origin=model) = Pmt / (Pmt + k*Pnt) Pmt = torch.div(exp_scores_pos, exp_scores_pos + K * avg_exp_score) lnPmtSum = -torch.log(Pmt).mean(-1) # eq 5.2 : P(origin=noise) = k*Pn / (Pms + k*Pn) Pon = torch.div(K * avg_exp_score, exp_scores_neg + K * avg_exp_score) lnPonSum = -torch.log(Pon).sum(-1) loss = (lnPmtSum + lnPonSum).mean() return loss
AVID-CMA-main
criterions/nce.py
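A standalone walk-through of the two NCE terms in `NCECriterion.forward` above (eq 5.1 and 5.2), with the partition function estimated as the batch mean of the exponentiated negative scores; the scores are toy values:

import torch

K = 4
scores_pos = torch.tensor([[2.0], [1.5]])        # (N, 1) positive similarities
scores_neg = torch.randn(2, K)                   # (N, K) negative similarities

exp_pos, exp_neg = torch.exp(scores_pos), torch.exp(scores_neg)
Z = exp_neg.mean()                               # crude partition-function estimate

Pmt = exp_pos / (exp_pos + K * Z)                # eq 5.1: P(origin=model) for positives
Pon = (K * Z) / (exp_neg + K * Z)                # eq 5.2: P(origin=noise) for negatives

loss = (-torch.log(Pmt).mean(-1) - torch.log(Pon).sum(-1)).mean()
print(loss.item())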
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import multiprocessing as mp
mp.set_start_method('spawn', force=True)

import torch
from torch import nn
from torch.nn import functional as F
import torch.distributed as dist
import numpy as np
import pprint

from utils.alias_method import AliasMethod
from criterions.nce import NCECriterion
from criterions.avid import AVIDSimilarityMemoryBank

__all__ = ['AVID_CMA']


class CMASampler:
    def __init__(self, video_mem, audio_mem, sampling_args):
        '''
        Class responsible for finding the audio-visual correspondences from the
        audio and video memory banks. Correspondences are computed by calling
        the sample() method. To speed things up, this code will be distributed
        over different GPUs and synced at the end.
        :param video_mem: Video memory bank
        :param audio_mem: Audio memory bank
        :param sampling_args: Dictionary with two fields.
            `type`: Type of correspondence. Options are `consensus`, `union`,
                `video` and `audio`. Refer to the paper for their meaning.
            `pos_k`: Number of positive correspondences to sample for each instance.
        '''
        self.video_mem = video_mem.cpu()
        self.audio_mem = audio_mem.cpu()
        self.sampling_args = sampling_args

    def sample_instance(self, gpu, q_job, q_data):
        video_mem = self.video_mem.cuda(gpu)
        audio_mem = self.audio_mem.cuda(gpu)
        while True:
            query_idx = q_job.get()
            if query_idx is None:
                break

            # Compute video and audio cosine similarities
            video_sim = torch.mm(video_mem, video_mem[query_idx].t())
            audio_sim = torch.mm(audio_mem, audio_mem[query_idx].t())

            # Compute agreement score
            if self.sampling_args['type'] == 'consensus':
                similarity = torch.stack([video_sim, audio_sim], 0).min(dim=0)[0]
            elif self.sampling_args['type'] == 'union':
                similarity = torch.stack([video_sim, audio_sim], 0).max(dim=0)[0]
            elif self.sampling_args['type'] == 'video':
                similarity = video_sim
            elif self.sampling_args['type'] == 'audio':
                similarity = audio_sim
            else:
                raise ValueError

            # Return top-k correspondences
            pos_sim, pos_idx = torch.topk(similarity, self.sampling_args['pos_k'] + 1, dim=0, sorted=True)
            pos_index = pos_idx[1:].t().cpu().numpy()   # Avoid self
            pos_index = np.sort(pos_index, axis=1)      # Sort indexes so that negative sampling can be done efficiently
            q_data.put((query_idx, pos_index))
        q_data.put((None, None))

    def sample_dispatcher(self, q_job, workers=80):
        num_items = self.video_mem.shape[0]
        batch_size = 16
        for i in range(0, num_items, batch_size):
            query_idx = list(range(i, min(i + batch_size, num_items)))
            q_job.put(query_idx)
        for _ in range(workers):
            q_job.put(None)

    def sample_gather(self, q_data, workers=80):
        num_items = self.video_mem.shape[0]
        positive_index = np.zeros((num_items, self.sampling_args['pos_k'])).astype(int)
        workers_done, done = 0, 0
        while workers_done < workers:
            query_idx, pos_idx = q_data.get()
            if query_idx is None:
                workers_done += 1
            else:
                done += pos_idx.shape[0]
                positive_index[query_idx] = pos_idx
                if done % (64 * 1000) == 0:
                    print(f"Done {done}/{num_items}")
        return positive_index

    def sample(self):
        # Compute CMA correspondences. Runs on only one process. Distributes work over all gpus.
        num_workers = torch.cuda.device_count()
        q_job = mp.Queue(maxsize=1000)
        q_data = mp.Queue(maxsize=1000)

        # Start job launcher
        disp = mp.Process(target=self.sample_dispatcher, args=(q_job, num_workers), daemon=True)
        disp.start()

        # Start workers
        workers = []
        for gpu in range(num_workers):
            w = mp.Process(target=self.sample_instance, args=(gpu, q_job, q_data), daemon=True)
            w.start()
            workers += [w]

        # Gather results from workers
        pos_index = self.sample_gather(q_data, num_workers)

        # Wait for all jobs to finish
        [w.join() for w in workers]
        disp.join()
        return pos_index


class AVIDSimilarityPositiveExpansion(AVIDSimilarityMemoryBank):
    def __init__(self,
                 memory_size,
                 embedding_dim,
                 xModalInst=True,
                 wModalInst=False,
                 xModalPos=False,
                 wModalPos=True,
                 num_negatives=1024,
                 num_negatives_within=None,
                 sampling_args=None,
                 momentum=0.5,
                 device=0,
                 ):
        super().__init__(memory_size=memory_size,
                         embedding_dim=embedding_dim,
                         xModal=xModalInst,
                         wModal=wModalInst,
                         num_negatives=num_negatives,
                         momentum=momentum,
                         device=device)
        self.num_negatives_within = num_negatives_within
        self.multinomial = AliasMethod(torch.ones(memory_size - sampling_args['pos_k']))
        self.sampling_args = sampling_args
        self.xModalInst = xModalInst
        self.wModalInst = wModalInst
        self.xModalPos = xModalPos
        self.wModalPos = wModalPos

    def forward(self, video_emb, audio_emb, y):
        bs, dim = video_emb.shape
        video_emb = F.normalize(video_emb, p=2, dim=1).view(bs, dim, 1)
        audio_emb = F.normalize(audio_emb, p=2, dim=1).view(bs, dim, 1)

        # Sample memories
        with torch.no_grad():
            video_self_mem = self.view1_mem[y].view(bs, 1, dim)
            audio_self_mem = self.view2_mem[y].view(bs, 1, dim)

            pos_idx, neg_idx = self.memory_sampling(y)
            video_pos_mem = self.view1_mem[pos_idx]
            audio_pos_mem = self.view2_mem[pos_idx]
            video_neg_mem = self.view1_mem[neg_idx]
            audio_neg_mem = self.view2_mem[neg_idx]

        # Compute scores
        def compute_scores(context_emb, target_embs, T):
            return [torch.bmm(trg, context_emb).squeeze(-1) / T for trg in target_embs]

        # Instance Discrimination
        scores = {}
        if self.xModalInst:  # Cross-modal discrimination
            scores['inst-v2a'] = compute_scores(video_emb, [audio_self_mem, audio_neg_mem], self.temperature)
            scores['inst-a2v'] = compute_scores(audio_emb, [video_self_mem, video_neg_mem], self.temperature)
        if self.wModalInst:  # Within-modal discrimination
            # Within-modal keys compare each embedding against its own modality's
            # memory, matching the 'inst-v2v'/'inst-a2a' keys consumed by AVID_CMA.
            scores['inst-v2v'] = compute_scores(video_emb, [video_self_mem, video_neg_mem], self.temperature)
            scores['inst-a2a'] = compute_scores(audio_emb, [audio_self_mem, audio_neg_mem], self.temperature)

        # Positive Set Discrimination
        if self.xModalPos:  # Cross-modal discrimination
            scores['pos-v2a'] = compute_scores(video_emb, [audio_pos_mem, audio_neg_mem], self.temperature)
            scores['pos-a2v'] = compute_scores(audio_emb, [video_pos_mem, video_neg_mem], self.temperature)
        if self.wModalPos:  # Within-modal discrimination
            # Potentially reduce number of negatives for within-modal discrimination
            wm_video_neg_mem, wm_audio_neg_mem = video_neg_mem, audio_neg_mem
            if self.num_negatives_within is not None:
                wm_video_neg_mem = video_neg_mem[:, :self.num_negatives_within]
                wm_audio_neg_mem = audio_neg_mem[:, :self.num_negatives_within]
            scores['pos-v2v'] = compute_scores(video_emb, [video_pos_mem, wm_video_neg_mem], self.temperature)
            scores['pos-a2a'] = compute_scores(audio_emb, [audio_pos_mem, wm_audio_neg_mem], self.temperature)

        # Update memory
        self.update_memory(video_emb.squeeze(-1), audio_emb.squeeze(-1), y)
        return scores

    def memory_sampling(self, y):
        # Draw positives
        positive_indexes = self.positive_set[y].long()

        # Draw negatives
        bs = y.shape[0]
        rand_idx = self.multinomial.draw(bs * self.num_negatives).view(bs, -1).to(y.device)
        # Avoid positives while sampling negatives (positive list is sorted)
        pos_idx = self.positive_set[y].long()
        ref = pos_idx - torch.arange(pos_idx.shape[1], dtype=pos_idx.dtype, device=pos_idx.device).unsqueeze(0)
        negative_indexes = rand_idx + (rand_idx.unsqueeze(2) >= ref.unsqueeze(1)).sum(2)
        return positive_indexes, negative_indexes

    def find_correspondences(self):
        if self.sampling_args['pos_k'] <= 0:
            return

        # Find CMA correspondences. Only do this on one process if running in
        # distributed mode and sync at the end.
        positive_set = np.zeros((self.view1_mem.shape[0], self.sampling_args['pos_k'])).astype(int)
        if not self.distributed or (self.distributed and self.rank == 0):
            torch.cuda.empty_cache()
            positive_set = CMASampler(self.view1_mem, self.view2_mem, self.sampling_args).sample()
        if positive_set is not None:
            self.register_buffer('positive_set', torch.from_numpy(positive_set).int())
            self.positive_set = self.positive_set.cuda(self.device)

        if self.distributed:
            dist.broadcast(self.positive_set, 0)
        if self.distributed:
            dist.barrier()

    def __repr__(self):
        num_negatives = int(self.num_negatives)
        view1_mom = float(self.momentum[0])
        view2_mom = float(self.momentum[1])
        repr_dict = {
            'name': self._get_name(),
            'num_negatives': num_negatives,
            'momentum': [view1_mom, view2_mom],
            'view1_buffer_size': self.view1_mem.shape,
            'view2_buffer_size': self.view2_mem.shape,
        }
        return pprint.pformat(repr_dict, indent=2)


class AVID_CMA(nn.Module):
    def __init__(self,
                 num_data,
                 embedding_dim,
                 num_negatives=1024,
                 num_negatives_within=None,
                 momentum=0.5,
                 xModalInstCoeff=1.,
                 wModalInstCoeff=0.,
                 xModalPosCoeff=0.,
                 wModalPosCoeff=1.,
                 sampling_args=None,
                 checkpoint=None,
                 resample_freq=-1,
                 device=0):
        '''
        AVID_CMA criterion.
        This module receives the output embeddings of the video and audio models,
        computes their non-linear projections, manages the memory bank, draws
        positive correspondences, and computes the final loss (weighted average
        between instance discrimination and positive discrimination losses).

        Args:
        - num_data: number of instances in the training set.
        - embedding_dim: output dimension of the non-linear projection.
        - num_negatives: number of negatives to draw from memory bank to compute the NCE loss.
        - num_negatives_within: optionally reduce the number of negatives for the within-modal loss.
        - momentum: memory bank EMA momentum parameter.
        - xModalInstCoeff: coefficient for the cross modal instance discrimination loss. (AVID-CMA: 1.0)
        - wModalInstCoeff: coefficient for the within modal instance discrimination loss. (AVID-CMA: 0.0)
        - xModalPosCoeff: coefficient for the cross modal positive discrimination loss. (AVID-CMA: 0.0)
        - wModalPosCoeff: coefficient for the within modal positive discrimination loss. (AVID-CMA: 1.0)
        - checkpoint: optionally specify a checkpoint path to restore the memory bank and partition function
        '''
        super(AVID_CMA, self).__init__()

        # First set up the NCEAverage method to get the scores of the output wrt. memory bank negatives
        self.nce_average = AVIDSimilarityPositiveExpansion(
            memory_size=num_data,
            embedding_dim=embedding_dim,
            num_negatives=num_negatives,
            num_negatives_within=num_negatives_within,
            momentum=momentum,
            xModalInst=xModalInstCoeff > 0.,
            xModalPos=xModalPosCoeff > 0.,
            wModalInst=wModalInstCoeff > 0.,
            wModalPos=wModalPosCoeff > 0.,
            sampling_args=sampling_args,
            device=device
        )
        self.nce_average = self.nce_average.cuda(device)

        # Loss coefficients
        sum_coeff = xModalInstCoeff + wModalInstCoeff + xModalPosCoeff + wModalPosCoeff
        self.xModalInstCoeff = xModalInstCoeff / sum_coeff
        self.wModalInstCoeff = wModalInstCoeff / sum_coeff
        self.xModalPosCoeff = xModalPosCoeff / sum_coeff
        self.wModalPosCoeff = wModalPosCoeff / sum_coeff

        # Setup loss function
        self.criterion = NCECriterion(num_data)

        # Restore memory bank and partition function from AVID checkpoint.
        # Needs to be done before finding correspondences.
        if checkpoint is not None:
            ckp = torch.load(checkpoint, map_location='cpu')['train_criterion']
            state_dict = self.state_dict()
            # Restore memory banks
            state_dict['nce_average.view1_mem'] = ckp['nce_average.view1_mem']
            state_dict['nce_average.view2_mem'] = ckp['nce_average.view2_mem']
            # Restore partition function
            Z = torch.stack([ckp[k] for k in ckp if 'avg_exp_score' in k]).mean()
            for k in state_dict:
                if 'avg_exp_score' in k:
                    state_dict[k] = Z
            self.load_state_dict(state_dict)

        # Find CMA correspondences
        self.resample_freq = resample_freq
        self.nce_average.find_correspondences()

    def forward(self, emb1, emb2, target):
        '''
        Args:
        - emb1: Video embeddings `(N, D)`
        - emb2: Audio embeddings `(N, D)`
        - target: Instance labels `(N)`
        '''
        tb_log = {}

        # Compare output embeddings to memory bank embeddings and get scores
        scores = self.nce_average(emb1, emb2, target)

        # Compute cross/within modal discrimination losses
        xModalInst_loss, wModalInst_loss, xModalPos_loss, wModalPos_loss = 0., 0., 0., 0.
        for k in scores:
            loss = self.criterion(*scores[k])
            if k in {'inst-v2a', 'inst-a2v'}:
                xModalInst_loss += loss / 2.
            elif k in {'inst-v2v', 'inst-a2a'}:
                wModalInst_loss += loss / 2.
            elif k in {'pos-v2a', 'pos-a2v'}:
                xModalPos_loss += loss / 2.
            elif k in {'pos-v2v', 'pos-a2a'}:
                wModalPos_loss += loss / 2.

            # Metrics for tensorboard
            with torch.no_grad():
                tb_log[f'Loss/{k}'] = loss

        # Compute final loss
        total_loss = xModalInst_loss * self.xModalInstCoeff
        total_loss += wModalInst_loss * self.wModalInstCoeff
        total_loss += xModalPos_loss * self.xModalPosCoeff
        total_loss += wModalPos_loss * self.wModalPosCoeff
        return total_loss, tb_log

    def set_epoch(self, epoch):
        # Recompute CMA correspondences every resample_freq epochs
        if self.resample_freq > 0 and epoch > 0 and epoch % self.resample_freq == 0:
            self.nce_average.find_correspondences()
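# --- Worked example (added for illustration; not part of the original file) ---
# memory_sampling() above draws negatives uniformly over the
# (memory_size - pos_k) memory slots that are NOT positives. Raw draws land in
# [0, memory_size - pos_k); each draw is then shifted past every positive index
# at or below it, which works because the positive list is sorted. A minimal
# standalone sketch of the shift with toy numbers:
if __name__ == "__main__":
    memory_size, pos_k = 10, 3
    pos_idx = torch.tensor([[2, 5, 6]])                        # sorted positives, shape (1, pos_k)
    rand_idx = torch.arange(memory_size - pos_k).view(1, -1)   # every possible raw draw 0..6
    ref = pos_idx - torch.arange(pos_k).unsqueeze(0)           # positives minus their rank: [[2, 4, 4]]
    neg_idx = rand_idx + (rand_idx.unsqueeze(2) >= ref.unsqueeze(1)).sum(2)
    print(neg_idx)  # tensor([[0, 1, 3, 4, 7, 8, 9]]) -- exactly the non-positive slots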
AVID-CMA-main
criterions/avid_cma.py
"""Init."""
fm_data_tasks-main
fm_data_tasks/__init__.py
"""Run inference.""" import argparse import json import logging from pathlib import Path import numpy as np from manifest import Manifest import fm_data_tasks.utils.data_utils as data_utils import fm_data_tasks.utils.prompt_utils as prompt_utils from fm_data_tasks.utils import constants from fm_data_tasks.utils.utils import compute_metrics, setup_logger logger = logging.getLogger(__name__) def parse_args() -> argparse.Namespace: """Generate args.""" parser = argparse.ArgumentParser(description="Simple calculator") parser.add_argument( "--data_dir", type=str, help="Which data directory to run.", required=True, ) parser.add_argument( "--output_dir", type=str, help="Output directory.", default="outputs" ) parser.add_argument( "--cache_name", type=str, help="Manifest cache type.", default="sqlite", choices=["redis", "sqlite", "noop"], ) parser.add_argument( "--cache_connection", type=str, help="Manifest cache connection string.", default="fm_data_tasks.sqlite", ) parser.add_argument( "--client_name", type=str, help="Manifest client type.", default="openai", choices=["openai", "opt", "huggingface"], ) parser.add_argument( "--client_connection", type=str, help="Manifest client connection string.", default=None, ) parser.add_argument( "--run_tag", type=str, help="Tag for run saving.", default="default", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite sqlite cache of input/output results.", ) parser.add_argument("--k", type=int, help="Number examples in prompt", default=1) parser.add_argument( "--sample_method", type=str, help="Example generation method", default="random", choices=["random", "manual", "validation_clusters"], ) parser.add_argument("--seed", type=int, default=1234) parser.add_argument( "--class_balanced", help="Class balance training data. Good for classification tasks \ with random prompts.", action="store_true", ) parser.add_argument( "--sep_tok", type=str, help="Separate for attr: val pairs in row. Default is '.'.", default=".", ) parser.add_argument( "--nan_tok", type=str, help="Token to represent nan entries. Default is 'nan'.", default="nan", ) parser.add_argument( "--num_run", type=int, help="Number examples to run through model.", default=-1, ) parser.add_argument( "--num_trials", type=int, help="Number trials to run. Results will be averaged with variance reported.", default=1, ) parser.add_argument( "--num_print", type=int, help="Number example prompts to print.", default=10, ) parser.add_argument( "--add_task_instruction", help="Add task instruction to the prompt before examples.", action="store_true", ) parser.add_argument("--task_instruction_idx", type=int, default=0) parser.add_argument("--do_test", help="Run on test file.", action="store_true") parser.add_argument( "--dry_run", help="Dry run. 
Do not actually ping model.", action="store_true" ) parser.add_argument( "--stop_token", help="Token to stop on for a given generated response", default="\n" ) # Model args parser.add_argument("--temperature", type=float, help="Temperature.", default=0.0) parser.add_argument( "--max_tokens", type=int, help="Max tokens to generate.", default=3 ) args = parser.parse_args() return args def main(): """Run main method.""" args = parse_args() if args.num_trials < 1: raise ValueError("num_trials must be greater than 0.") # Get absolute path args.data_dir = str(Path(args.data_dir).resolve()) setup_logger(args.output_dir) logger.info(json.dumps(vars(args), indent=4)) # Will set seed for pandas np.random.seed(args.seed) test_file = "test" if args.do_test else "validation" # Read pandas DF datasets pd_data_files = data_utils.read_data( data_dir=args.data_dir, class_balanced=args.class_balanced, add_instruction=False, max_train_samples=-1, max_train_percent=-1, sep_tok=args.sep_tok, nan_tok=args.nan_tok, ) if test_file not in pd_data_files: raise ValueError(f"Need {test_file} data") train_data = pd_data_files["train"] test_data = pd_data_files[test_file] task = constants.DATA2TASK[args.data_dir] logger.info(f"Using {args.task_instruction_idx} instruction idx") task_instruction = constants.DATA2INSTRUCT[args.data_dir] num_run = args.num_run if args.num_run == -1: num_run = test_data.shape[0] num_run = min(num_run, test_data.shape[0]) logger.info(f"Train shape is {train_data.shape[0]}") logger.info(f"Test shape is {test_data.shape[0]}") logger.info(f"Running {num_run} examples for {args.num_trials} trials.") # Setup manifest manifest = Manifest( cache_name=args.cache_name, cache_connection=args.cache_connection, client_name=args.client_name, client_connection=args.client_connection, stop_token=args.stop_token, temperature=args.temperature, max_tokens=args.max_tokens, top_p=1.0, n=1, ) if args.add_task_instruction: prompt = lambda x: f"{task_instruction} {x}" else: prompt = lambda x: f"{x}" trial_metrics = {"prec": [], "rec": [], "f1": [], "acc": []} saved_prefix = None for trial_num in range(args.num_trials): np.random.seed(args.seed + trial_num) queries = [] for _, row in test_data.iterrows(): serialized_r = row["text"] if args.sample_method == "manual": prefix_exs = prompt_utils.get_manual_prompt(args.data_dir, row) elif args.sample_method == "validation_clusters": if saved_prefix is None: logger.info("Generating validation cluster prompt.") saved_prefix = prompt_utils.get_validation_prompt( args.validation_path, num_examples=args.k, task=task, ) prefix_exs = saved_prefix else: if saved_prefix is None: saved_prefix = prompt_utils.get_random_prompt( pd_data_files["train"], num_examples=args.k ) prefix_exs = saved_prefix queries.append((prefix_exs + "\n" + serialized_r).strip()) gt = test_data["label_str"] preds = [] idx = 0 # Run a few for printing -- they are cached for _ in range(min(num_run, args.num_print)): logger.info(prompt(queries[idx])) if not args.dry_run: pred = manifest.run( prompt(queries[idx]), overwrite_cache=args.overwrite_cache ) else: pred = "" preds.append(pred) logger.info(f"====> {pred} <====") idx += 1 # Send to model for predictions if not args.dry_run: for query in queries[idx:num_run]: preds.append( manifest.run( prompt(query), overwrite_cache=args.overwrite_cache, ) ) else: preds.extend([""] * (num_run - idx)) # Save trial predictions save_data = test_data.iloc[:num_run].copy(deep=True).reset_index() gt = gt[:num_run] save_data["preds"] = preds save_data["queries"] = 
queries[:num_run] prec, rec, acc, f1 = compute_metrics(preds, gt, task) logger.info( f"Metrics Trial {trial_num}\n" f"Prec: {prec:.3f} Recall: {rec:.3f} Acc: {acc:.3f} F1: {f1:.3f}" ) trial_metrics["rec"].append(rec) trial_metrics["prec"].append(prec) trial_metrics["acc"].append(acc) trial_metrics["f1"].append(f1) output_file = ( Path(args.output_dir) / f"{Path(args.data_dir).stem}" / f"{test_file}" / f"{args.run_tag}" / f"{args.k}k" f"_{int(args.add_task_instruction)}inst" f"_{int(args.class_balanced)}cb" f"_{args.sample_method}" f"_{args.num_run}run" f"_{int(args.dry_run)}dry" / f"trial_{trial_num}.feather" ) output_file.parent.mkdir(parents=True, exist_ok=True) logger.info(f"Saved to {output_file}") save_data.to_feather(output_file) for k, values in list(trial_metrics.items()): trial_metrics[f"{k}_avg"] = np.average(values) trial_metrics[f"{k}_std"] = np.std(values) output_metrics = output_file.parent / "metrics.json" json.dump(trial_metrics, open(output_metrics, "w")) logger.info(f"Final Metrics {json.dumps(trial_metrics, indent=4)}") logger.info(f"Metrics dumped to {output_metrics}") if __name__ == "__main__": main()
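# --- Example invocation (illustrative only; the entry point and paths below
# are assumptions, not part of the original script) ---
#
# Dry run that builds and prints prompts without querying the model:
#   python fm_data_tasks/run_inference.py \
#       --data_dir data/datasets/data_imputation/Restaurant \
#       --k 3 --sample_method random --num_run 20 --num_print 5 --dry_run
#
# Dropping --dry_run sends the prompts through Manifest using the configured
# --client_name / --client_connection backend.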
fm_data_tasks-main
fm_data_tasks/run_inference.py
"""Constants.""" import os from pathlib import Path DATASET_PATH = os.environ.get("DATASET_PATH", Path("data/datasets").resolve()) DATA2TASK = { f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/Beer": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": "entity_matching", f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": "entity_matching", f"{DATASET_PATH}/data_imputation/Buy": "data_imputation", f"{DATASET_PATH}/data_imputation/Restaurant": "data_imputation", f"{DATASET_PATH}/error_detection/Hospital": "error_detection_spelling", f"{DATASET_PATH}/error_detection/Adult": "error_detection", f"{DATASET_PATH}/schema_matching/Synthea": "schema_matching", } IMPUTE_COLS = { f"{DATASET_PATH}/data_imputation/Buy": "manufacturer", f"{DATASET_PATH}/data_imputation/Restaurant": "city", } MATCH_PROD_NAME = { f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": "Product", f"{DATASET_PATH}/entity_matching/structured/Beer": "Product", f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": "Product", f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": "Product", f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": "Product", f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": "Song", f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": "Product", f"{DATASET_PATH}/schema_matching/Synthea": "", } # Dropping happens before renaming DATA2DROPCOLS = { f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": [], f"{DATASET_PATH}/entity_matching/structured/Beer": ["Style", "ABV"], f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": [], f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": [], f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": [], f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": ["CopyRight"], f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": [ "category", "price", "brand", ], f"{DATASET_PATH}/data_imputation/Buy": [], f"{DATASET_PATH}/data_imputation/Restaurant": [], f"{DATASET_PATH}/error_detection/Hospital": [], f"{DATASET_PATH}/error_detection/Adult": [], f"{DATASET_PATH}/schema_matching/Synthea": ["des1", "des2", "d1", "d2", "d3", "d4"], } DATA2COLREMAP = { f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": {}, f"{DATASET_PATH}/entity_matching/structured/Beer": { "id": "id", "Beer_Name": "name", "Brew_Factory_Name": "factory", "Style": "style", "ABV": "ABV", }, f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": {}, f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": {}, f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": {}, f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": { "id": "id", "Song_Name": "name", "Artist_Name": "artist name", "Album_Name": "album name", "Genre": "genre", "Price": "price", "CopyRight": "CopyRight", "Time": "time", "Released": "released", }, f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": {}, f"{DATASET_PATH}/data_imputation/Buy": {}, f"{DATASET_PATH}/data_imputation/Restaurant": {}, f"{DATASET_PATH}/error_detection/Hospital": {}, f"{DATASET_PATH}/error_detection/Adult": {}, f"{DATASET_PATH}/schema_matching/Synthea": { "omop": "left", "table": "right", "label": "label", }, } 
DATA2INSTRUCT = {
    f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/Beer": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": "Are Song A and Song B the same? Yes or No? ",
    f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": "Are Product A and Product B the same? Yes or No? ",
    f"{DATASET_PATH}/data_imputation/Buy": "Who is the manufacturer? apple, sony, lg electronics? ",
    f"{DATASET_PATH}/data_imputation/Restaurant": "What is the city? san fransisco, new york, denver? ",
    f"{DATASET_PATH}/error_detection/Hospital": "Is there a x spelling error? Yes or No? ",
    f"{DATASET_PATH}/schema_matching/Synthea": "Are A and B semantically equal?\n\n",
    f"{DATASET_PATH}/error_detection/Adult": "Classify errors. nan values are not errors.",
}

DATA2SUFFIX = {
    f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": " Are Product A and Product B the same?",
    f"{DATASET_PATH}/entity_matching/structured/Beer": " Are Product A and Product B the same?",
    f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": " Are Product A and Product B the same?",
    f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": " Are Product A and Product B the same?",
    f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": " Are Product A and Product B the same?",
    f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": " Same Song?",
    f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": " Are A and B the Same?",
    f"{DATASET_PATH}/data_imputation/Buy": " Who is the manufacturer?",
    f"{DATASET_PATH}/data_imputation/Restaurant": " What is the city?",
    f"{DATASET_PATH}/error_detection/Hospital": "?",
    f"{DATASET_PATH}/schema_matching/Synthea": "Are A and B the same? Yes or No?",
    f"{DATASET_PATH}/error_detection/Adult": "? ",
}

DATA2EXAMPLE_SUBKEY_ATTR = {
    f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": "manufacturer_A",
    f"{DATASET_PATH}/entity_matching/structured/Beer": None,
    f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": "venue_A",
    f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": "venue_A",
    f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": None,
    f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": None,
    f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": None,
    f"{DATASET_PATH}/data_imputation/Buy": None,
    f"{DATASET_PATH}/data_imputation/Restaurant": None,
    f"{DATASET_PATH}/error_detection/Hospital": "col_name",
    f"{DATASET_PATH}/schema_matching/Synthea": "left",
    f"{DATASET_PATH}/error_detection/Adult": "col_name",
}

PREFIXES = {
    f"{DATASET_PATH}/entity_matching/structured/iTunes-Amazon": "Song A is name: Illusion ( feat . Emeli Sand ' © & Professor Green ). Song B is name: Transmission [ feat . Emeli Sand ' © & Professor Green ]. Same Song? No\n\nSong A is name: Praise the sky ( feat . Meghan Trainor ). Song B is name: Praise the sky. Same Song? Yes\n\nSong A is name: Drive it ( Featuring Jackson T. Brown ). Song B is name: Drive it ( Featuring Jackson T. 
Brown ) ( Main Version ). Same Song? Yes\n\nSong A is name: Jump Up ( feat . Tyler ). Song B is name: Jump Up ( feat . Tyler ) [ Explicit ]. Same Song? Yes\n\nSong A is name: Fight Song Anthem. genre: Country , Music , Urban Cowboy , Contemporary Country. Song B is name: Fight Song Anthem [ Explicit ]. genre: Country. Same Song? Yes\n\nSong A is name: Over it. Song B is name: Over it [ Clean ]. Same Song? Yes\n\nSong A is name: Original Don ( feat . Nicki Minaj ) ( Extended Version ). Song B is name: Original Don ( feat . Nicki Minaj ) [Remix]. Same Song? No\n\nSong A is name: Amazing 2.0 ( feat . Justin Bieber ). time: 3:14. Song B is name: Amazing ( feat . Justin Bieber ). time: 6:14. Same Song? Yes\n\nSong A is name: Run this town ( feat . JT ). Song B is name: Fight the law ( feat . JT ). Same Song? No\n", f"{DATASET_PATH}/entity_matching/structured/Beer": "Product A is name: Bourbon Barrel Red Oak Ale. factory: Big Brewing Company. style: American Amber / Red Ale. ABV: 7.60%. Product B is name: Bourbon Barrel Red Oak Ale. factory: Halo Street Brewery. style: American Amber / Red Ale. ABV: 7.60%. Are Product A and Product B the same? Yes\n\nProduct A is name: Red Imperial Red Ale. factory: Brew Brewing Company. style: American Amber / Red Ale. ABV: 5.40%. Product B is name: Red Imperial Red Ale - Bourbon Barrel Aged. factory: Brew Brewing Company. style: American Amber / Red Ale. ABV: 5.40%. Are Product A and Product B the same? No\n\nProduct A is name: Red Rocket Amber Ale. factory: Third Base Sports Bar & Brewery. style: American Amber / Red Ale. ABV: 6.40%. Product B is name: Blue Cat Red Toad Amber Ale. factory: Blue Cat Brew Pub. style: Premium Bitter/ESB. ABV: 5.70%. Are Product A and Product B the same? No\n\nProduct A is name: The Gentle Mongoose Holy City. factory: Brewing City Brewing. style: American Amber / Red Ale. ABV: 6.90%. Product B is name: Holy City The Gentle Mongoose. factory: Holy City Brewing. style: Amber Ale. ABV: 6.90%. Are Product A and Product B the same? Yes\n\nProduct A is name: Freedom Soho Red. factory: Freedom Brewery Ltd. style: American Amber / Red Ale. ABV: 4.70%. Product B is name: Freedom Soho Red. factory: Freedom. style: Amber Lager/Vienna. ABV: 4.70%. Are Product A and Product B the same? Yes\n\nProduct A is name: Blarney Rock Irish Ale. factory: Rockyard Brewing. style: American Amber / Red Ale. ABV: 6.00%. Product B is name: Rockyard Blarney Rock Irish Ale. factory: Rockyard Brewing Company. style: Irish Ale. ABV: -. Are Product A and Product B the same? Yes\n\nProduct A is name: Red Rocket Amber Ale. factory: Third Base Sports Bar & Brewery. style: American Amber / Red Ale. ABV: 6.40%. Product B is name: Red Duck Amber Ale. factory: Purrumbete Brewing. style: Amber Ale. ABV: 4.80%. Are Product A and Product B the same? No\n", f"{DATASET_PATH}/entity_matching/structured/Fodors-Zagats": "Product A is name: ` bamboo garden '. addr: ' 4850 flamingo rd. '. city: ` las vegas '. phone: 702/871 -3262. type: asian. class: 425. Product B is name: ` buzio \\ 's in the rio '. addr: ' 3700 w. flamingo rd. '. city: ` las vegas '. phone: 702-252-7697. type: seafood. class: 658. Are Product A and Product B the same? No, Product A class: 425 and Product B class: 658 are not the same\n\nProduct A is name: ` le chardonnay '. addr: ' 8284 melrose ave. '. city: ` los angeles '. phone: 213/655 -8880. type: french. class: type: 12. Product B is name: ` le chardonnay ( los angeles ) '. addr: ' 8284 melrose ave. '. city: ` los angeles '. phone: 213-655-8880. 
type: ` french bistro '. class: type: 12. Are Product A and Product B the same? Yes\n\nProduct A is name: lespinasse. addr: ' 2 e. 55th st. '. city: ` new york '. phone: 212/339 -6719. type: american. class: type: 43. Product B is name: ` lespinasse ( new york city ) '. addr: ' 2 e. 55th st. '. city: ` new york city '. phone: 212-339-6719. type: asian. class: type: 43. Are Product A and Product B the same? Yes\n\nProduct A is name: ` cafe roma '. addr: ' 3570 las vegas blvd. s '. city: ` las vegas '. phone: 702/731 -7547. type: ` coffee shops/diners '. class: 433. Product B is name: 'em eril \\ 's new orleans fish house '. addr: ' 3799 las vegas blvd. s. '. city: ` las vegas '. phone: 702-891-7374. type: seafood. class: 659. Are Product A and Product B the same? No, Product A class: 443 and Product B class: 659 are not the same\n\nProduct A is name: ` smith & wollensky '. addr: ' 201 e. 49th st. '. city: ` new york '. phone: 212/753 -1530. type: american. class: 62. Product B is name: ` smith & wollensky '. addr: ' 797 third ave. '. city: ` new york city '. phone: 212-753-1530. type: steakhouses. class: type: 62. Are Product A and B the same? Yes\n", f"{DATASET_PATH}/entity_matching/structured/DBLP-GoogleScholar": { "nan": "Product A is title: keynote address. authors: . venue: . year: 1995. Product B is title: keynote address. authors: t bailey. venue: women in aviation international conference , reno , nv , march ,. year: . Same Product? No\n\nProduct A is title: foreword : management of semistructured data. authors: . venue: . year: 1997. Product B is title: lore : a database management system for semistructued data. authors: j mchugh , s abiteboul , r goldmanâ ? ¦. venue: acm sigmod ,. year: . Same Product? No\n\nProduct A is title: guest editorial. authors: . venue: . year: 2003. Product B is title: guest editorial. authors: v atluri , a joshi , y yesha. venue: the vldb journal the international journal on very large & hellip ; ,. year: 2003.0. Same Product? Yes\n\nProduct A is title: storage and retrieval of xml data using relational databases. authors: . venue: . year: 2001. Product B is title: storage and retrieval of xml data using relational databases. authors: s chaudhuri , k shim. venue: data engineering , 2003 . proceedings . 19th international & hellip ; ,. year: 2003.0. Same Product? Yes\n\nProduct A is title: editorial. authors: . venue: . year: 2001. Product B is title: eic editorial. authors: ps yu. venue: ieee transactions on knowledge and data engineering ,. year: 2004.0. Same Product? No\n\nProduct A is title: state of the art in workflow management research and products. authors: . venue: . year: 1996. Product B is title: workflow management systems : state of the art on research and products , tutorial notes. authors: c mohan. venue: int . conf . on extending database technology , avignon ,. year: . Same Product? Yes\n\nProduct A is title: information management for genome level bioinformatics. authors: . venue: . year: 2001. Product B is title: implementing information management strategies .. authors: cet rohm. venue: new directions for higher education ,. year: 1986.0. Same Product? No\n\nProduct A is title: open object database management systems. authors: . venue: . year: 1994. Product B is title: case for output management systems .. authors: k schwaber. venue: computerworld. ,. year: 1981.0. Same Product? No\n\nProduct A is title: querying and mining data streams : you only get one look a tutorial. authors: . venue: . year: 2002. 
Product B is title: query and mining data streams : you only get one look. authors: m garofalakis , j gehrke , r rastogi. venue: . year: . Same Product? Yes\n\nProduct A is title: reminiscences on influential papers. authors: . venue: . year: 2001. Product B is title: reminiscences on influential papers. authors: ka ross. venue: sigmod. year: 2002.0. Same Product? Yes\n\n", "acm trans . database syst .": "Product A is title: semantics for update rule programs and implementations in a relational database management system. authors: l raschid , j lobo. venue: acm trans . database syst .. year: 1996. Product B is title: query processing in a relational database management system. authors: k youssefi , e wong. venue: vldb ,. year: 1979.0. Same Product? No\n\nProduct A is title: an efficient method for checking object-oriented database schema correctness. authors: a formica , h grіger , m missikoff. venue: acm trans . database syst .. year: 1998. Product B is title: an efficient method for checking object-oriented database schema correctness. authors: a formica , hd groger , m missikoff. venue: acm transactions on database systems ,. year: 1998.0. Same Product? Yes\n\nProduct A is title: semantics for update rule programs and implementations in a relational database management system. authors: l raschid , j lobo. venue: acm trans . database syst .. year: 1996. Product B is title: performance analysis of a relational data base management system. authors: p hawthorn , m stonebraker. venue: proceedings of the 1979 acm sigmod international conference. year: . Same Product? No\n\nProduct A is title: theory of dependence values. authors: r meo. venue: acm trans . database syst .. year: 2000. Product B is title: theory of dependence values. authors: r meo. venue: acm transactions on database systems ,. year: 2000.0. Same Product? Yes\n\nProduct A is title: gloss : text-source discovery over the internet. authors: l gravano , h garcia-molina , a tomasic. venue: acm trans . database syst .. year: 1999. Product B is title: gloss : text-source discovery over the internet. authors: l gravano , h garcia-molina , a tomasic. venue: acm transactions on database systems ,. year: 1999.0. Same Product? Yes\n\nProduct A is title: spatial queries in dynamic environments. authors: y tao , d papadias. venue: acm trans . database syst .. year: 2003. Product B is title: location-based spatial queries with data sharing in mobile environment. authors: ws ku , r zimmermann , cn wan. venue: . year: . Same Product? No\n\nProduct A is title: path sharing and predicate evaluation for high-performance xml filtering. authors: y diao , m altinel , m franklin , h zhang , p fischer. venue: acm trans . database syst .. year: 2003. Product B is title: yfilter : path sharing and predicate evaluation for high-performance xmpsll filtering. authors: y diao , m altinel , mj franklin , h zhang , p fischer. venue: acm transactions on database systems ( tods ). year: . Same Product? Yes\n\nProduct A is title: a cost model for query processing in high dimensional data spaces. authors: c bіhm. venue: acm trans . database syst .. year: 2000. Product B is title: generalized multi-dimensional data mapping and query processing. authors: r zhang , p kalnis , bc ooi , kl tan. venue: acm transactions on database systems ,. year: 2005.0. Same Product? No\n\nProduct A is title: applying an update method to a set of receivers. authors: m andries , l cabibbo , j paredaens , j bussche. venue: acm trans . database syst .. year: 2001. 
Product B is title: applying an update method to a set of receivers. authors: m andries , l cabibbo , j paredaens , j van den. venue: acm transactions on database systems ,. year: 2001.0. Same Product? Yes\n\nProduct A is title: formal query languages for secure relational databases. authors: m winslett , k smith , x qian. venue: acm trans . database syst .. year: 1994. Product B is title: implementation of logiclal query languages for databases ( abstract only ). authors: jd ullman. venue: proceedings of the 1985 acm sigmod international conference & hellip ; ,. year: 1985.0. Same Product? No\n\n", "ecord": "Product A is title: david dewitt speaks out. authors: m winslett. venue: ecord. year: 2002. Product B is title: jim gray speaks out. authors: m winslett. venue: sigmod record ,. year: 2003.0. Same Product? No\n\nProduct A is title: david dewitt speaks out. authors: m winslett. venue: ecord. year: 2002. Product B is title: david dewitt speaks out. authors: s be proud , hsfis very , p spent. venue: sigmod record ,. year: 2002.0. Same Product? Yes\n\nProduct A is title: david dewitt speaks out. authors: m winslett. venue: ecord. year: 2002. Product B is title: pat selinger speaks out. authors: p selinger , m winslett. venue: sigmod record ,. year: . Same Product? No\n\n", "sigmod conference": "Product A is title: dynamat : a dynamic view management system for data warehouses. authors: y kotidis , n roussopoulos. venue: sigmod conference. year: 1999. Product B is title: dynamat : a dynamic viewmanagement system fordata warehouses. authors: ykn roussopoulos. venue: proc . of the acm sigmod. year: . Same Product? Yes\n\nProduct A is title: data management issues in electronic commerce ( panel ). authors: m жzsu. venue: sigmod conference. year: 1999. Product B is title: special section on data management issues in e-commerce. authors: a dogac. venue: acm sigmod record ,. year: . Same Product? No\n\nProduct A is title: database systems management and oracle8. authors: c doherty. venue: sigmod conference. year: 1998. Product B is title: the cedar database management system. authors: m brownâ ? ¦. venue: proc . 1981 acmsigmod conference on management of data , ann & hellip ; ,. year: . Same Product? No\n\nProduct A is title: online association rule mining. authors: c hidber. venue: sigmod conference. year: 1999. Product B is title: mining fuzzy association rules in database. authors: mk chan , f ada , hw man. venue: sigmod record ,. year: . Same Product? No\n\nProduct A is title: a performance study of transitive closure algorithms. authors: s dar , r ramakrishnan. venue: sigmod conference. year: 1994. Product B is title: a performance study of transitive closure algorithms. authors: s dar , r ramakrishnan. venue: sigmod conference ,. year: 1994.0. Same Product? Yes\n\nProduct A is title: an open storage system for abstract objects. authors: s blott , l relly , h schek. venue: sigmod conference. year: 1996. Product B is title: an abstract-object storage manager. authors: s blott , l relly , hj schek. venue: & hellip ; 1996 acm sigmod int . conf . on management of data , acm sigmod. year: . Same Product? Yes\n\nProduct A is title: communication efficient distributed mining of association rules. authors: a schuster , r wolff. venue: sigmod conference. year: 2001. Product B is title: mining generalized association rules. authors: r agrawal , r srikant. venue: proceedings of the 1995 international conference of very & hellip ; ,. year: . Same Product? 
No\n\nProduct A is title: ariadne : a system for constructing mediators for internet sources. authors: j ambite , n ashish , g barish , c knoblock , s minton , p modi , i muslea , a philpot , s tejada. venue: sigmod conference. year: 1998. Product B is title: . and sheila tejada . ariadne : a system for constructing mediators for internet sources ( system. authors: jl ambite , n ashish , g barish. venue: proc . of a cm sigmod conf . on management of. year: . Same Product? Yes\n\nProduct A is title: dna-miner : a system prototype for mining dna sequences. authors: j han , h jamil , y lu , l chen , y liao , j pei. venue: sigmod conference. year: 2001. Product B is title: n. stefanovic 1997 â ?? geominer : a system prototype for spatial data miningâ ??. authors: j han , k koperski. venue: proc . acm-sigmod int . conf . on management of data ( sigmod &#39; 97. year: . Same Product? No\n\nProduct A is title: a critique of ansi sql isolation levels. authors: h berenson , p bernstein , j gray , j melton , e o'neil , p o'neil. venue: sigmod conference. year: 1995. Product B is title: a critique of ansi sql isolation levels. authors: h berenson , p bernstein , j gray , j melton. venue: proceedings of the acm sigmod international conference on & hellip ; ,. year: 1995.0. Same Product? Yes\n\n", "sigmod record": "Product A is title: exploiting main memory dbms features to improve real-time concurrency control protocols. authors: ж ulusoy , a buchmann. venue: sigmod record. year: 1996. Product B is title: index concurrency control in firm real-time dbms. authors: b goyal , j haritsa , s seshadri , v srinivasan. venue: proceedings of the international conference on very large & hellip ; ,. year: 1995.0. Same Product? No\n\nProduct A is title: database techniques for the world-wide web : a survey. authors: d florescu , a levy , a mendelzon. venue: sigmod record. year: 1998. Product B is title: mendelzon . database techniques for the world-w ide-wide : a survey. authors: d florescu , a levy. venue: sigmod record ,. year: . Same Product? Yes\n\nProduct A is title: an extensible compressor for xml data. authors: h liefke , d suciu. venue: sigmod record. year: 2000. Product B is title: xmill : an efficient compressor for xml data. authors: l hartmut , d suciu. venue: . year: . Same Product? No\n\nProduct A is title: a consensus glossary of temporal database concepts. authors: c jensen , j clifford , r elmasri , s gadia , p hayes , s jajodia. venue: sigmod record. year: 1994. Product B is title: , sk gadi p. hayes and s. jajodia ( e & ) : â ?? a consensus glossary of temporal database conceptsâ ??. authors: cs jensen , j cliftord. venue: a chf sig3fod. year: . Same Product? Yes\n\nProduct A is title: on the issue of valid time ( s ) in temporal databases. authors: s kokkotos , e ioannidis , t panayiotopoulos , c spyropoulos. venue: sigmod record. year: 1995. Product B is title: temporal databases. authors: rt snodgrass. venue: . year: . Same Product? No\n\nProduct A is title: asserting beliefs in mls relational models. authors: n jukic , s vrbsky. venue: sigmod record. year: 1997. Product B is title: learning probabilistic relational models. authors: l getoorâ ? ¦. venue: lecture notes in computer science ,. year: 2000.0. Same Product? No\n\nProduct A is title: mining fuzzy association rules in databases. authors: c kuok , a fu , m wong. venue: sigmod record. year: 1998. Product B is title: mining association rules between sets of items in large databases . 1993. authors: r agrawal , t imielinski , a swami. 
venue: proc . acm sigmod international conference on management of. year: . Same Product? No\n\nProduct A is title: supply chain infrastructures : system integration and information sharing. authors: m ball , m ma , l raschid , z zhao. venue: sigmod record. year: 2002. Product B is title: supply chain infrastructures : system integration and information sharing. authors: mo ball , m ma , l raschid , z zhao. venue: sigmod record ,. year: 2002.0. Same Product? Yes\n\nProduct A is title: database research : achievements and opportunities into the 21st century. authors: a silberschatz , m stonebraker , j ullman. venue: sigmod record. year: 1996. Product B is title: database research : achievements and opportunities into the 1st century. authors: a silberschatz , m stonebraker , j ullman. venue: acm sigmod record ,. year: 1996.0. Same Product? Yes\n\nProduct A is title: constraint databases : a tutorial introduction. authors: j bussche. venue: sigmod record. year: 2000. Product B is title: constraint databases : a tutorial introduction .. authors: acms anthology. venue: sigmod record ,. year: 2000.0. Same Product? Yes\n\n", "vldb": "Product A is title: fast algorithms for mining association rules in large databases. authors: r agrawal , r srikant. venue: vldb. year: 1994. Product B is title: data mining using two-dimensional optimized association rules for numeric data : scheme , algorithms ,. authors: t fukuda , y morimoto , s morishita , t tokuyama. venue: proceedings of the acm sigmod international conference on. year: . Same Product? No\n\nProduct A is title: client-server paradise. authors: d dewitt , n kabra , j luo , j patel , j yu. venue: vldb. year: 1994. Product B is title: and j. yu . client-server paradise. authors: dj dewitt , n kabra , j luo , jm pate. venue: proceedings ofthe twentieth international conference on vey & hellip ; ,. year: . Same Product? Yes\n\nProduct A is title: mixed mode xml query processing. authors: a halverson , j burger , l galanis , a kini , r krishnamurthy , a rao , f tian , s viglas , y wang , j naughton , d dewitt. venue: vldb. year: 2003. Product B is title: a transducer-based xml query processor. authors: , p mukhopadhyay , y papakonstantinou. venue: vldb ,. year: 2002.0. Same Product? No\n\nProduct A is title: database tuning : principles , experiments , and troubleshooting techniques. authors: d shasha , p bonnet. venue: vldb. year: 2002. Product B is title: database tuning : principles , experiments , and troubleshooting techniques ( part i ). authors: d shasha , p bonnet. venue: proceedings of the 2002 acm sigmod international conference & hellip ; ,. year: 2002.0. Same Product? Yes\n\nProduct A is title: mining generalized association rules. authors: r srikant , r agrawal. venue: vldb. year: 1995. Product B is title: mining association rules from semi-structured data. authors: k maruyama , k uehara. venue: icdcs workshop of knowledge discovery and data mining in the & hellip ; ,. year: 2000.0. Same Product? No\n\nProduct A is title: the x-tree : an index structure for high-dimensional data. authors: s berchtold , d keim , h kriegel. venue: vldb. year: 1996. Product B is title: the tv-tree { an index structure for high-dimensional data . the vldb journal , 3 ( 4 ) : 517 { 549. authors: ki lin , hv jagadish , c faloutsos. venue: . year: 1994.0. Same Product? No\n\nProduct A is title: dual-buffering strategies in object bases. authors: a kemper , d kossmann. venue: vldb. year: 1994. Product B is title: dual-buffering strategies in object bases. 
authors: a kemper , d kossmann. venue: vldb ,. year: 1994.0. Same Product? Yes\n\nProduct A is title: banks : browsing and keyword searching in relational databases. authors: b aditya , g bhalotia , s chakrabarti , a hulgeri , c nakhe , p parag , s sudarshan. venue: vldb. year: 2002. Product B is title: distance browsing in spatial databases. authors: gr hjaltason , h samet. venue: acm transactions on database systems ,. year: 1999.0. Same Product? No\n\nProduct A is title: on-demand data elevation in hierarchical multimedia storage servers. authors: p triantafillou , t papadakis. venue: vldb. year: 1997. Product B is title: on-demand data elevation in a hierarchical multimedia storage server. authors: p triantallou , t papadakis. venue: proc . of 23rd intl. conf . on very large data bases , vldb ,. year: 1997.0. Same Product? Yes\n\nProduct A is title: a raster approximation for processing of spatial joins. authors: g zimbrao , j souza. venue: vldb. year: 1998. Product B is title: a raster approximation for processing of spatial joins. authors: g zimbrao. venue: vldb ,. year: 1998.0. Same Product? Yes\n\n", "vldb j.": "Product A is title: the demarcation protocol : a technique for maintaining constraints in distributed database systems. authors: d barbarс , h garcia-molina. venue: vldb j.. year: 1994. Product B is title: local verification of global integrity constraints in distributed databases. authors: a gupta , j widom. venue: . year: . Same Product? No\n\nProduct A is title: an introduction to spatial database systems. authors: r gќting. venue: vldb j.. year: 1994. Product B is title: an introduction to spatial database systems. authors: portal.acm.org. venue: . year: 1994.0. Same Product? Yes\n\nProduct A is title: re-tree : an efficient index structure for regular expressions. authors: c chan , m garofalakis , r rastogi. venue: vldb j.. year: 2003. Product B is title: the tv-tree : an index structure forhigh-dimensional data. authors: ki lin , h jagadish , c faloutsos. venue: the vldb journal ,. year: . Same Product? No\n\nProduct A is title: one-dimensional and multi-dimensional substring selectivity estimation. authors: h jagadish , o kapitskaia , r ng , d srivastava. venue: vldb j.. year: 2000. Product B is title: one-dimensional and multi-dimensional substring selectivity estimation. authors: hv jagadish , o kapitskaia , rt ng , d srivastava. venue: the vldb journal the international journal on very large & hellip ; ,. year: 2000.0. Same Product? Yes\n\nProduct A is title: active rules for xml : a new paradigm for e-services. authors: a bonifati , s ceri , s paraboschi. venue: vldb j.. year: 2001. Product B is title: active rules for xml : a new paradigm for e-services. authors: a bonifati , s ceri , s paraboschi. venue: the vldb journal the international journal on very large & hellip ; ,. year: 2001.0. Same Product? Yes\n\nProduct A is title: optimal clip ordering for multi-clip queries. authors: r ng , p shum. venue: vldb j.. year: 1998. Product B is title: optimal clip ordering for multi-clip queries. authors: rt ng , p shum. venue: the vldb journal the international journal on very large & hellip ; ,. year: 1998.0. Same Product? Yes\n\nProduct A is title: priority assignment in real-time active databases. authors: r sivasankaran , j stankovic , d towsley , b purimetla , k ramamritham. venue: vldb j.. year: 1996. Product B is title: priority assignment in real-time active databases. authors: rm sivasankaran , ja stankovic , d towsley , b. 
venue: the vldb journal the international journal on very large & hellip ; ,. year: 1996.0. Same Product? Yes\n\nProduct A is title: global transaction support for workflow management systems : from formal specification to practical implementation. authors: p grefen , j vonk , p apers. venue: vldb j.. year: 2001. Product B is title: distributed transaction support for workflow management applications. authors: j vonk , p grefen , e boertjes , p apers. venue: procs . 10th int . conf . on database and expert system & hellip ; ,. year: . Same Product? No\n\nProduct A is title: answering queries using views : a survey. authors: a halevy. venue: vldb j.. year: 2001. Product B is title: a scalable algorithm for answering queries using views. authors: ay levy. venue: . year: . Same Product? No\n\nProduct A is title: index nesting - an efficient approach to indexing in object-oriented databases. authors: b ooi , j han , h lu , k tan. venue: vldb j.. year: 1996. Product B is title: spatial query processing in an object-oriented database system. authors: ja orenstein. venue: proceedings of the 1986 acm sigmod international conference. year: . Same Product? No\n\n", }, f"{DATASET_PATH}/entity_matching/structured/Walmart-Amazon": "Product A is modelno: c789u1mg. Product B is modelno: c644u1mg. Are A and B the Same? No, because modelno: c789u1mg and modelno: c644u1mg are not the same\n\nProduct A is modelno: hp ce278a. Product B is modelno: hp27 / ce278a ( xl ). Are A and B the Same? No, because modelno: hp ce278a and modelno: hp27 / c8727a ( xl ) are not the same\n\nProduct A is modelno: c7826an. Product B is modelno: c7826an # 56. Are A and B the Same? No, because modelno: c7826 and modelno: c7826 # 56 are not the same\n\nProduct A is modelno: oczssd22vtxe60g. Product B is modelno: oczssd2-3vtx60g. Are A and B the Same? No, because modelno: va1932 and modelno: va705b are not the same\n\nProduct A is modelno: ktd-dm6400a / 1g. Product B is modelno: ktd-dm6400bc / 1g. Are A and B the Same? No, because modelno: ktd-dm8400a / 1g and modelno: ktd-dm8400bc / 1g are not the same\n\nProduct A is title: sharp electronics xldh259n 160w micro system with ipod dock black. modelno: xldh259n. Product B is title: sharp electronics xldh259n 160w micro system with docking slot for ipod black. modelno: xldh259n. Are A and B the Same? Yes, because modelno: xldh259n and modelno: xldh259 are the same\n\nProduct A is title: philips vibe 4gb mp3 video player. modelno: sa2vbe04kc/17. Product B is title: philips gogear vibe 8gb mp3 player sa3vbe08k 37. modelno: sa3vbe08k/37. Are A and B the Same? No, because modelno: sa2vbe04kc/17 and modelno: sa3vbe08k/37 are not the same\n\nProduct A is title: innovera heavyweight photo paper matte 8-1 2 x 11 50 sheets pack. modelno: 99650. Product B title: innovera 99650 - heavyweight photo paper matte 8-1 2 x 11 50 sheets pack. modelno: . Are A and B the Same? Yes, because modelno: 99650 and Product B title: innovera 99650 are the same", f"{DATASET_PATH}/entity_matching/structured/DBLP-ACM": { "vldb": "Product A is title: mapinfo spatialware : a spatial information server for rdbms. authors: chebel mina. venue: vldb. year: 1998. Product B is title: mapinfo spatialware : a spatial information server for rdbms. authors: chebel mina. venue: very large data bases. year: 1998. Are Product A and Product B the same? Yes\n\nProduct A is title: sampling large databases for association rules. authors: hannu toivonen. venue: vldb. year: 1996. 
Product B is title: sampling large databases for association rules. authors: hannu toivonen. venue: very large data bases. year: 1996. Are Product A and Product B the same? Yes\n\nProduct A is title: an ultra highly available dbms. authors: svein-olaf hvasshovd , svein erik bratsberg , øystein torbjørnsen. venue: vldb. year: 2000. Product B is title: an ultra highly available dbms. authors: svein-olaf hvasshovd , svein erik bratsberg , &#216; ystein torbj &#248; rnsen. venue: very large data bases. year: 2000. Are Product A and Product B the same? Yes\n\nProduct A is title: e.piphany epicenter technology overview. authors: sridhar ramaswamy. venue: vldb. year: 2000. Product B is title: e.piphany epicenter technology overview. authors: sridhar ramaswamy. venue: very large data bases. year: 2000. Are Product A and Product B the same? Yes\n\nProduct A is title: on the costs of multilingualism in database systems. authors: a. kumaran , jayant r. haritsa. venue: vldb. year: 2003. Product B is title: analysis of locking behavior in three real database systems. authors: vigyan singhal , alan jay smith. venue: the vldb journal -- the international journal on very large data bases. year: 1997. Are Product A and Product B the same? No\n\nProduct A is title: dynamic multi-resource load balancing in parallel database systems. authors: robert marek , erhard rahm. venue: vldb. year: 1995. Product B is title: parallel database systems 101. authors: jim gray. venue: international conference on management of data. year: 1995. Are Product A and Product B the same? No\n\nProduct A is title: processing sliding window multi-joins in continuous queries over data streams. authors: lukasz golab , m. tamer özsu. venue: vldb. year: 2003. Product B is title: processing set expressions over continuous update streams. authors: sumit ganguly , minos garofalakis , rajeev rastogi. venue: international conference on management of data. year: 2003. Are Product A and Product B the same? No\n\nProduct A is title: odefs : a file system interface to an object-oriented database. authors: narain h. gehani , william d. roome , h. v. jagadish. venue: vldb. year: 1994. Product B is title: unisql/x unified relational and object-oriented database system. authors: won kim. venue: the vldb journal -- the international journal on very large data bases. year: 1994. Are Product A and Product B the same? No\n\nProduct A is title: dwms : data warehouse management system. authors: narendra mohan. venue: vldb. year: 1996. Product B is title: strudel : a web site management system. authors: mary fernandez , daniela florescu , jaewoo kang , alon levy , dan suciu. venue: the vldb journal -- the international journal on very large data bases. year: 1997. Are Product A and Product B the same? No\n\nProduct A is title: sampling large databases for association rules. authors: hannu toivonen. venue: vldb. year: 1996. Product A is title: sampling large databases for association rules. authors: hannu toivonen. venue: the vldb journal -- the international journal on very large data bases. year: 1996. Are Product A and Product B the same? No\n", "sigmod record": "Product A is title: automata theory for xml researchers. authors: frank neven. venue: sigmod record. year: 2002. Product B is title: automata theory for xml researchers. authors: frank neven. venue: acm sigmod record. year: 2002. Are Product A and Product B the same? Yes\n\nProduct A is title: reasoning on regular path queries. authors: giuseppe de giacomo , moshe y. 
vardi , maurizio lenzerini , diego calvanese. venue: sigmod record. year: 2003. Product B is title: reasoning on regular path queries. authors: d. calvanese , g. de giacomo , m. lenzerini , m. y. vardi. venue: acm sigmod record. year: 2003. Are Product A and Product B the same? Yes\n\nProduct A is title: 3d geographic network displays. authors: taosong he , stephen g. eick , kenneth c. cox. venue: sigmod record. year: 1996. Product B is title: 3d geographic network displays. authors: kenneth c. cox , stephen g. eick , taosong he. venue: acm sigmod record. year: 1996. Are Product A and Product B the same? Yes\n\nProduct A is title: optimizing jan jannink 's implementation of b + - tree deletion. authors: h. olivie , r. maelbrancke. venue: sigmod record. year: 1995. Product B is title: optimizing jan jannink 's implementation of b + - tree deletion. authors: r. maelbrancke , h. olivi &#233;. venue: acm sigmod record. year: 1995. Are Product A and Product B the same? Yes\n\nProduct A is title: automata theory for xml researchers. authors: frank neven. venue: sigmod record. year: 2002. Product B is title: automata theory for xml researchers. authors: frank neven. venue: very large databases year: 2002. Are Product A and Product B the same? No\n\nProduct A is title: book review column. authors: karl aberer. venue: sigmod record. year: 2002. Product B is title: book review column. authors: karl aberer. venue: acm sigmod record. year: 2002. Are Product A and Product B the same? Yes\n\nProduct A is title: book review column. authors: karl aberer. venue: sigmod record. year: 2002. Product B is title: book reviews. authors: karl aberer. venue: acm sigmod record. year: 2002. Are Product A and Product B the same? No\n\nProduct A is title: book review column. authors: karl aberer. venue: sigmod record. year: 2003. Product B is title: book review column. authors: karl aberer. venue: acm sigmod record. year: 2003. Are Product A and Product B the same? Yes\n\nProduct A is title: book review column. authors: karl aberer. venue: sigmod record. year: 2003. Product B is title: book reviews. authors: karl aberer. venue: acm sigmod record. year: 2003. Are Product A and Product B the same? No\n", "vldb j.": "Product A is title: an architecture to support scalable online personalization on the web. authors: kaushik dutta , debra e. vandermeer , krithi ramamritham , anindya datta , shamkant b. navathe. venue: vldb j.. year: 2001. Product B is title: an architecture to support scalable online personalization on the web. authors: anindya datta , kaushik dutta , debra vandermeer , krithi ramamritham , shamkant b. navathe. venue: the vldb journal -- the international journal on very large data bases. year: 2001. Are Product A and Product B the same? Yes\n\nProduct A is title: answering queries using views : a survey. authors: alon y. halevy. venue: vldb j.. year: 2001. Product B is title: answering queries using views : a survey. authors: alon y. halevy. venue: the vldb journal -- the international journal on very large data bases. year: 2001. Are Product A and Product B the same? Yes\n\nProduct A is title: efficient schemes for managing multiversionxml documents. authors: shu-yao chien , carlo zaniolo , vassilis j. tsotras. venue: vldb j.. year: 2002. Product B is title: efficient schemes for managing multiversionxml documents. authors: s.-y . chien , v. j. tsotras , c. zaniolo. venue: the vldb journal -- the international journal on very large data bases. year: 2002. Are Product A and Product B the same? 
Yes\n\nProduct A is title: index configuration in object-oriented databases. authors: elisa bertino. venue: vldb j.. year: 1994. Product B is title: a cost model for clustered object-oriented databases. authors: georges gardarin , jean-robert gruser , zhao-hui tang. venue: very large data bases. year: 1995. Are Product A and Product B the same? No\n\nProduct A is title: incremental computation and maintenance of temporal aggregates. authors: jun yang , jennifer widom. venue: vldb j.. year: 2003. Product B is title: incremental maintenance of recursive views using relational calculus/sql. authors: guozhu dong , jianwen su. venue: acm sigmod record. year: 2000. Are Product A and Product B the same? No\n\nProduct A is title: index nesting - an efficient approach to indexing in object-oriented databases. authors: jiawei han , beng chin ooi , hongjun lu , kian-lee tan. venue: vldb j.. year: 1996. Product B is title: converting relational to object-oriented databases. authors: joseph fong. venue: acm sigmod record. year: 1997. Are Product A and Product B the same? No\n\nProduct A is title: concurrency control in hierarchical multidatabase systems. authors: henry f. korth , abraham silberschatz , sharad mehrotra. venue: vldb j.. year: 1997. Product B is title: dynamic load balancing in hierarchical parallel database systems. authors: luc bouganim , daniela florescu , patrick valduriez. venue: very large data bases. year: 1996. Are Product A and Product B the same? No\n\nProduct A is title: instance-based attribute identification in database integration. authors: roger h. l. chiang , ee-peng lim , chua eng huang cecil. venue: vldb j.. year: 2003. Product B is title: a case-based approach to information integration. authors: maurizio panti , luca spalazzi , alberto giretti. venue: very large data bases. year: 2000. Are Product A and Product B the same? No\n\nProduct A is title: answering queries using views : a survey. authors: alon y. halevy. venue: vldb j.. year: 2001. Product B is title: answering queries using views : a survey. authors: alon y. halevy. venue: international conference on management of data. year: 2001. Are Product A and Product B the same? No\n", "acm trans . database syst .": "Product A is title: tail recursion elimination in deductive databases. authors: kenneth a. ross. venue: acm trans . database syst .. year: 1996. Product B is title: tail recursion elimination in deductive databases. authors: kenneth a. ross. venue: acm transactions on database systems ( tods ). year: 1996. Are Product A and Product B the same? Yes\n\nProduct A is title: disjunctive datalog. authors: heikki mannila , thomas eiter , georg gottlob. venue: acm trans . database syst .. year: 1997. Product B is title: disjunctive datalog. authors: thomas eiter , georg gottlob , heikki mannila. venue: acm transactions on database systems ( tods ). year: 1997. Are Product A and Product B the same? Yes\n\nProduct A is title: space optimization in deductive databases. authors: s. sudarshan , divesh srivastava , raghu ramakrishnan , jeffrey f. naughton. venue: acm trans . database syst .. year: 1995. Product B is title: introduction to constraint databases. authors: bart kuijpers. venue: acm sigmod record. year: 2002. Are Product A and Product B the same? No\n\nProduct A is title: solving satisfiability and implication problems in database systems. authors: sha guo , mark allen weiss , wei sun. venue: acm trans . database syst .. year: 1996. Product B is title: temporal database system implementations.
authors: michael h. b &#246; hlen. venue: acm sigmod record. year: 1995. Are Product A and Product B the same? No\n\nProduct A is title: disjunctive datalog. authors: heikki mannila , thomas eiter , georg gottlob. venue: acm trans . database syst .. year: 1997. Product B is title: disjunctive datalog. authors: heikki mannila , thomas eiter , georg gottlob. venue: acm sigmod record. year: 1997. Are Product A and Product B the same? No\n", "sigmod conference": "Product A is title: javax.xxl : a prototype for a library of query processing algorithms. authors: bernhard seeger , jens-peter dittrich , jochen van den bercken. venue: sigmod conference. year: 2000. Product B is title: javax.xxl : a prototype for a library of query processing algorithms. authors: jochen van den bercken , jens-peter dittrich , bernhard seeger. venue: international conference on management of data. year: 2000. Are Product A and Product B the same? Yes\n\nProduct A is title: orthogonal optimization of subqueries and aggregation. authors: milind joshi , césar a. galindo-legaria. venue: sigmod conference. year: 2001. Product B is title: orthogonal optimization of subqueries and aggregation. authors: c &#233; sar galindo-legaria , milind joshi. venue: international conference on management of data. year: 2001. Are Product A and Product B the same? Yes\n\nProduct A is title: from structured documents to novel query facilities. authors: sophie cluet , michel scholl , serge abiteboul , vassilis christophides. venue: sigmod conference. year: 1994. Product B is title: from structured documents to novel query facilities. authors: v. christophides , s. abiteboul , s. cluet , m. scholl. venue: international conference on management of data. year: 1994. Are Product A and Product B the same? Yes\n\nProduct A is title: the naos system. authors: christine collet , thierry coupaye. venue: sigmod conference. year: 1995. Product B is title: the naos system. authors: c. collet , t. coupaye. venue: international conference on management of data. year: 1995. Are Product A and Product B the same? Yes\n\nProduct A is title: highly concurrent cache consistency for indices in client-server database systems. authors: michael j. carey , markos zaharioudakis. venue: sigmod conference. year: 1997. Product B is title: index concurrency control in firm real-time database systems. authors: brajesh goyal , jayant r. haritsa , s. seshadri , v. srinivasan. venue: very large data bases. year: 1995. Are Product A and Product B the same? No\n\nProduct A is title: selectivity estimation in spatial databases. authors: viswanath poosala , sridhar ramaswamy , swarup acharya. venue: sigmod conference. year: 1999. Product B is title: selectivity estimation for spatio-temporal queries to moving objects. authors: yong-jin choi , chin-wan chung. venue: international conference on management of data. year: 2002. Are Product A and Product B the same? No\n\nProduct A is title: the need for distributed asynchronous transactions. authors: lyman do , prabhu ram , pamela drew. venue: sigmod conference. year: 1999. Product B is title: atomicity versus anonymity : distributed transactions for electronic commerce. authors: j. d. tygar. venue: very large data bases. year: 1998. Are Product A and Product B the same? No, because Product A venue: sigmod conference and Product B venue: very large data bases are not the same\n\nProduct A is title: query optimization in compressed database systems. authors: zhiyuan chen , flip korn , johannes gehrke. venue: sigmod conference. 
year: 2001. Product B is title: query caching and optimization in distributed mediator systems. authors: s. adali , k. s. candan , y. papakonstantinou , v. s. subrahmanian. venue: the vldb journal -- the international journal on very large data bases. year: 1996. Are Product A and Product B the same? No\n\nProduct A is title: orthogonal optimization of subqueries and aggregation. authors: milind joshi , césar a. galindo-legaria. venue: sigmod conference. year: 2001. Product B is title: orthogonal optimization of subqueries and aggregation. authors: milind joshi , césar a. galindo-legaria. venue: very large databases. year: 2001. Are Product A and Product B the same? No\n", }, f"{DATASET_PATH}/schema_matching/Synthea": { "person-person_id": "A is person-person_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n\nA is person-person_id. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is person-person_id. B is patients-zip. Are A and B the same? Yes or No? No\n", "person-gender_concept_id": "A is person-gender_concept_id. B is patients-gender. Are A and B the same? Yes or No? Yes\n\nA is person-gender_concept_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is person-gender_concept_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n", "person-year_of_birth": "A is person-year_of_birth. B is patients-birthdate. Are A and B the same? Yes or No? Yes\n\nA is person-year_of_birth. B is immunizations-cost. Are A and B the same? Yes or No? No\n\nA is person-year_of_birth. B is procedures-date. Are A and B the same? Yes or No? No\n", "person-month_of_birth": "A is person-month_of_birth. B is patients-birthdate. Are A and B the same? Yes or No? Yes\n\nA is person-month_of_birth. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is person-month_of_birth. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n", "person-day_of_birth": "A is person-day_of_birth. B is patients-birthdate. Are A and B the same? Yes or No? Yes\n\nA is person-day_of_birth. B is careplans-patient. Are A and B the same? Yes or No? No\n\nA is person-day_of_birth. B is observations-encounter. Are A and B the same? Yes or No? No\n", "person-birth_datetime": "A is person-birth_datetime. B is patients-birthdate. Are A and B the same? Yes or No? Yes\n\nA is person-birth_datetime. B is providers-address. Are A and B the same? Yes or No? No\n\nA is person-birth_datetime. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n", "person-death_datetime": "A is person-death_datetime. B is patients-deathdate. Are A and B the same? Yes or No? Yes\n\nA is person-death_datetime. B is patients-id. Are A and B the same? Yes or No? No\n\nA is person-death_datetime. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n", "person-race_concept_id": "A is person-race_concept_id. B is patients-race. Are A and B the same? Yes or No? Yes\n\nA is person-race_concept_id. B is imaging_studies-encounter. Are A and B the same? Yes or No? No\n\nA is person-race_concept_id. B is imaging_studies-id. Are A and B the same? Yes or No? No\n", "person-ethnicity_concept_id": "A is person-ethnicity_concept_id. B is patients-ethnicity. Are A and B the same? Yes or No? Yes\n\nA is person-ethnicity_concept_id. B is encounters-cost. Are A and B the same? Yes or No? No\n\nA is person-ethnicity_concept_id. B is patients-state. Are A and B the same? Yes or No? No\n", "person-location_id": "A is person-location_id. B is imaging_studies-id. 
Are A and B the same? Yes or No? No\n\nA is person-location_id. B is encounters-stop. Are A and B the same? Yes or No? No\n\nA is person-location_id. B is imaging_studies-date. Are A and B the same? Yes or No? No\n", "person-provider_id": "A is person-provider_id. B is providers-speciality. Are A and B the same? Yes or No? No\n\nA is person-provider_id. B is procedures-description. Are A and B the same? Yes or No? No\n\nA is person-provider_id. B is patients-race. Are A and B the same? Yes or No? No\n", "person-care_site_id": "A is person-care_site_id. B is patients-ssn. Are A and B the same? Yes or No? No\n\nA is person-care_site_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n\nA is person-care_site_id. B is allergies-description. Are A and B the same? Yes or No? No\n", "person-person_source_value": "A is person-person_source_value. B is patients-id. Are A and B the same? Yes or No? Yes\n\nA is person-person_source_value. B is organizations-phone. Are A and B the same? Yes or No? No\n\nA is person-person_source_value. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n", "person-gender_source_value": "A is person-gender_source_value. B is patients-gender. Are A and B the same? Yes or No? Yes\n\nA is person-gender_source_value. B is immunizations-description. Are A and B the same? Yes or No? No\n\nA is person-gender_source_value. B is providers-gender. Are A and B the same? Yes or No? No\n", "person-gender_source_concept_id": "A is person-gender_source_concept_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is person-gender_source_concept_id. B is procedures-date. Are A and B the same? Yes or No? No\n\nA is person-gender_source_concept_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n", "person-race_source_value": "A is person-race_source_value. B is patients-race. Are A and B the same? Yes or No? Yes\n\nA is person-race_source_value. B is observations-type. Are A and B the same? Yes or No? No\n\nA is person-race_source_value. B is patients-id. Are A and B the same? Yes or No? No\n", "person-race_source_concept_id": "A is person-race_source_concept_id. B is imaging_studies-sop description. Are A and B the same? Yes or No? No\n\nA is person-race_source_concept_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is person-race_source_concept_id. B is observations-code. Are A and B the same? Yes or No? No\n", "person-ethnicity_source_value": "A is person-ethnicity_source_value. B is patients-ethnicity. Are A and B the same? Yes or No? Yes\n\nA is person-ethnicity_source_value. B is observations-code. Are A and B the same? Yes or No? No\n\nA is person-ethnicity_source_value. B is allergies-stop. Are A and B the same? Yes or No? No\n", "person-ethnicity_source_concept_id": "A is person-ethnicity_source_concept_id. B is patients-state. Are A and B the same? Yes or No? No\n\nA is person-ethnicity_source_concept_id. B is careplans-id. Are A and B the same? Yes or No? No\n\nA is person-ethnicity_source_concept_id. B is careplans-patient. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_occurrence_id": "A is visit_occurrence-visit_occurrence_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_occurrence_id. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_occurrence_id. B is encounters-code. Are A and B the same? Yes or No? No\n", "visit_occurrence-person_id": "A is visit_occurrence-person_id. 
B is encounters-patient. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-person_id. B is imaging_studies-modality code. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-person_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_concept_id": "A is visit_occurrence-visit_concept_id. B is encounters-encounterclass. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_concept_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_concept_id. B is allergies-description. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_start_date": "A is visit_occurrence-visit_start_date. B is encounters-start. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_start_date. B is observations-code. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_start_date. B is patients-id. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_start_datetime": "A is visit_occurrence-visit_start_datetime. B is encounters-start. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_start_datetime. B is encounters-code. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_start_datetime. B is procedures-encounter. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_end_date": "A is visit_occurrence-visit_end_date. B is encounters-stop. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_end_date. B is organizations-id. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_end_date. B is patients-drivers. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_end_datetime": "A is visit_occurrence-visit_end_datetime. B is encounters-stop. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_end_datetime. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_end_datetime. B is organizations-name. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_type_concept_id": "A is visit_occurrence-visit_type_concept_id. B is providers-address. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_type_concept_id. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_type_concept_id. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n", "visit_occurrence-provider_id": "A is visit_occurrence-provider_id. B is medications-reasoncode. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-provider_id. B is patients-zip. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-provider_id. B is medications-start. Are A and B the same? Yes or No? No\n", "visit_occurrence-care_site_id": "A is visit_occurrence-care_site_id. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-care_site_id. B is careplans-stop. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-care_site_id. B is providers-name. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_source_value": "A is visit_occurrence-visit_source_value. B is encounters-encounterclass. Are A and B the same? Yes or No? Yes\n\nA is visit_occurrence-visit_source_value. B is organizations-phone. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_source_value. B is conditions-patient. Are A and B the same? Yes or No? No\n", "visit_occurrence-visit_source_concept_id": "A is visit_occurrence-visit_source_concept_id. 
B is observations-value. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_source_concept_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-visit_source_concept_id. B is providers-state. Are A and B the same? Yes or No? No\n", "visit_occurrence-admitted_from_concept_id": "A is visit_occurrence-admitted_from_concept_id. B is careplans-start. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-admitted_from_concept_id. B is providers-state. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-admitted_from_concept_id. B is patients-address. Are A and B the same? Yes or No? No\n", "visit_occurrence-admitted_from_source_value": "A is visit_occurrence-admitted_from_source_value. B is providers-city. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-admitted_from_source_value. B is immunizations-description. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-admitted_from_source_value. B is conditions-patient. Are A and B the same? Yes or No? No\n", "visit_occurrence-discharge_to_concept_id": "A is visit_occurrence-discharge_to_concept_id. B is allergies-start. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-discharge_to_concept_id. B is allergies-code. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-discharge_to_concept_id. B is patients-state. Are A and B the same? Yes or No? No\n", "visit_occurrence-discharge_to_source_value": "A is visit_occurrence-discharge_to_source_value. B is medications-start. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-discharge_to_source_value. B is providers-address. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-discharge_to_source_value. B is allergies-stop. Are A and B the same? Yes or No? No\n", "visit_occurrence-preceding_visit_occurrence_id": "A is visit_occurrence-preceding_visit_occurrence_id. B is patients-marital. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-preceding_visit_occurrence_id. B is careplans-patient. Are A and B the same? Yes or No? No\n\nA is visit_occurrence-preceding_visit_occurrence_id. B is providers-address. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_id": "A is visit_detail-visit_detail_id. B is providers-city. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_id. B is medications-description. Are A and B the same? Yes or No? No\n", "visit_detail-person_id": "A is visit_detail-person_id. B is organizations-city. Are A and B the same? Yes or No? No\n\nA is visit_detail-person_id. B is procedures-date. Are A and B the same? Yes or No? No\n\nA is visit_detail-person_id. B is organizations-state. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_concept_id": "A is visit_detail-visit_detail_concept_id. B is immunizations-code. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_concept_id. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_concept_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_start_date": "A is visit_detail-visit_detail_start_date. B is conditions-description. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_start_date. B is imaging_studies-modality code. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_start_date. B is patients-id. 
Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_start_datetime": "A is visit_detail-visit_detail_start_datetime. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_start_datetime. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_start_datetime. B is encounters-provider. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_end_date": "A is visit_detail-visit_detail_end_date. B is procedures-encounter. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_end_date. B is medications-reasondescription. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_end_date. B is patients-gender. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_end_datetime": "A is visit_detail-visit_detail_end_datetime. B is encounters-start. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_end_datetime. B is patients-state. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_end_datetime. B is patients-passport. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_type_concept_id": "A is visit_detail-visit_detail_type_concept_id. B is immunizations-description. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_type_concept_id. B is organizations-name. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_type_concept_id. B is medications-reasondescription. Are A and B the same? Yes or No? No\n", "visit_detail-provider_id": "A is visit_detail-provider_id. B is patients-first. Are A and B the same? Yes or No? No\n\nA is visit_detail-provider_id. B is organizations-address. Are A and B the same? Yes or No? No\n\nA is visit_detail-provider_id. B is imaging_studies-sop description. Are A and B the same? Yes or No? No\n", "visit_detail-care_site_id": "A is visit_detail-care_site_id. B is providers-name. Are A and B the same? Yes or No? No\n\nA is visit_detail-care_site_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is visit_detail-care_site_id. B is providers-organization. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_source_value": "A is visit_detail-visit_detail_source_value. B is immunizations-patient. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_source_value. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_source_value. B is providers-address. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_source_concept_id": "A is visit_detail-visit_detail_source_concept_id. B is encounters-stop. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_source_concept_id. B is patients-race. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_source_concept_id. B is medications-description. Are A and B the same? Yes or No? No\n", "visit_detail-admitted_from_source_value": "A is visit_detail-admitted_from_source_value. B is organizations-city. Are A and B the same? Yes or No? No\n\nA is visit_detail-admitted_from_source_value. B is observations-date. Are A and B the same? Yes or No? No\n\nA is visit_detail-admitted_from_source_value. B is procedures-patient. Are A and B the same? Yes or No? No\n", "visit_detail-admitted_from_concept_id": "A is visit_detail-admitted_from_concept_id. B is allergies-stop. Are A and B the same? Yes or No? No\n\nA is visit_detail-admitted_from_concept_id. B is medications-code. 
Are A and B the same? Yes or No? No\n\nA is visit_detail-admitted_from_concept_id. B is procedures-date. Are A and B the same? Yes or No? No\n", "visit_detail-discharge_to_source_value": "A is visit_detail-discharge_to_source_value. B is encounters-id. Are A and B the same? Yes or No? No\n\nA is visit_detail-discharge_to_source_value. B is providers-organization. Are A and B the same? Yes or No? No\n\nA is visit_detail-discharge_to_source_value. B is encounters-provider. Are A and B the same? Yes or No? No\n", "visit_detail-discharge_to_concept_id": "A is visit_detail-discharge_to_concept_id. B is patients-id. Are A and B the same? Yes or No? No\n\nA is visit_detail-discharge_to_concept_id. B is immunizations-code. Are A and B the same? Yes or No? No\n\nA is visit_detail-discharge_to_concept_id. B is immunizations-cost. Are A and B the same? Yes or No? No\n", "visit_detail-preceding_visit_detail_id": "A is visit_detail-preceding_visit_detail_id. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is visit_detail-preceding_visit_detail_id. B is allergies-patient. Are A and B the same? Yes or No? No\n\nA is visit_detail-preceding_visit_detail_id. B is observations-units. Are A and B the same? Yes or No? No\n", "visit_detail-visit_detail_parent_id": "A is visit_detail-visit_detail_parent_id. B is allergies-stop. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_parent_id. B is allergies-code. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_detail_parent_id. B is encounters-start. Are A and B the same? Yes or No? No\n", "visit_detail-visit_occurrence_id": "A is visit_detail-visit_occurrence_id. B is providers-utilization. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_occurrence_id. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n\nA is visit_detail-visit_occurrence_id. B is patients-passport. Are A and B the same? Yes or No? No\n", "specimen-specimen_id": "A is specimen-specimen_id. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_id. B is providers-name. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_id. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n", "specimen-person_id": "A is specimen-person_id. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is specimen-person_id. B is observations-type. Are A and B the same? Yes or No? No\n\nA is specimen-person_id. B is conditions-description. Are A and B the same? Yes or No? No\n", "specimen-specimen_concept_id": "A is specimen-specimen_concept_id. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_concept_id. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_concept_id. B is providers-name. Are A and B the same? Yes or No? No\n", "specimen-specimen_type_concept_id": "A is specimen-specimen_type_concept_id. B is immunizations-description. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_type_concept_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_type_concept_id. B is observations-code. Are A and B the same? Yes or No? No\n", "specimen-specimen_date": "A is specimen-specimen_date. B is careplans-description. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_date. B is patients-state. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_date. B is organizations-state. Are A and B the same? Yes or No? 
No\n", "specimen-specimen_datetime": "A is specimen-specimen_datetime. B is organizations-zip. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_datetime. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_datetime. B is providers-id. Are A and B the same? Yes or No? No\n", "specimen-quantity": "A is specimen-quantity. B is patients-first. Are A and B the same? Yes or No? No\n\nA is specimen-quantity. B is careplans-id. Are A and B the same? Yes or No? No\n\nA is specimen-quantity. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n", "specimen-unit_concept_id": "A is specimen-unit_concept_id. B is imaging_studies-body site description. Are A and B the same? Yes or No? No\n\nA is specimen-unit_concept_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is specimen-unit_concept_id. B is patients-state. Are A and B the same? Yes or No? No\n", "specimen-anatomic_site_concept_id": "A is specimen-anatomic_site_concept_id. B is observations-patient. Are A and B the same? Yes or No? No\n\nA is specimen-anatomic_site_concept_id. B is medications-description. Are A and B the same? Yes or No? No\n\nA is specimen-anatomic_site_concept_id. B is organizations-address. Are A and B the same? Yes or No? No\n", "specimen-disease_status_concept_id": "A is specimen-disease_status_concept_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is specimen-disease_status_concept_id. B is observations-code. Are A and B the same? Yes or No? No\n\nA is specimen-disease_status_concept_id. B is encounters-description. Are A and B the same? Yes or No? No\n", "specimen-specimen_source_id": "A is specimen-specimen_source_id. B is immunizations-encounter. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_source_id. B is observations-date. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_source_id. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n", "specimen-specimen_source_value": "A is specimen-specimen_source_value. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_source_value. B is imaging_studies-date. Are A and B the same? Yes or No? No\n\nA is specimen-specimen_source_value. B is organizations-id. Are A and B the same? Yes or No? No\n", "specimen-unit_source_value": "A is specimen-unit_source_value. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n\nA is specimen-unit_source_value. B is organizations-name. Are A and B the same? Yes or No? No\n\nA is specimen-unit_source_value. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n", "specimen-anatomic_site_source_value": "A is specimen-anatomic_site_source_value. B is encounters-patient. Are A and B the same? Yes or No? No\n\nA is specimen-anatomic_site_source_value. B is providers-state. Are A and B the same? Yes or No? No\n\nA is specimen-anatomic_site_source_value. B is providers-city. Are A and B the same? Yes or No? No\n", "specimen-disease_status_source_value": "A is specimen-disease_status_source_value. B is procedures-date. Are A and B the same? Yes or No? No\n\nA is specimen-disease_status_source_value. B is providers-gender. Are A and B the same? Yes or No? No\n\nA is specimen-disease_status_source_value. B is careplans-description. Are A and B the same? Yes or No? No\n", "measurement-measurement_id": "A is measurement-measurement_id. B is careplans-id. Are A and B the same? Yes or No? 
No\n\nA is measurement-measurement_id. B is observations-encounter. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n", "measurement-person_id": "A is measurement-person_id. B is observations-patient. Are A and B the same? Yes or No? Yes\n\nA is measurement-person_id. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n\nA is measurement-person_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n", "measurement-measurement_concept_id": "A is measurement-measurement_concept_id. B is procedures-code. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_concept_id. B is patients-address. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_concept_id. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n", "measurement-measurement_date": "A is measurement-measurement_date. B is observations-date. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_date. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_date. B is allergies-description. Are A and B the same? Yes or No? No\n", "measurement-measurement_datetime": "A is measurement-measurement_datetime. B is procedures-date. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_datetime. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_datetime. B is encounters-start. Are A and B the same? Yes or No? No\n", "measurement-measurement_time": "A is measurement-measurement_time. B is procedures-date. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_time. B is medications-patient. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_time. B is observations-type. Are A and B the same? Yes or No? No\n", "measurement-measurement_type_concept_id": "A is measurement-measurement_type_concept_id. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_type_concept_id. B is imaging_studies-date. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_type_concept_id. B is conditions-code. Are A and B the same? Yes or No? No\n", "measurement-operator_concept_id": "A is measurement-operator_concept_id. B is providers-state. Are A and B the same? Yes or No? No\n\nA is measurement-operator_concept_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is measurement-operator_concept_id. B is medications-reasondescription. Are A and B the same? Yes or No? No\n", "measurement-value_as_number": "A is measurement-value_as_number. B is observations-value. Are A and B the same? Yes or No? Yes\n\nA is measurement-value_as_number. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n\nA is measurement-value_as_number. B is procedures-description. Are A and B the same? Yes or No? No\n", "measurement-value_as_concept_id": "A is measurement-value_as_concept_id. B is immunizations-date. Are A and B the same? Yes or No? No\n\nA is measurement-value_as_concept_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is measurement-value_as_concept_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n", "measurement-unit_concept_id": "A is measurement-unit_concept_id. B is observations-units. Are A and B the same? Yes or No? No\n\nA is measurement-unit_concept_id. B is medications-stop. Are A and B the same? Yes or No? 
No\n\nA is measurement-unit_concept_id. B is imaging_studies-encounter. Are A and B the same? Yes or No? No\n", "measurement-range_low": "A is measurement-range_low. B is patients-zip. Are A and B the same? Yes or No? No\n\nA is measurement-range_low. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is measurement-range_low. B is encounters-description. Are A and B the same? Yes or No? No\n", "measurement-range_high": "A is measurement-range_high. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n\nA is measurement-range_high. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is measurement-range_high. B is allergies-patient. Are A and B the same? Yes or No? No\n", "measurement-provider_id": "A is measurement-provider_id. B is observations-code. Are A and B the same? Yes or No? No\n\nA is measurement-provider_id. B is observations-description. Are A and B the same? Yes or No? No\n\nA is measurement-provider_id. B is providers-organization. Are A and B the same? Yes or No? No\n", "measurement-visit_occurrence_id": "A is measurement-visit_occurrence_id. B is observations-encounter. Are A and B the same? Yes or No? Yes\n\nA is measurement-visit_occurrence_id. B is organizations-zip. Are A and B the same? Yes or No? No\n\nA is measurement-visit_occurrence_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n", "measurement-visit_detail_id": "A is measurement-visit_detail_id. B is allergies-start. Are A and B the same? Yes or No? No\n\nA is measurement-visit_detail_id. B is imaging_studies-id. Are A and B the same? Yes or No? No\n\nA is measurement-visit_detail_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n", "measurement-measurement_source_value": "A is measurement-measurement_source_value. B is observations-code. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_source_value. B is patients-last. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_source_value. B is organizations-phone. Are A and B the same? Yes or No? No\n", "measurement-measurement_source_concept_id": "A is measurement-measurement_source_concept_id. B is procedures-code. Are A and B the same? Yes or No? Yes\n\nA is measurement-measurement_source_concept_id. B is immunizations-code. Are A and B the same? Yes or No? No\n\nA is measurement-measurement_source_concept_id. B is procedures-date. Are A and B the same? Yes or No? No\n", "measurement-unit_source_value": "A is measurement-unit_source_value. B is observations-units. Are A and B the same? Yes or No? Yes\n\nA is measurement-unit_source_value. B is patients-maiden. Are A and B the same? Yes or No? No\n\nA is measurement-unit_source_value. B is medications-stop. Are A and B the same? Yes or No? No\n", "measurement-value_source_value": "A is measurement-value_source_value. B is observations-value. Are A and B the same? Yes or No? Yes\n\nA is measurement-value_source_value. B is encounters-id. Are A and B the same? Yes or No? No\n\nA is measurement-value_source_value. B is observations-encounter. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_occurrence_id": "A is procedure_occurrence-procedure_occurrence_id. B is patients-first. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_occurrence_id. B is observations-code. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_occurrence_id. B is medications-stop. Are A and B the same? Yes or No? 
No\n", "procedure_occurrence-person_id": "A is procedure_occurrence-person_id. B is procedures-patient. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-person_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-person_id. B is conditions-start. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_concept_id": "A is procedure_occurrence-procedure_concept_id. B is procedures-code. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-procedure_concept_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_concept_id. B is organizations-name. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_date": "A is procedure_occurrence-procedure_date. B is procedures-date. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-procedure_date. B is observations-code. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_date. B is careplans-encounter. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_datetime": "A is procedure_occurrence-procedure_datetime. B is procedures-date. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-procedure_datetime. B is observations-type. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_datetime. B is allergies-encounter. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_type_concept_id": "A is procedure_occurrence-procedure_type_concept_id. B is providers-speciality. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_type_concept_id. B is medications-description. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_type_concept_id. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n", "procedure_occurrence-modifier_concept_id": "A is procedure_occurrence-modifier_concept_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-modifier_concept_id. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-modifier_concept_id. B is patients-prefix. Are A and B the same? Yes or No? No\n", "procedure_occurrence-quantity": "A is procedure_occurrence-quantity. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-quantity. B is providers-state. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-quantity. B is immunizations-cost. Are A and B the same? Yes or No? No\n", "procedure_occurrence-provider_id": "A is procedure_occurrence-provider_id. B is allergies-description. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-provider_id. B is immunizations-patient. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-provider_id. B is patients-suffix. Are A and B the same? Yes or No? No\n", "procedure_occurrence-visit_occurrence_id": "A is procedure_occurrence-visit_occurrence_id. B is procedures-encounter. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-visit_occurrence_id. B is observations-patient. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-visit_occurrence_id. B is observations-type. Are A and B the same? Yes or No? No\n", "procedure_occurrence-visit_detail_id": "A is procedure_occurrence-visit_detail_id. B is medications-code. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-visit_detail_id. B is conditions-start. 
Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-visit_detail_id. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_source_value": "A is procedure_occurrence-procedure_source_value. B is procedures-code. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-procedure_source_value. B is conditions-description. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_source_value. B is patients-address. Are A and B the same? Yes or No? No\n", "procedure_occurrence-procedure_source_concept_id": "A is procedure_occurrence-procedure_source_concept_id. B is procedures-code. Are A and B the same? Yes or No? Yes\n\nA is procedure_occurrence-procedure_source_concept_id. B is organizations-zip. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-procedure_source_concept_id. B is careplans-id. Are A and B the same? Yes or No? No\n", "procedure_occurrence-modifier_source_value": "A is procedure_occurrence-modifier_source_value. B is imaging_studies-body site description. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-modifier_source_value. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is procedure_occurrence-modifier_source_value. B is allergies-start. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_occurrence_id": "A is condition_occurrence-condition_occurrence_id. B is immunizations-encounter. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_occurrence_id. B is patients-maiden. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_occurrence_id. B is observations-code. Are A and B the same? Yes or No? No\n", "condition_occurrence-person_id": "A is condition_occurrence-person_id. B is conditions-patient. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-person_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-person_id. B is imaging_studies-patient. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_concept_id": "A is condition_occurrence-condition_concept_id. B is conditions-code. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_concept_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_concept_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_start_date": "A is condition_occurrence-condition_start_date. B is conditions-start. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_start_date. B is providers-address. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_start_date. B is observations-date. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_start_datetime": "A is condition_occurrence-condition_start_datetime. B is conditions-start. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_start_datetime. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_start_datetime. B is providers-city. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_end_date": "A is condition_occurrence-condition_end_date. B is conditions-stop. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_end_date. B is imaging_studies-date. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_end_date. 
B is immunizations-date. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_end_datetime": "A is condition_occurrence-condition_end_datetime. B is conditions-stop. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_end_datetime. B is providers-utilization. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_end_datetime. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_type_concept_id": "A is condition_occurrence-condition_type_concept_id. B is allergies-patient. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_type_concept_id. B is medications-reasondescription. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_type_concept_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_status_concept_id": "A is condition_occurrence-condition_status_concept_id. B is patients-birthplace. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_status_concept_id. B is organizations-name. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_status_concept_id. B is encounters-patient. Are A and B the same? Yes or No? No\n", "condition_occurrence-stop_reason": "A is condition_occurrence-stop_reason. B is organizations-zip. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-stop_reason. B is providers-address. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-stop_reason. B is conditions-start. Are A and B the same? Yes or No? No\n", "condition_occurrence-provider_id": "A is condition_occurrence-provider_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-provider_id. B is patients-prefix. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-provider_id. B is patients-state. Are A and B the same? Yes or No? No\n", "condition_occurrence-visit_occurrence_id": "A is condition_occurrence-visit_occurrence_id. B is conditions-encounter. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-visit_occurrence_id. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-visit_occurrence_id. B is providers-gender. Are A and B the same? Yes or No? No\n", "condition_occurrence-visit_detail_id": "A is condition_occurrence-visit_detail_id. B is encounters-patient. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-visit_detail_id. B is imaging_studies-id. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-visit_detail_id. B is observations-date. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_source_value": "A is condition_occurrence-condition_source_value. B is conditions-code. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_source_value. B is providers-utilization. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_source_value. B is immunizations-description. Are A and B the same? Yes or No? No\n", "condition_occurrence-condition_source_concept_id": "A is condition_occurrence-condition_source_concept_id. B is conditions-code. Are A and B the same? Yes or No? Yes\n\nA is condition_occurrence-condition_source_concept_id. B is encounters-start. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_source_concept_id. B is medications-stop. Are A and B the same? Yes or No? 
No\n", "condition_occurrence-condition_status_source_value": "A is condition_occurrence-condition_status_source_value. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_status_source_value. B is patients-first. Are A and B the same? Yes or No? No\n\nA is condition_occurrence-condition_status_source_value. B is providers-state. Are A and B the same? Yes or No? No\n", "death-person_id": "A is death-person_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is death-person_id. B is organizations-address. Are A and B the same? Yes or No? No\n\nA is death-person_id. B is allergies-patient. Are A and B the same? Yes or No? No\n", "death-death_date": "A is death-death_date. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is death-death_date. B is procedures-patient. Are A and B the same? Yes or No? No\n\nA is death-death_date. B is conditions-code. Are A and B the same? Yes or No? No\n", "death-death_datetime": "A is death-death_datetime. B is organizations-state. Are A and B the same? Yes or No? No\n\nA is death-death_datetime. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n\nA is death-death_datetime. B is patients-drivers. Are A and B the same? Yes or No? No\n", "death-death_type_concept_id": "A is death-death_type_concept_id. B is patients-id. Are A and B the same? Yes or No? No\n\nA is death-death_type_concept_id. B is procedures-encounter. Are A and B the same? Yes or No? No\n\nA is death-death_type_concept_id. B is imaging_studies-date. Are A and B the same? Yes or No? No\n", "death-cause_concept_id": "A is death-cause_concept_id. B is conditions-encounter. Are A and B the same? Yes or No? No\n\nA is death-cause_concept_id. B is imaging_studies-body site description. Are A and B the same? Yes or No? No\n\nA is death-cause_concept_id. B is immunizations-cost. Are A and B the same? Yes or No? No\n", "death-cause_source_value": "A is death-cause_source_value. B is patients-race. Are A and B the same? Yes or No? No\n\nA is death-cause_source_value. B is procedures-reasondescription. Are A and B the same? Yes or No? No\n\nA is death-cause_source_value. B is allergies-code. Are A and B the same? Yes or No? No\n", "death-cause_source_concept_id": "A is death-cause_source_concept_id. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n\nA is death-cause_source_concept_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is death-cause_source_concept_id. B is providers-gender. Are A and B the same? Yes or No? No\n", "location-location_id": "A is location-location_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is location-location_id. B is encounters-start. Are A and B the same? Yes or No? No\n\nA is location-location_id. B is observations-type. Are A and B the same? Yes or No? No\n", "location-address_1": "A is location-address_1. B is allergies-code. Are A and B the same? Yes or No? No\n\nA is location-address_1. B is organizations-name. Are A and B the same? Yes or No? No\n\nA is location-address_1. B is imaging_studies-modality code. Are A and B the same? Yes or No? No\n", "location-address_2": "A is location-address_2. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n\nA is location-address_2. B is conditions-code. Are A and B the same? Yes or No? No\n\nA is location-address_2. B is providers-gender. Are A and B the same? Yes or No? No\n", "location-city": "A is location-city. B is patients-ssn. 
Are A and B the same? Yes or No? No\n\nA is location-city. B is patients-gender. Are A and B the same? Yes or No? No\n\nA is location-city. B is imaging_studies-sop description. Are A and B the same? Yes or No? No\n", "location-state": "A is location-state. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is location-state. B is providers-zip. Are A and B the same? Yes or No? No\n\nA is location-state. B is patients-zip. Are A and B the same? Yes or No? No\n", "location-zip": "A is location-zip. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n\nA is location-zip. B is patients-marital. Are A and B the same? Yes or No? No\n\nA is location-zip. B is patients-ssn. Are A and B the same? Yes or No? No\n", "location-county": "A is location-county. B is conditions-patient. Are A and B the same? Yes or No? No\n\nA is location-county. B is encounters-cost. Are A and B the same? Yes or No? No\n\nA is location-county. B is patients-gender. Are A and B the same? Yes or No? No\n", "location-location_source_value": "A is location-location_source_value. B is patients-zip. Are A and B the same? Yes or No? No\n\nA is location-location_source_value. B is patients-city. Are A and B the same? Yes or No? No\n\nA is location-location_source_value. B is medications-encounter. Are A and B the same? Yes or No? No\n", "observation-observation_id": "A is observation-observation_id. B is procedures-encounter. Are A and B the same? Yes or No? No\n\nA is observation-observation_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is observation-observation_id. B is medications-encounter. Are A and B the same? Yes or No? No\n", "observation-person_id": "A is observation-person_id. B is allergies-patient. Are A and B the same? Yes or No? Yes\n\nA is observation-person_id. B is encounters-start. Are A and B the same? Yes or No? No\n\nA is observation-person_id. B is allergies-description. Are A and B the same? Yes or No? No\n", "observation-observation_concept_id": "A is observation-observation_concept_id. B is conditions-code. Are A and B the same? Yes or No? Yes\n\nA is observation-observation_concept_id. B is providers-name. Are A and B the same? Yes or No? No\n\nA is observation-observation_concept_id. B is providers-organization. Are A and B the same? Yes or No? No\n", "observation-observation_date": "A is observation-observation_date. B is allergies-start. Are A and B the same? Yes or No? Yes\n\nA is observation-observation_date. B is careplans-start. Are A and B the same? Yes or No? No\n\nA is observation-observation_date. B is medications-cost. Are A and B the same? Yes or No? No\n", "observation-observation_datetime": "A is observation-observation_datetime. B is allergies-start. Are A and B the same? Yes or No? Yes\n\nA is observation-observation_datetime. B is conditions-encounter. Are A and B the same? Yes or No? No\n\nA is observation-observation_datetime. B is medications-description. Are A and B the same? Yes or No? No\n", "observation-observation_type_concept_id": "A is observation-observation_type_concept_id. B is encounters-code. Are A and B the same? Yes or No? No\n\nA is observation-observation_type_concept_id. B is observations-code. Are A and B the same? Yes or No? No\n\nA is observation-observation_type_concept_id. B is providers-name. Are A and B the same? Yes or No? No\n", "observation-value_as_number": "A is observation-value_as_number. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is observation-value_as_number. 
B is imaging_studies-patient. Are A and B the same? Yes or No? No\n\nA is observation-value_as_number. B is encounters-description. Are A and B the same? Yes or No? No\n", "observation-value_as_string": "A is observation-value_as_string. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is observation-value_as_string. B is imaging_studies-date. Are A and B the same? Yes or No? No\n\nA is observation-value_as_string. B is patients-race. Are A and B the same? Yes or No? No\n", "observation-value_as_concept_id": "A is observation-value_as_concept_id. B is providers-id. Are A and B the same? Yes or No? No\n\nA is observation-value_as_concept_id. B is careplans-patient. Are A and B the same? Yes or No? No\n\nA is observation-value_as_concept_id. B is providers-address. Are A and B the same? Yes or No? No\n", "observation-qualifier_concept_id": "A is observation-qualifier_concept_id. B is immunizations-patient. Are A and B the same? Yes or No? No\n\nA is observation-qualifier_concept_id. B is immunizations-code. Are A and B the same? Yes or No? No\n\nA is observation-qualifier_concept_id. B is medications-start. Are A and B the same? Yes or No? No\n", "observation-unit_concept_id": "A is observation-unit_concept_id. B is procedures-code. Are A and B the same? Yes or No? No\n\nA is observation-unit_concept_id. B is allergies-patient. Are A and B the same? Yes or No? No\n\nA is observation-unit_concept_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n", "observation-provider_id": "A is observation-provider_id. B is careplans-stop. Are A and B the same? Yes or No? No\n\nA is observation-provider_id. B is organizations-phone. Are A and B the same? Yes or No? No\n\nA is observation-provider_id. B is conditions-stop. Are A and B the same? Yes or No? No\n", "observation-visit_occurrence_id": "A is observation-visit_occurrence_id. B is allergies-encounter. Are A and B the same? Yes or No? Yes\n\nA is observation-visit_occurrence_id. B is providers-address. Are A and B the same? Yes or No? No\n\nA is observation-visit_occurrence_id. B is organizations-phone. Are A and B the same? Yes or No? No\n", "observation-visit_detail_id": "A is observation-visit_detail_id. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n\nA is observation-visit_detail_id. B is imaging_studies-encounter. Are A and B the same? Yes or No? No\n\nA is observation-visit_detail_id. B is providers-utilization. Are A and B the same? Yes or No? No\n", "observation-observation_source_value": "A is observation-observation_source_value. B is allergies-code. Are A and B the same? Yes or No? Yes\n\nA is observation-observation_source_value. B is immunizations-encounter. Are A and B the same? Yes or No? No\n\nA is observation-observation_source_value. B is observations-type. Are A and B the same? Yes or No? No\n", "observation-observation_source_concept_id": "A is observation-observation_source_concept_id. B is allergies-code. Are A and B the same? Yes or No? Yes\n\nA is observation-observation_source_concept_id. B is patients-last. Are A and B the same? Yes or No? No\n\nA is observation-observation_source_concept_id. B is patients-prefix. Are A and B the same? Yes or No? No\n", "observation-unit_source_value": "A is observation-unit_source_value. B is immunizations-description. Are A and B the same? Yes or No? No\n\nA is observation-unit_source_value. B is providers-organization. Are A and B the same? Yes or No? No\n\nA is observation-unit_source_value. B is careplans-patient. 
Are A and B the same? Yes or No? No\n", "observation-qualifier_source_value": "A is observation-qualifier_source_value. B is careplans-description. Are A and B the same? Yes or No? No\n\nA is observation-qualifier_source_value. B is conditions-patient. Are A and B the same? Yes or No? No\n\nA is observation-qualifier_source_value. B is conditions-description. Are A and B the same? Yes or No? No\n", "observation-observation_event_id": "A is observation-observation_event_id. B is observations-type. Are A and B the same? Yes or No? No\n\nA is observation-observation_event_id. B is patients-address. Are A and B the same? Yes or No? No\n\nA is observation-observation_event_id. B is providers-utilization. Are A and B the same? Yes or No? No\n", "observation-obs_event_field_concept_id": "A is observation-obs_event_field_concept_id. B is procedures-description. Are A and B the same? Yes or No? No\n\nA is observation-obs_event_field_concept_id. B is medications-cost. Are A and B the same? Yes or No? No\n\nA is observation-obs_event_field_concept_id. B is procedures-code. Are A and B the same? Yes or No? No\n", "observation-value_as_datetime": "A is observation-value_as_datetime. B is patients-marital. Are A and B the same? Yes or No? No\n\nA is observation-value_as_datetime. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is observation-value_as_datetime. B is procedures-cost. Are A and B the same? Yes or No? No\n", "observation_period-observation_period_id": "A is observation_period-observation_period_id. B is allergies-description. Are A and B the same? Yes or No? No\n\nA is observation_period-observation_period_id. B is observations-description. Are A and B the same? Yes or No? No\n\nA is observation_period-observation_period_id. B is providers-utilization. Are A and B the same? Yes or No? No\n", "observation_period-person_id": "A is observation_period-person_id. B is encounters-patient. Are A and B the same? Yes or No? Yes\n\nA is observation_period-person_id. B is patients-id. Are A and B the same? Yes or No? No\n\nA is observation_period-person_id. B is conditions-description. Are A and B the same? Yes or No? No\n", "observation_period-observation_period_start_date": "A is observation_period-observation_period_start_date. B is encounters-start. Are A and B the same? Yes or No? Yes\n\nA is observation_period-observation_period_start_date. B is procedures-patient. Are A and B the same? Yes or No? No\n\nA is observation_period-observation_period_start_date. B is patients-birthplace. Are A and B the same? Yes or No? No\n", "observation_period-observation_period_end_date": "A is observation_period-observation_period_end_date. B is encounters-stop. Are A and B the same? Yes or No? Yes\n\nA is observation_period-observation_period_end_date. B is imaging_studies-modality code. Are A and B the same? Yes or No? No\n\nA is observation_period-observation_period_end_date. B is procedures-code. Are A and B the same? Yes or No? No\n", "observation_period-period_type_concept_id": "A is observation_period-period_type_concept_id. B is patients-prefix. Are A and B the same? Yes or No? No\n\nA is observation_period-period_type_concept_id. B is conditions-start. Are A and B the same? Yes or No? No\n\nA is observation_period-period_type_concept_id. B is procedures-reasondescription. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_exposure_id": "A is drug_exposure-drug_exposure_id. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_id. 
B is procedures-date. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_id. B is medications-start. Are A and B the same? Yes or No? No\n", "drug_exposure-person_id": "A is drug_exposure-person_id. B is immunizations-patient. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-person_id. B is observations-encounter. Are A and B the same? Yes or No? No\n\nA is drug_exposure-person_id. B is patients-passport. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_concept_id": "A is drug_exposure-drug_concept_id. B is medications-code. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_concept_id. B is conditions-patient. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_concept_id. B is allergies-stop. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_exposure_start_date": "A is drug_exposure-drug_exposure_start_date. B is medications-start. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_exposure_start_date. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_start_date. B is patients-deathdate. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_exposure_start_datetime": "A is drug_exposure-drug_exposure_start_datetime. B is medications-start. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_exposure_start_datetime. B is patients-ssn. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_start_datetime. B is immunizations-patient. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_exposure_end_date": "A is drug_exposure-drug_exposure_end_date. B is medications-stop. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_exposure_end_date. B is conditions-description. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_end_date. B is organizations-city. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_exposure_end_datetime": "A is drug_exposure-drug_exposure_end_datetime. B is medications-start. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_exposure_end_datetime. B is conditions-start. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_exposure_end_datetime. B is observations-patient. Are A and B the same? Yes or No? No\n", "drug_exposure-verbatim_end_date": "A is drug_exposure-verbatim_end_date. B is immunizations-date. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-verbatim_end_date. B is organizations-phone. Are A and B the same? Yes or No? No\n\nA is drug_exposure-verbatim_end_date. B is medications-cost. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_type_concept_id": "A is drug_exposure-drug_type_concept_id. B is providers-id. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_type_concept_id. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_type_concept_id. B is immunizations-encounter. Are A and B the same? Yes or No? No\n", "drug_exposure-stop_reason": "A is drug_exposure-stop_reason. B is patients-gender. Are A and B the same? Yes or No? No\n\nA is drug_exposure-stop_reason. B is careplans-start. Are A and B the same? Yes or No? No\n\nA is drug_exposure-stop_reason. B is procedures-date. Are A and B the same? Yes or No? No\n", "drug_exposure-refills": "A is drug_exposure-refills. B is observations-type. Are A and B the same? Yes or No? No\n\nA is drug_exposure-refills. B is immunizations-encounter. Are A and B the same? Yes or No? 
No\n\nA is drug_exposure-refills. B is organizations-zip. Are A and B the same? Yes or No? No\n", "drug_exposure-quantity": "A is drug_exposure-quantity. B is patients-address. Are A and B the same? Yes or No? No\n\nA is drug_exposure-quantity. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n\nA is drug_exposure-quantity. B is providers-id. Are A and B the same? Yes or No? No\n", "drug_exposure-days_supply": "A is drug_exposure-days_supply. B is medications-stop. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-days_supply. B is patients-birthplace. Are A and B the same? Yes or No? No\n\nA is drug_exposure-days_supply. B is medications-description. Are A and B the same? Yes or No? No\n", "drug_exposure-sig": "A is drug_exposure-sig. B is imaging_studies-id. Are A and B the same? Yes or No? No\n\nA is drug_exposure-sig. B is medications-cost. Are A and B the same? Yes or No? No\n\nA is drug_exposure-sig. B is organizations-name. Are A and B the same? Yes or No? No\n", "drug_exposure-route_concept_id": "A is drug_exposure-route_concept_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n\nA is drug_exposure-route_concept_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is drug_exposure-route_concept_id. B is conditions-encounter. Are A and B the same? Yes or No? No\n", "drug_exposure-lot_number": "A is drug_exposure-lot_number. B is providers-gender. Are A and B the same? Yes or No? No\n\nA is drug_exposure-lot_number. B is allergies-code. Are A and B the same? Yes or No? No\n\nA is drug_exposure-lot_number. B is encounters-patient. Are A and B the same? Yes or No? No\n", "drug_exposure-provider_id": "A is drug_exposure-provider_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n\nA is drug_exposure-provider_id. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is drug_exposure-provider_id. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n", "drug_exposure-visit_occurrence_id": "A is drug_exposure-visit_occurrence_id. B is medications-encounter. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-visit_occurrence_id. B is observations-units. Are A and B the same? Yes or No? No\n\nA is drug_exposure-visit_occurrence_id. B is observations-encounter. Are A and B the same? Yes or No? No\n", "drug_exposure-visit_detail_id": "A is drug_exposure-visit_detail_id. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is drug_exposure-visit_detail_id. B is medications-reasoncode. Are A and B the same? Yes or No? No\n\nA is drug_exposure-visit_detail_id. B is imaging_studies-id. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_source_value": "A is drug_exposure-drug_source_value. B is medications-code. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_source_value. B is procedures-description. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_source_value. B is allergies-start. Are A and B the same? Yes or No? No\n", "drug_exposure-drug_source_concept_id": "A is drug_exposure-drug_source_concept_id. B is conditions-code. Are A and B the same? Yes or No? Yes\n\nA is drug_exposure-drug_source_concept_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is drug_exposure-drug_source_concept_id. B is immunizations-date. Are A and B the same? Yes or No? No\n", "drug_exposure-route_source_value": "A is drug_exposure-route_source_value. B is patients-suffix. Are A and B the same? Yes or No? 
No\n\nA is drug_exposure-route_source_value. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n\nA is drug_exposure-route_source_value. B is conditions-stop. Are A and B the same? Yes or No? No\n", "drug_exposure-dose_unit_source_value": "A is drug_exposure-dose_unit_source_value. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is drug_exposure-dose_unit_source_value. B is patients-state. Are A and B the same? Yes or No? No\n\nA is drug_exposure-dose_unit_source_value. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n", "device_exposure-device_exposure_id": "A is device_exposure-device_exposure_id. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_id. B is organizations-address. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_id. B is allergies-start. Are A and B the same? Yes or No? No\n", "device_exposure-person_id": "A is device_exposure-person_id. B is medications-description. Are A and B the same? Yes or No? No\n\nA is device_exposure-person_id. B is encounters-code. Are A and B the same? Yes or No? No\n\nA is device_exposure-person_id. B is patients-passport. Are A and B the same? Yes or No? No\n", "device_exposure-device_concept_id": "A is device_exposure-device_concept_id. B is observations-units. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_concept_id. B is patients-address. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_concept_id. B is encounters-stop. Are A and B the same? Yes or No? No\n", "device_exposure-device_exposure_start_date": "A is device_exposure-device_exposure_start_date. B is patients-address. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_start_date. B is allergies-start. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_start_date. B is encounters-patient. Are A and B the same? Yes or No? No\n", "device_exposure-device_exposure_start_datetime": "A is device_exposure-device_exposure_start_datetime. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_start_datetime. B is observations-encounter. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_start_datetime. B is conditions-code. Are A and B the same? Yes or No? No\n", "device_exposure-device_exposure_end_date": "A is device_exposure-device_exposure_end_date. B is organizations-utilization. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_end_date. B is medications-patient. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_end_date. B is encounters-start. Are A and B the same? Yes or No? No\n", "device_exposure-device_exposure_end_datetime": "A is device_exposure-device_exposure_end_datetime. B is observations-value. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_end_datetime. B is medications-reasondescription. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_exposure_end_datetime. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n", "device_exposure-device_type_concept_id": "A is device_exposure-device_type_concept_id. B is procedures-encounter. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_type_concept_id. B is careplans-description. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_type_concept_id. 
B is observations-patient. Are A and B the same? Yes or No? No\n", "device_exposure-unique_device_id": "A is device_exposure-unique_device_id. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is device_exposure-unique_device_id. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is device_exposure-unique_device_id. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n", "device_exposure-quantity": "A is device_exposure-quantity. B is conditions-start. Are A and B the same? Yes or No? No\n\nA is device_exposure-quantity. B is immunizations-cost. Are A and B the same? Yes or No? No\n\nA is device_exposure-quantity. B is conditions-code. Are A and B the same? Yes or No? No\n", "device_exposure-provider_id": "A is device_exposure-provider_id. B is providers-gender. Are A and B the same? Yes or No? No\n\nA is device_exposure-provider_id. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n\nA is device_exposure-provider_id. B is immunizations-description. Are A and B the same? Yes or No? No\n", "device_exposure-visit_occurrence_id": "A is device_exposure-visit_occurrence_id. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n\nA is device_exposure-visit_occurrence_id. B is conditions-encounter. Are A and B the same? Yes or No? No\n\nA is device_exposure-visit_occurrence_id. B is providers-gender. Are A and B the same? Yes or No? No\n", "device_exposure-visit_detail_id": "A is device_exposure-visit_detail_id. B is medications-reasondescription. Are A and B the same? Yes or No? No\n\nA is device_exposure-visit_detail_id. B is patients-city. Are A and B the same? Yes or No? No\n\nA is device_exposure-visit_detail_id. B is patients-prefix. Are A and B the same? Yes or No? No\n", "device_exposure-device_source_value": "A is device_exposure-device_source_value. B is allergies-patient. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_source_value. B is immunizations-date. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_source_value. B is conditions-description. Are A and B the same? Yes or No? No\n", "device_exposure-device_source_concept_id": "A is device_exposure-device_source_concept_id. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_source_concept_id. B is careplans-description. Are A and B the same? Yes or No? No\n\nA is device_exposure-device_source_concept_id. B is encounters-id. Are A and B the same? Yes or No? No\n", "note-note_id": "A is note-note_id. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is note-note_id. B is medications-code. Are A and B the same? Yes or No? No\n\nA is note-note_id. B is allergies-start. Are A and B the same? Yes or No? No\n", "note-person_id": "A is note-person_id. B is conditions-description. Are A and B the same? Yes or No? No\n\nA is note-person_id. B is patients-race. Are A and B the same? Yes or No? No\n\nA is note-person_id. B is careplans-id. Are A and B the same? Yes or No? No\n", "note-note_event_id": "A is note-note_event_id. B is patients-first. Are A and B the same? Yes or No? No\n\nA is note-note_event_id. B is observations-code. Are A and B the same? Yes or No? No\n\nA is note-note_event_id. B is careplans-stop. Are A and B the same? Yes or No? No\n", "note-note_event_field_concept_id": "A is note-note_event_field_concept_id. B is imaging_studies-modality code. Are A and B the same? Yes or No? No\n\nA is note-note_event_field_concept_id. B is imaging_studies-sop code. 
Are A and B the same? Yes or No? No\n\nA is note-note_event_field_concept_id. B is careplans-code. Are A and B the same? Yes or No? No\n", "note-note_date": "A is note-note_date. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is note-note_date. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is note-note_date. B is medications-reasoncode. Are A and B the same? Yes or No? No\n", "note-note_datetime": "A is note-note_datetime. B is encounters-id. Are A and B the same? Yes or No? No\n\nA is note-note_datetime. B is patients-id. Are A and B the same? Yes or No? No\n\nA is note-note_datetime. B is procedures-encounter. Are A and B the same? Yes or No? No\n", "note-note_type_concept_id": "A is note-note_type_concept_id. B is encounters-description. Are A and B the same? Yes or No? No\n\nA is note-note_type_concept_id. B is patients-city. Are A and B the same? Yes or No? No\n\nA is note-note_type_concept_id. B is medications-reasondescription. Are A and B the same? Yes or No? No\n", "note-note_class_concept_id": "A is note-note_class_concept_id. B is providers-organization. Are A and B the same? Yes or No? No\n\nA is note-note_class_concept_id. B is observations-date. Are A and B the same? Yes or No? No\n\nA is note-note_class_concept_id. B is medications-code. Are A and B the same? Yes or No? No\n", "note-note_title": "A is note-note_title. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n\nA is note-note_title. B is patients-last. Are A and B the same? Yes or No? No\n\nA is note-note_title. B is providers-organization. Are A and B the same? Yes or No? No\n", "note-note_text": "A is note-note_text. B is immunizations-patient. Are A and B the same? Yes or No? No\n\nA is note-note_text. B is imaging_studies-id. Are A and B the same? Yes or No? No\n\nA is note-note_text. B is imaging_studies-body site description. Are A and B the same? Yes or No? No\n", "note-encoding_concept_id": "A is note-encoding_concept_id. B is patients-ssn. Are A and B the same? Yes or No? No\n\nA is note-encoding_concept_id. B is organizations-state. Are A and B the same? Yes or No? No\n\nA is note-encoding_concept_id. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n", "note-language_concept_id": "A is note-language_concept_id. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n\nA is note-language_concept_id. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n\nA is note-language_concept_id. B is imaging_studies-modality description. Are A and B the same? Yes or No? No\n", "note-provider_id": "A is note-provider_id. B is providers-gender. Are A and B the same? Yes or No? No\n\nA is note-provider_id. B is immunizations-code. Are A and B the same? Yes or No? No\n\nA is note-provider_id. B is imaging_studies-id. Are A and B the same? Yes or No? No\n", "note-visit_occurrence_id": "A is note-visit_occurrence_id. B is patients-gender. Are A and B the same? Yes or No? No\n\nA is note-visit_occurrence_id. B is careplans-encounter. Are A and B the same? Yes or No? No\n\nA is note-visit_occurrence_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n", "note-visit_detail_id": "A is note-visit_detail_id. B is allergies-encounter. Are A and B the same? Yes or No? No\n\nA is note-visit_detail_id. B is immunizations-date. Are A and B the same? Yes or No? No\n\nA is note-visit_detail_id. B is providers-gender. Are A and B the same? Yes or No? No\n", "note-note_source_value": "A is note-note_source_value. 
B is patients-ssn. Are A and B the same? Yes or No? No\n\nA is note-note_source_value. B is medications-patient. Are A and B the same? Yes or No? No\n\nA is note-note_source_value. B is immunizations-encounter. Are A and B the same? Yes or No? No\n", "fact_relationship-domain_concept_id_1": "A is fact_relationship-domain_concept_id_1. B is imaging_studies-patient. Are A and B the same? Yes or No? No\n\nA is fact_relationship-domain_concept_id_1. B is careplans-id. Are A and B the same? Yes or No? No\n\nA is fact_relationship-domain_concept_id_1. B is medications-encounter. Are A and B the same? Yes or No? No\n", "fact_relationship-fact_id_1": "A is fact_relationship-fact_id_1. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n\nA is fact_relationship-fact_id_1. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is fact_relationship-fact_id_1. B is immunizations-description. Are A and B the same? Yes or No? No\n", "fact_relationship-domain_concept_id_2": "A is fact_relationship-domain_concept_id_2. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is fact_relationship-domain_concept_id_2. B is conditions-patient. Are A and B the same? Yes or No? No\n\nA is fact_relationship-domain_concept_id_2. B is patients-last. Are A and B the same? Yes or No? No\n", "fact_relationship-fact_id_2": "A is fact_relationship-fact_id_2. B is medications-start. Are A and B the same? Yes or No? No\n\nA is fact_relationship-fact_id_2. B is procedures-patient. Are A and B the same? Yes or No? No\n\nA is fact_relationship-fact_id_2. B is providers-speciality. Are A and B the same? Yes or No? No\n", "fact_relationship-relationship_concept_id": "A is fact_relationship-relationship_concept_id. B is immunizations-encounter. Are A and B the same? Yes or No? No\n\nA is fact_relationship-relationship_concept_id. B is allergies-encounter. Are A and B the same? Yes or No? No\n\nA is fact_relationship-relationship_concept_id. B is patients-gender. Are A and B the same? Yes or No? No\n", "payer_plan_period-payer_plan_period_id": "A is payer_plan_period-payer_plan_period_id. B is providers-zip. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_id. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_id. B is allergies-stop. Are A and B the same? Yes or No? No\n", "payer_plan_period-person_id": "A is payer_plan_period-person_id. B is patients-birthdate. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-person_id. B is medications-cost. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-person_id. B is patients-maiden. Are A and B the same? Yes or No? No\n", "payer_plan_period-payer_plan_period_start_date": "A is payer_plan_period-payer_plan_period_start_date. B is organizations-zip. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_start_date. B is patients-race. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_start_date. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n", "payer_plan_period-payer_plan_period_end_date": "A is payer_plan_period-payer_plan_period_end_date. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_end_date. B is allergies-patient. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_plan_period_end_date. B is encounters-encounterclass. Are A and B the same? Yes or No? 
No\n", "payer_plan_period-payer_source_value": "A is payer_plan_period-payer_source_value. B is conditions-stop. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_source_value. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-payer_source_value. B is patients-passport. Are A and B the same? Yes or No? No\n", "payer_plan_period-plan_source_value": "A is payer_plan_period-plan_source_value. B is organizations-id. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-plan_source_value. B is allergies-start. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-plan_source_value. B is medications-stop. Are A and B the same? Yes or No? No\n", "payer_plan_period-family_source_value": "A is payer_plan_period-family_source_value. B is patients-zip. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-family_source_value. B is observations-encounter. Are A and B the same? Yes or No? No\n\nA is payer_plan_period-family_source_value. B is patients-gender. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_source_name": "A is cdm_source-cdm_source_name. B is immunizations-encounter. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_source_name. B is conditions-description. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_source_name. B is immunizations-date. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_source_abbreviation": "A is cdm_source-cdm_source_abbreviation. B is imaging_studies-id. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_source_abbreviation. B is patients-race. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_source_abbreviation. B is patients-city. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_holder": "A is cdm_source-cdm_holder. B is medications-patient. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_holder. B is allergies-description. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_holder. B is observations-encounter. Are A and B the same? Yes or No? No\n", "cdm_source-source_description": "A is cdm_source-source_description. B is careplans-reasoncode. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_description. B is procedures-cost. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_description. B is providers-speciality. Are A and B the same? Yes or No? No\n", "cdm_source-source_documentation_reference": "A is cdm_source-source_documentation_reference. B is procedures-cost. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_documentation_reference. B is patients-drivers. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_documentation_reference. B is careplans-encounter. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_etl_reference": "A is cdm_source-cdm_etl_reference. B is procedures-description. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_etl_reference. B is procedures-reasondescription. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_etl_reference. B is procedures-date. Are A and B the same? Yes or No? No\n", "cdm_source-source_release_date": "A is cdm_source-source_release_date. B is providers-speciality. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_release_date. B is patients-birthplace. Are A and B the same? Yes or No? No\n\nA is cdm_source-source_release_date. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_release_date": "A is cdm_source-cdm_release_date. B is conditions-stop. 
Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_release_date. B is patients-passport. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_release_date. B is conditions-encounter. Are A and B the same? Yes or No? No\n", "cdm_source-cdm_version": "A is cdm_source-cdm_version. B is patients-state. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_version. B is patients-suffix. Are A and B the same? Yes or No? No\n\nA is cdm_source-cdm_version. B is patients-id. Are A and B the same? Yes or No? No\n", "cdm_source-vocabulary_version": "A is cdm_source-vocabulary_version. B is encounters-encounterclass. Are A and B the same? Yes or No? No\n\nA is cdm_source-vocabulary_version. B is conditions-patient. Are A and B the same? Yes or No? No\n\nA is cdm_source-vocabulary_version. B is allergies-code. Are A and B the same? Yes or No? No\n", "care_site-care_site_id": "A is care_site-care_site_id. B is patients-race. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_id. B is patients-address. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_id. B is providers-id. Are A and B the same? Yes or No? No\n", "care_site-care_site_name": "A is care_site-care_site_name. B is careplans-patient. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_name. B is careplans-description. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_name. B is organizations-phone. Are A and B the same? Yes or No? No\n", "care_site-place_of_service_concept_id": "A is care_site-place_of_service_concept_id. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n\nA is care_site-place_of_service_concept_id. B is patients-id. Are A and B the same? Yes or No? No\n\nA is care_site-place_of_service_concept_id. B is medications-stop. Are A and B the same? Yes or No? No\n", "care_site-location_id": "A is care_site-location_id. B is patients-city. Are A and B the same? Yes or No? No\n\nA is care_site-location_id. B is immunizations-cost. Are A and B the same? Yes or No? No\n\nA is care_site-location_id. B is organizations-phone. Are A and B the same? Yes or No? No\n", "care_site-care_site_source_value": "A is care_site-care_site_source_value. B is immunizations-cost. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_source_value. B is careplans-id. Are A and B the same? Yes or No? No\n\nA is care_site-care_site_source_value. B is imaging_studies-body site code. Are A and B the same? Yes or No? No\n", "care_site-place_of_service_source_value": "A is care_site-place_of_service_source_value. B is conditions-encounter. Are A and B the same? Yes or No? No\n\nA is care_site-place_of_service_source_value. B is allergies-description. Are A and B the same? Yes or No? No\n\nA is care_site-place_of_service_source_value. B is allergies-start. Are A and B the same? Yes or No? No\n", "cohort-cohort_definition_id": "A is cohort-cohort_definition_id. B is immunizations-date. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_definition_id. B is procedures-patient. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_definition_id. B is providers-speciality. Are A and B the same? Yes or No? No\n", "cohort-subject_id": "A is cohort-subject_id. B is organizations-address. Are A and B the same? Yes or No? No\n\nA is cohort-subject_id. B is allergies-stop. Are A and B the same? Yes or No? No\n\nA is cohort-subject_id. B is organizations-zip. Are A and B the same? Yes or No? No\n", "cohort-cohort_start_date": "A is cohort-cohort_start_date. 
B is organizations-state. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_start_date. B is imaging_studies-sop description. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_start_date. B is observations-patient. Are A and B the same? Yes or No? No\n", "cohort-cohort_end_date": "A is cohort-cohort_end_date. B is imaging_studies-body site description. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_end_date. B is patients-id. Are A and B the same? Yes or No? No\n\nA is cohort-cohort_end_date. B is patients-last. Are A and B the same? Yes or No? No\n", "cohort_definition-cohort_definition_id": "A is cohort_definition-cohort_definition_id. B is patients-birthdate. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_id. B is patients-deathdate. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_id. B is organizations-name. Are A and B the same? Yes or No? No\n", "cohort_definition-cohort_definition_name": "A is cohort_definition-cohort_definition_name. B is allergies-encounter. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_name. B is conditions-encounter. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_name. B is allergies-start. Are A and B the same? Yes or No? No\n", "cohort_definition-cohort_definition_description": "A is cohort_definition-cohort_definition_description. B is organizations-address. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_description. B is encounters-start. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_description. B is careplans-reasondescription. Are A and B the same? Yes or No? No\n", "cohort_definition-definition_type_concept_id": "A is cohort_definition-definition_type_concept_id. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is cohort_definition-definition_type_concept_id. B is medications-cost. Are A and B the same? Yes or No? No\n\nA is cohort_definition-definition_type_concept_id. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n", "cohort_definition-cohort_definition_syntax": "A is cohort_definition-cohort_definition_syntax. B is patients-city. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_syntax. B is medications-stop. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_definition_syntax. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n", "cohort_definition-subject_concept_id": "A is cohort_definition-subject_concept_id. B is procedures-description. Are A and B the same? Yes or No? No\n\nA is cohort_definition-subject_concept_id. B is encounters-description. Are A and B the same? Yes or No? No\n\nA is cohort_definition-subject_concept_id. B is encounters-reasoncode. Are A and B the same? Yes or No? No\n", "cohort_definition-cohort_initiation_date": "A is cohort_definition-cohort_initiation_date. B is encounters-reasondescription. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_initiation_date. B is patients-marital. Are A and B the same? Yes or No? No\n\nA is cohort_definition-cohort_initiation_date. B is allergies-description. Are A and B the same? Yes or No? No\n", "provider-provider_id": "A is provider-provider_id. B is observations-units. Are A and B the same? Yes or No? No\n\nA is provider-provider_id. B is imaging_studies-patient. Are A and B the same? Yes or No? No\n\nA is provider-provider_id. 
B is patients-race. Are A and B the same? Yes or No? No\n", "provider-provider_name": "A is provider-provider_name. B is patients-race. Are A and B the same? Yes or No? No\n\nA is provider-provider_name. B is immunizations-patient. Are A and B the same? Yes or No? No\n\nA is provider-provider_name. B is procedures-reasoncode. Are A and B the same? Yes or No? No\n", "provider-npi": "A is provider-npi. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is provider-npi. B is medications-start. Are A and B the same? Yes or No? No\n\nA is provider-npi. B is careplans-start. Are A and B the same? Yes or No? No\n", "provider-dea": "A is provider-dea. B is organizations-phone. Are A and B the same? Yes or No? No\n\nA is provider-dea. B is allergies-start. Are A and B the same? Yes or No? No\n\nA is provider-dea. B is providers-address. Are A and B the same? Yes or No? No\n", "provider-specialty_concept_id": "A is provider-specialty_concept_id. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is provider-specialty_concept_id. B is encounters-id. Are A and B the same? Yes or No? No\n\nA is provider-specialty_concept_id. B is medications-start. Are A and B the same? Yes or No? No\n", "provider-care_site_id": "A is provider-care_site_id. B is providers-id. Are A and B the same? Yes or No? No\n\nA is provider-care_site_id. B is organizations-id. Are A and B the same? Yes or No? No\n\nA is provider-care_site_id. B is patients-prefix. Are A and B the same? Yes or No? No\n", "provider-year_of_birth": "A is provider-year_of_birth. B is patients-ethnicity. Are A and B the same? Yes or No? No\n\nA is provider-year_of_birth. B is observations-code. Are A and B the same? Yes or No? No\n\nA is provider-year_of_birth. B is organizations-zip. Are A and B the same? Yes or No? No\n", "provider-gender_concept_id": "A is provider-gender_concept_id. B is careplans-patient. Are A and B the same? Yes or No? No\n\nA is provider-gender_concept_id. B is medications-encounter. Are A and B the same? Yes or No? No\n\nA is provider-gender_concept_id. B is patients-state. Are A and B the same? Yes or No? No\n", "provider-provider_source_value": "A is provider-provider_source_value. B is allergies-description. Are A and B the same? Yes or No? No\n\nA is provider-provider_source_value. B is patients-marital. Are A and B the same? Yes or No? No\n\nA is provider-provider_source_value. B is conditions-description. Are A and B the same? Yes or No? No\n", "provider-specialty_source_value": "A is provider-specialty_source_value. B is organizations-name. Are A and B the same? Yes or No? No\n\nA is provider-specialty_source_value. B is careplans-code. Are A and B the same? Yes or No? No\n\nA is provider-specialty_source_value. B is patients-city. Are A and B the same? Yes or No? No\n", "provider-specialty_source_concept_id": "A is provider-specialty_source_concept_id. B is careplans-start. Are A and B the same? Yes or No? No\n\nA is provider-specialty_source_concept_id. B is encounters-provider. Are A and B the same? Yes or No? No\n\nA is provider-specialty_source_concept_id. B is medications-patient. Are A and B the same? Yes or No? No\n", "provider-gender_source_value": "A is provider-gender_source_value. B is providers-name. Are A and B the same? Yes or No? No\n\nA is provider-gender_source_value. B is imaging_studies-sop code. Are A and B the same? Yes or No? No\n\nA is provider-gender_source_value. B is observations-encounter. Are A and B the same? Yes or No? 
No\n", "provider-gender_source_concept_id": "A is provider-gender_source_concept_id. B is patients-drivers. Are A and B the same? Yes or No? No\n\nA is provider-gender_source_concept_id. B is observations-patient. Are A and B the same? Yes or No? No\n\nA is provider-gender_source_concept_id. B is patients-maiden. Are A and B the same? Yes or No? No\n", }, f"{DATASET_PATH}/error_detection/Adult": { "workclass": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in workclass: Private? No\n\nage: <18. workclass: nan. education: 10th. maritalstatus: Never-married. occupation: nan. relationship: Own-child. race: White. sex: Female. hoursperweek: 40. country: United-States. income: LessThan50K. \n\nIs there an error in workclass: nan? No\n\nage: 18-21. workclass: nan. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LesjsThan50K.\n\nIs there an error in workclass: nan? No\n", "education": "\nage: 18-21. workclass: Private. education: 11th. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in education: 11th? No\n\n---\n\nage: 18-21. workclass: Private. education: 1st-4th. maritalstatus: Never-married. occupation: Exec-managerial. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in education: 1st-4th? No\n\n---\n\nage: 18-21. workclass: Private. education: nan?. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LesjsThan50K.\n\nIs there an error in education: nan? No\n\n---\n", "maritalstatus": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in maritalstatus: Never-married? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Married-AF-spouse. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in maritalstatus: Married-AF-spouse? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: nan?. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThen50K***.\n\nIs there an error in maritalstatus nan? No\n\n---\n", "occupation": "\nage: 18-21. workclass: Private. education: Doctorate. maritalstatus: Never-married. occupation: Adm-clerical. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThen50K.\n\nIs there an error in occupation: Adm-clerical? No\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in occupation: Prof-specialty? No\n\nage: 18-21. 
workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Protective-serv. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThen50K***.\n\nIs there an error in occupation: Protective-serv? No\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: nan. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LesjsThan50K.\n\nIs there an error in occupation: nan? No\n", "relationship": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: MoreThan50K***.\n\nIs there an error in relationship: Own-child? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: nan?. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LesjsThan50K.\n\nIs there an error in relationship: nan? No\n\n---\n", "race": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in race: White? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: nan?. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessjThen50K.\n\nIs there an error in race: nan? No\n\n---\n", "sex": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: **MoreThan50K.\n\nIs there an error in sex: Male? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: nan?. hoursperweek: 18-21. country: Cambodia. income: LessjThen50K.\n\nIs there an error in sex: nan? No\n\n---\n", "hoursperweek": "\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThen50K***.\n\nIs there an error in hoursperweek: 18-21? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: nan. country: Cambodia. income: LessjThen50K.\n\nIs there an error in hoursperweek: nan? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 78. country: Cambodia. income: LessjThen50K.\n\nIs there an error in hoursperweek: 78? No\n\n---\n", "country": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Hong. income: **MoreThan50K.\n\nIs there an error in country: Hong? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. 
sex: Male. hoursperweek: 18-21. country: Columbia. income: **MoreThan50K.\n\nIs there an error in country: Columbia? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: South. income: **MoreThan50K.\n\nIs there an error in country: South? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: nan. income: LessjThen50K.\n\nIs there an error in country: nan? No\n\n---\n", "income": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: MoreThan50K.\n\nIs there an error in income: MoreThan50K? No\nIs MoreThan50K in [MoreThan50K, LessThan50K]? Yes\nIs there an error? No\n\nNo\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: MoreThan0K.\n\nIs there an error in income: MoreThan0K? Yes\nIs MoreThan0K in [MoreThan50K, LessThan50K]? No\nIs there an error? Yes\n\nYes\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThan50K.\n\nIs there an error in income: LessThan50K? No\nIs LessThan50K in [MoreThan50K, LessThan50K]? Yes\nIs there an error? No\n\nNo\n\n---\n\nage: 18-21. workclass: nan?. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: LessThan0K.\n\nIs there an error in income: LessThan0K? Yes\nIs LessThan0K in [MoreThan50K, LessThan50K]? No\nIs there an error? Yes\n\nYes\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: United-States. income: LessThan5K.\n\nIs there an error in income: LessThan5K? Yes\nIs LessThan5K in [MoreThan50K, LessThan50K]? No\nIs there an error? Yes\n\nYes\n\n---\n\nage: 18-21. workclass: nan?. education: Some-college. maritalstatus: Never-married. occupation: nan?. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: United-States. income: LessThan0K.\n\nIs there an error in income: LessThan0K? Yes\nIs LessThan0K in [MoreThan50K, LessThan50K]? No\nIs there an error? Yes\n\nYes\n\n---\n", "age": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: MoreThan50K.\n\nIs there an error in age: 18-21? True\n1. Is age=18-21? True\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nTrue, True, True, True, True: Yes\n\nYes\n\nage: nan. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. 
income: MoreThan50K.\n\nIs there an error in age: nan? True\n1. Is age=18-21? False\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nFalse, True, True, True, True: No\n\nNo\n\nage: 18-21. workclass: nan . education: Some-college. maritalstatus: Never-married. occupation: nan. relationship: Own-child. race: White. sex: Male. hoursperweek: 15. country: Cambodia. income: MoreThan50K.\n\nIs there an error in age: 18-21? True\n1. Is age=18-21? True\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nTrue, True, True, True, True: Yes\n\nYes\n\nage: <18. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Cambodia. income: MoreThan50K.\n\nIs there an error in age: <18? True\n1. Is age=18-21? False\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nFalse, True, True, True, True: No\n\nNo\n\nage: 31-50. workclass: nan. education: Some-college. maritalstatus: Never-married. occupation: nan. relationship: Own-child. race: White. sex: Male. hoursperweek: 15. country: Cambodia. income: MoreThan50K.\n\nIs there an error in age: 31-50? True\n1. Is age=18-21? False\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nFalse, True, True, True, True: No\n\nNo\n\nage: 22-30. workclass: nan. education: Some-college. maritalstatus: Never-married. occupation: nan?. relationship: Own-child. race: White. sex: Male. hoursperweek: 15. country: Cambodia. income: MoreThan50K.\n\nIs there an error in age: 22-30? True\n1. Is age=18-21? False\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? True\n\nFalse, True, True, True, True: No\n\nNo\n\nage: 22-30. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: United-States. income: LessThen50K.\n\nIs there an error in age: 22-30? True\n1. Is age=18-21? False\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? False\n\nFalse, True, True, True, False: No\n\nNo\n\nage: 18-21. workclass: State-gov. education: Some-college. maritalstatus: Never-married. occupation: nan?. relationship: Own-child. race: White. sex: Male. hoursperweek: 15. country: Cambodia. income: LessThan50K.\n\nIs there an error in age: 18-21? True\n1. Is age=18-21? True\n2. Is maritalstatus=Never-married? True\n3. Is relationship=Own-child? True\n4. Is sex=Male? True\n5. Is income=MoreThan50K? False\n\nTrue, True, True, True, False: No\n\nNo\n", "country_prefix": "\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Hong. income: **MoreThan50K.\n\nIs there an error in country: Hong? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: Columbia. 
income: **MoreThan50K.\n\nIs there an error in country: Columbia? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: South. income: **MoreThan50K.\n\nIs there an error in country: South? No\n\n---\n\nage: 18-21. workclass: Private. education: Some-college. maritalstatus: Never-married. occupation: Prof-specialty. relationship: Own-child. race: White. sex: Male. hoursperweek: 18-21. country: nan. income: LessjThen50K.\n\nIs there an error in country: nan? No\n\n---\n", }, f"{DATASET_PATH}/entity_matching/structured/Amazon-Google": { "microsoft software": "Product A is title: microsoft windows small business server cal 2003 license pack 20 client addpack device. manufacturer: microsoft software. price: . Product B is title: windows sbs cal 2003 20-clt addpak device cal microsoft t74-00003. manufacturer: . price: 1413.42. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft sql server developer edition 2005 cd/dvd. manufacturer: microsoft software. price: 49.95. Product B is title: microsoft e32-00575 sql server 2005 developer edition x64 complete product dbms 1 developer ( s ) complete product standard english pc. manufacturer: . price: 43.97. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft visual studio professional 2005 upgrade. manufacturer: microsoft software. price: 549.0. Product B is title: visual studio pro 2005 upgrade ( pc ) microsoft. manufacturer: . price: 549.0. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft sql server developer edition 2005 cd/dvd. manufacturer: microsoft software. price: 49.95. Product B is title: 395-03824 microsoft exchange server 2007 enterprise edition complete package 1 server. manufacturer: . price: 6594.2. Are Product A and Product B the same? No\n\nProduct A is title: microsoft crm professional cal 3.0 product upgrade license pack user cal. manufacturer: microsoft software. price: 9980.0. Product B is title: c8a-00066 microsoft dynamics crm professional v. 3.0 product upgrade license 20. manufacturer: . price: 9676.92. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office small business 2007. manufacturer: microsoft software. price: 735.33. Product B is title: microsoft windows vista business retail no open box returns. manufacturer: . price: 299.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft visual studio professional 2005 upgrade. manufacturer: microsoft software. price: 549.0. Product B is title: microsoft ( r ) excel 2007. manufacturer: . price: 109.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office small business 2007. manufacturer: microsoft software. price: 735.33. Product B is title: microsoft w87-02380 office sbe 2007 win32. manufacturer: . price: 400.34. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft crm small business ed cal 3.0 license pack user cal. manufacturer: microsoft software. price: 9929.0. Product B is title: r18-00144 microsoft windows server license & software assurance 1 cal open business. manufacturer: . price: 41.58. Are Product A and Product B the same? No\n\n", "adobe": "Product A is title: adobe creative suite cs3 design standard [ mac ]. manufacturer: adobe. price: 1199.0. Product B is title: adobe cs3 design standard. manufacturer: . price: 1243.99. 
Are Product A and Product B the same? Yes\n\nProduct A is title: adobe premiere pro cs3 upgrade. manufacturer: adobe. price: 299.0. Product B is title: adobe cs3 design premium. manufacturer: . price: 1865.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe dreamweaver cs3 upgrade [ mac ]. manufacturer: adobe. price: 199.0. Product B is title: adobe creative suite 3 ( cs3 ) design suite standard upgrade ( upsell ) mac. manufacturer: . price: 859.96. Are Product A and Product B the same? No\n\nProduct A is title: adobe creative suite cs3 web standard. manufacturer: adobe. price: 999.0. Product B is title: adobe creative suite 3 web standard complete package academic cd mac. manufacturer: . price: 369.0. Are Product A and Product B the same? No\n\nProduct A is title: adobe premiere pro cs3. manufacturer: adobe. price: 799.0. Product B is title: adobe premiere pro cs3 software with encore cs3 and onlocation cs3 ( windows only ) full version for windows. manufacturer: . price: 723.95. Are Product A and Product B the same? Yes\n\nProduct A is title: adobe premiere pro cs3. manufacturer: adobe. price: 799.0. Product B is title: adobe soundbooth cs3 academic. manufacturer: . price: 95.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe soundbooth cs3 [ mac ]. manufacturer: adobe. price: 199.0. Product B is title: adobe soundbooth cs3 audio editing software mac music production software. manufacturer: . price: 198.95. Are Product A and Product B the same? Yes\n\nProduct A is title: photoshop elements 4 mac retail eng 1u ( 19230169 ). manufacturer: adobe. price: 145.39. Product B is title: adobe photoshop elements 4.0 mac. manufacturer: . price: 79.99. Are Product A and Product B the same? Yes\n\nProduct A is title: adobe captivate 2.0. manufacturer: adobe. price: 1083.95. Product B is title: adobe captivate 2 software for windows presentation software. manufacturer: . price: 598.95. Are Product A and Product B the same? Yes\n\nProduct A is title: adobe after effects professional 7.0. manufacturer: adobe. price: 999.0. Product B is title: adobe flash cs3 professional ( mac ). manufacturer: . price: 699.0. Are Product A and Product B the same? No\n\n", "topics entertainment": "Product A is title: microsoft office & windows training. manufacturer: topics entertainment. price: 29.99. Product B is title: microsoft office and windows training ( win 98 me nt 2000 xp ). manufacturer: . price: 29.95. Are Product A and Product B the same? Yes\n\nProduct A is title: snap ! spelling ( jewel case ). manufacturer: topics entertainment. price: 19.95. Product B is title: spelling. manufacturer: . price: 4.93. Are Product A and Product B the same? Yes\n\nProduct A is title: instant immersion spanish deluxe 2.0. manufacturer: topics entertainment. price: 49.99. Product B is title: instant immersion italian 2.0. manufacturer: topics entertainment. price: 20.99. Are Product A and Product B the same? No\n\nProduct A is title: snap ! spelling ( jewel case ). manufacturer: topics entertainment. price: 19.95. Product B is title: spelling. manufacturer: . price: 4.93. Are Product A and Product B the same? Yes\n\nProduct A is title: instant immersion italian platinum. manufacturer: topics entertainment. price: 129.99. Product B is title: instant immersion italian platinum ( win 95 98 me nt 2000 xp ). manufacturer: . price: 129.99. Are Product A and Product B the same? Yes\n\nProduct A is title: instant immersion german audio. manufacturer: topics entertainment. price: . 
Product B is title: topics entertainment instant immersion italian 2.0. manufacturer: . price: 17.55. Are Product A and Product B the same? No\n\nProduct A is title: instant immersion german deluxe 2.0. manufacturer: topics entertainment. price: 39.99. Product B is title: instant immersion japanese 2.0. manufacturer: . price: 22.43. Are Product A and Product B the same? No\n\nProduct A is title: instant immersion german deluxe 2.0. manufacturer: topics entertainment. price: 39.99. Product B is title: instant immers french dlx 2. manufacturer: . price: 27.57. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office & windows training professional. manufacturer: topics entertainment. price: 49.95. Product B is title: adobe after effects cs3 professional software for windows effects software. manufacturer: . price: 998.95. Are Product A and Product B the same? No\n\nProduct A is title: national geographic presents : redshift 5 planetarium software. manufacturer: topics entertainment. price: 39.99. Product B is title: redshift 5 ( national geographic presents ) ( win 98 me 2000 xp ). manufacturer: . price: 39.99. Are Product A and Product B the same? Yes\n\n", "me-too-software": "Product A is title: i copy dvds 2 professional edition. manufacturer: me-too-software. price: 69.99. Product B is title: resume maker professional 12. manufacturer: . price: 17.95. Are Product A and Product B the same? No\n\nProduct A is title: i transfer dvds 2 ipod edition ( win/mac ). manufacturer: me-too-software. price: 29.99. Product B is title: i copy dvds 2 ultra edition. manufacturer: . price: 29.9. Are Product A and Product B the same? No\n\nProduct A is title: i copy dvds 2 professional edition. manufacturer: me-too-software. price: 69.99. Product B is title: i copydvds 2 professional edition. manufacturer: me-too-software. price: 27.9. Are Product A and Product B the same? Yes\n\nProduct A is title: i copy dvds 2 professional edition. manufacturer: me-too-software. price: 69.99. Product B is title: diskeeper 2007 professional. manufacturer: . price: 46.95. Are Product A and Product B the same? No\n\nProduct A is title: i copy dvds 2 professional edition. manufacturer: me-too-software. price: 69.99. Product B is title: adobe dv rack 2.0 hd software for windows professional editing software. manufacturer: . price: 499.0. Are Product A and Product B the same? No\n\nProduct A is title: i copy dvds 2 professional edition. manufacturer: me-too-software. price: 69.99. Product B is title: sony vegas 6 video editing software professional editing software. manufacturer: sony-pictures-digital-entertainment. price: 99.0. Are Product A and Product B the same? No\n\n", "microsoft licenses": "Product A is title: microsoft licenses win svr 2003 ext conn lic ( r3900292 ). manufacturer: microsoft licenses. price: 3371.85. Product B is title: microsoft windows terminal server 2003 client additional license for users - 5 user ( 824048 ). manufacturer: . price: 719.04. Are Product A and Product B the same? No\n\nProduct A is title: microsoft licenses win svr 2003 ext conn lic ( r3900292 ). manufacturer: microsoft licenses. price: 3371.85. Product B is title: microsoft windows terminal server 2003 client additional license for users - 5 user ( 824048 ). manufacturer: . price: 719.04. Are Product A and Product B the same? No\n\nProduct A is title: microsoft licenses word mac 10.0 eng govt ( d4800369 ). manufacturer: microsoft licenses. price: 349.83. Product B is title: microsoft ( r ) word 2007. 
manufacturer: . price: 109.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft licenses word mac 10.0 eng govt ( d4800369 ). manufacturer: microsoft licenses. price: 349.83. Product B is title: microsoft d48-00369 word mac 10.0 eng gov ' t 659556873790. manufacturer: . price: 191.94. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft licenses win svr 2003 ext conn lic ( r3900292 ). manufacturer: microsoft licenses. price: 3371.85. Product B is title: microsoft onenote 2007 ( pc ). manufacturer: . price: 99.95. Are Product A and Product B the same? No\n\nProduct A is title: microsoft licenses sps extrnlconnnonemplyenglands c ( h3200034 ). manufacturer: microsoft licenses. price: 101515.55. Product B is title: microsoft h32-00034 sps extrnlconnnonemplyengl & s c 805529073074. manufacturer: . price: 55420.56. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft licenses word olp c ( 05903871 ). manufacturer: microsoft licenses. price: 205.73. Product B is title: microsoft ( r ) works suite 2006. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft licenses word olp c ( 05903871 ). manufacturer: microsoft licenses. price: 205.73. Product B is title: microsoft 059-03871 molpc word sa. manufacturer: . price: 111.78. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft licenses biztalk hipaa std . sa only ( u0200024 ). manufacturer: microsoft licenses. price: 4215.77. Product B is title: microsoft u02-00024 open sa biztalk hipaa accltr nt 1p. manufacturer: . price: 2335.47. Are Product A and Product B the same? Yes\n\n", "microsoft": "Product A is title: microsoft visio standard 2007 version upgrade. manufacturer: microsoft. price: 129.95. Product B is title: microsoft office visio standard 2007 upgrade ( pc ). manufacturer: . price: 129.95. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft word 2007 version upgrade. manufacturer: microsoft. price: 109.95. Product B is title: microsoft word 2007 upgrade ( pc ). manufacturer: . price: 109.95. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft office frontpage 2003 academic version windows ). manufacturer: microsoft. price: 159.98. Product B is title: microsoft windows vista business full version. manufacturer: . price: 249.95. Are Product A and Product B the same? No\n\nProduct A is title: microsoft mappoint 2006 with gps. manufacturer: microsoft. price: 349.0. Product B is title: microsoft student with encarta premium 2008 complete package. manufacturer: . price: 43.6. Are Product A and Product B the same? No\n\nProduct A is title: zoo tycoon 2 : marine mania expansion. manufacturer: microsoft. price: 19.99. Product B is title: zoo tycoon for windows. manufacturer: . price: 25.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft office small business 2007 full version. manufacturer: microsoft. price: 449.95. Product B is title: microsoft office 2004 std upgrade. manufacturer: . price: 219.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft visio standard 2007 version upgrade. manufacturer: microsoft. price: 129.95. Product B is title: adobe cs3 design standard upgrade. manufacturer: . price: 413.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft word 2007 version upgrade. manufacturer: microsoft. price: 109.95. Product B is title: microsoft word 2007 upgrade ( pc ). 
manufacturer: . price: 109.95. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft windows vista business full version [ dvd ]. manufacturer: microsoft. price: 299.95. Product B is title: microsoft windows vista business ( pc ). manufacturer: . price: 299.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microsoft windows xp professional full version with sp2. manufacturer: microsoft. price: 299.99. Product B is title: adobe production premium cs3 software suite for windows professional editing software. manufacturer: . price: 1698.95. Are Product A and Product B the same? No\n\n", "steinberg": "Product A is title: steinberg cubase 4 digital audio software. manufacturer: steinberg. price: 999.99. Product B is title: steinberg software cubase se 3.0 educational edition. manufacturer: . price: 145.73. Are Product A and Product B the same? No\n\nProduct A is title: steinberg halion symphonic orchestra virtual instrument ( academic version ). manufacturer: steinberg. price: 374.99. Product B is title: steinberg software halion symphonic orchestra educational edition. manufacturer: . price: 545.51. Are Product A and Product B the same? Yes\n\nProduct A is title: cubase se3 educational edition. manufacturer: steinberg. price: . Product B is title: steinberg software cubase se 3.0 educational edition. manufacturer: . price: 145.73. Are Product A and Product B the same? Yes\n\nProduct A is title: steinberg cubase 4 digital audio software. manufacturer: steinberg. price: 999.99. Product B is title: sony media software sound forge 9 digital audio production suite music production software. manufacturer: . price: 299.95. Are Product A and Product B the same? No\n\nProduct A is title: steinberg cubase 4 digital audio software. manufacturer: steinberg. price: 999.99. Product B is title: steinberg wavelab studio 6 audio editing software competitive crossgrade music production software. manufacturer: . price: 199.95. Are Product A and Product B the same? No\n\nProduct A is title: steinberg nuendo 3 digital audio software. manufacturer: steinberg. price: 2499.0. Product B is title: steinberg cubase studio 4 software competitive crossgrade music production software. manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: steinberg cubase studio 4. manufacturer: steinberg. price: 499.99. Product B is title: steinberg studio case 2 music production system educational discount music production software. manufacturer: . price: 164.95. Are Product A and Product B the same? No\n\nProduct A is title: steinberg cubase studio 4. manufacturer: steinberg. price: 499.99. Product B is title: steinberg cubase studio 4 software music production software. manufacturer: . price: 399.99. Are Product A and Product B the same? Yes\n\nProduct A is title: steinberg cubase 4 digital audio software. manufacturer: steinberg. price: 999.99. Product B is title: steinberg cubase 4 software music production software. manufacturer: . price: 799.99. Are Product A and Product B the same? Yes\n\nProduct A is title: steinberg sequel ( pc or mac ) music creation & performance software. manufacturer: steinberg. price: 129.0. Product B is title: steinberg sequel music software music production software. manufacturer: . price: 99.99. Are Product A and Product B the same? Yes\n\n", "intuit": "Product A is title: quickbooks pro 2007 small business financial software. manufacturer: intuit. price: 199.95. 
Product B is title: intuit quickbooks pro 2007 software for windows tax & finance software. manufacturer: . price: 179.95. Are Product A and Product B the same? Yes\n\nProduct A is title: quicken 2007 deluxe. manufacturer: intuit. price: 59.95. Product B is title: intuit quicken deluxe 2007 software for windows tax & finance software. manufacturer: . price: 54.95. Are Product A and Product B the same? Yes\n\nProduct A is title: quickbooks pro 2007 for mac ( mac ). manufacturer: intuit. price: 199.95. Product B is title: quickbooks ( r ) premier 2003. manufacturer: . price: 499.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks premier professional services edition 2007. manufacturer: intuit. price: 399.95. Product B is title: intuit quickbooks premier professional services edition 2007 software win tax & finance software. manufacturer: . price: 349.95. Are Product A and Product B the same? Yes\n\nProduct A is title: quicken 2007 basic. manufacturer: intuit. price: 29.95. Product B is title: intuit quicken basic 2007 software for windows tax & finance software. manufacturer: . price: 29.95. Are Product A and Product B the same? Yes\n\nProduct A is title: quickbooks premier professional services edition 2007. manufacturer: intuit. price: 399.95. Product B is title: adobe production premium cs3 software suite for windows professional editing software. manufacturer: . price: 1698.95. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks credit card processing kit 3.0. manufacturer: intuit. price: 39.95. Product B is title: quickbooks ( r ). manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks pro 2007 for mac ( mac ). manufacturer: intuit. price: 199.95. Product B is title: intuit quickbooks pro 2007 software for mac finance software. manufacturer: . price: 189.95. Are Product A and Product B the same? Yes\n\nProduct A is title: quicken 2007 premier. manufacturer: intuit. price: 79.95. Product B is title: quickbooks pro 2007 3 user. manufacturer: intuit. price: 398.99. Are Product A and Product B the same? No\n\nProduct A is title: quicken 2007 premier. manufacturer: intuit. price: 79.95. Product B is title: intuit inc quickbooks premier edition 2007 5 user. manufacturer: . price: 1309.31. Are Product A and Product B the same? No\n\n", "nolo press": "Product A is title: quicken legal business pro 2007. manufacturer: nolo press. price: 79.99. Product B is title: quicken home and business 2007 ( pc ) intuit. manufacturer: . price: 89.99. Are Product A and Product B the same? No\n\nProduct A is title: quicken legal business pro 2007. manufacturer: nolo press. price: 79.99. Product B is title: quicken ( r ) for mac 2007. manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: quicken legal business pro 2007. manufacturer: nolo press. price: 79.99. Product B is title: business plan pro ( r ) 2004. manufacturer: . price: 99.87. Are Product A and Product B the same? No\n\nProduct A is title: quicken legal business pro 2007. manufacturer: nolo press. price: 79.99. Product B is title: nolo quicken legal business pro 2007. manufacturer: . price: 67.94. Are Product A and Product B the same? Yes\n\nProduct A is title: quicken legal business pro 2007. manufacturer: nolo press. price: 79.99. Product B is title: quicken 2007 deluxe win cd. manufacturer: intuit. price: 57.99. Are Product A and Product B the same? No\n\nProduct A is title: quicken legal business pro 2007. 
manufacturer: nolo press. price: 79.99. Product B is title: quickbooks pro 2007 3 user. manufacturer: intuit. price: 398.99. Are Product A and Product B the same? No\n\n", "apple": "Product A is title: apple logic pro 7.2 upgrade. manufacturer: apple. price: 299.0. Product B is title: apple/emagic logic pro 7.2 software upgrade music production software. manufacturer: . price: 259.95. Are Product A and Product B the same? Yes\n\nProduct A is title: apple iwork '06 ( mac dvd ). manufacturer: apple. price: 79.0. Product B is title: apple iwork '06. manufacturer: . price: 79.99. Are Product A and Product B the same? Yes\n\nProduct A is title: apple . mac 4.0 online service family pack. manufacturer: apple. price: 179.95. Product B is title: apple . mac 4.0 family pack ( new or renewal ). manufacturer: . price: 145.99. Are Product A and Product B the same? Yes\n\nProduct A is title: logic pro 6 upgrade from logic platinum/gold 5 & 6. manufacturer: apple. price: 199.99. Product B is title: apple software t9161ll/a logic platinum / gold 5 & 6 to logic pro 6 update. manufacturer: apple software. price: 149.0. Are Product A and Product B the same? Yes\n\nProduct A is title: apple final cut studio 2 ( mac ). manufacturer: apple. price: 1299.0. Product B is title: ma888z/a final cut studio v. 2 version upgrade package 1 seat dvd mac. manufacturer: . price: 460.7. Are Product A and Product B the same? No\n\nProduct A is title: apple final cut studio 2 ( mac ). manufacturer: apple. price: 1299.0. Product B is title: apple software m9372z/a final cut express 2.0 upgrade. manufacturer: apple software. price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: apple xsan ( mac ). manufacturer: apple. price: . Product B is title: apple ilife 06. manufacturer: . price: 70.47. Are Product A and Product B the same? No\n\nProduct A is title: apple final cut studio 2 ( mac ). manufacturer: apple. price: 1299.0. Product B is title: adobe flash cs3 professional ( mac ). manufacturer: . price: 699.0. Are Product A and Product B the same? No\n\nProduct A is title: apple remote desktop 2.2 10 client ( old version ). manufacturer: apple. price: 299.0. Product B is title: apple software m9953z/a remote desktop 2.2 10 client. manufacturer: apple software. price: 294.99. Are Product A and Product B the same? Yes\n\nProduct A is title: apple final cut studio 2 ( mac ). manufacturer: apple. price: 1299.0. Product B is title: adobe flash cs3 professional ( mac ). manufacturer: . price: 699.0. Are Product A and Product B the same? No\n\n", "punch ! software": "Product A is title: punch ! professional home design platinum v10. manufacturer: punch ! software. price: 99.0. Product B is title: punch software professional home design platinum version 10. manufacturer: . price: 99.99. Are Product A and Product B the same? Yes\n\nProduct A is title: home design architect series 3000 v10 .0. manufacturer: punch ! software. price: 149.95. Product B is title: punch software 83100 punch ! home design architectural series 3000 v 10 ( win 98 me nt 2000 xp ). manufacturer: . price: 124.97. Are Product A and Product B the same? Yes\n\nProduct A is title: punch 5 in 1 home design. manufacturer: punch ! software. price: 39.99. Product B is title: punch software 41100 punch ! home design architectural series 18. manufacturer: punch software. price: 118.99. Are Product A and Product B the same? No\n\nProduct A is title: punch ! home design architectural series 18. manufacturer: punch ! software. price: 129.99. 
Product B is title: punch software 17100 punch ! super home suite v. 3.0 complete product architectural 1 user complete productcomplete productstandard english pc. manufacturer: . price: 45.97. Are Product A and Product B the same? No\n\nProduct A is title: punch ! home design architectural series 18. manufacturer: punch ! software. price: 129.99. Product B is title: punch software 17100 punch ! super home suite v. 3.0 complete product architectural 1 user complete productcomplete productstandard english pc. manufacturer: . price: 45.97. Are Product A and Product B the same? No\n\nProduct A is title: punch ! professional home design platinum v10. manufacturer: punch ! software. price: 99.0. Product B is title: punch software 41100 punch ! home design architectural series 18. manufacturer: punch software. price: 118.99. Are Product A and Product B the same? No\n\nProduct A is title: punch ! professional home design platinum v10. manufacturer: punch ! software. price: 99.0. Product B is title: punch software 41100 punch ! home design architectural series 18. manufacturer: punch software. price: 118.99. Are Product A and Product B the same? No\n\nProduct A is title: punch ! home design architectural series 18. manufacturer: punch ! software. price: 129.99. Product B is title: punch software 42100 punch ! home design architectural series 18 ( small box ). manufacturer: punch software. price: 119.99. Are Product A and Product B the same? Yes\n\nProduct A is title: punch ! master landscape professional & home design v 10.0. manufacturer: punch ! software. price: 69.95. Product B is title: punch software master landscape professional & home design v 10.0. manufacturer: . price: 69.99. Are Product A and Product B the same? Yes\n\nProduct A is title: punch ! super home suite. manufacturer: punch ! software. price: 49.99. Product B is title: punch software 38100 punch ! super home suite ( small box ). manufacturer: punch software. price: 45.99. Are Product A and Product B the same? Yes\n\n", "aspyr media": "Product A is title: star wars empire at war ( intel only macs ). manufacturer: aspyr media. price: 49.99. Product B is title: aspyr star wars empire at war. manufacturer: . price: 47.99. Are Product A and Product B the same? Yes\n\nProduct A is title: rollercoaster tycoon 3 : soaked. manufacturer: aspyr media. price: 19.99. Product B is title: rollercoaster tycoon 3 soaked. manufacturer: . price: 23.99. Are Product A and Product B the same? Yes\n\nProduct A is title: call of duty 2 dvd-rom ( mac ). manufacturer: aspyr media. price: 49.99. Product B is title: fallout collection ( dvd-rom ). manufacturer: . price: 24.49. Are Product A and Product B the same? No\n\nProduct A is title: sims 2 seasons expansion pack. manufacturer: aspyr media. price: 34.99. Product B is title: age of empires iii : warchiefs expansion pack. manufacturer: . price: 34.95. Are Product A and Product B the same? No\n\nProduct A is title: sims 2 nightlife expansion pack. manufacturer: aspyr media. price: 34.99. Product B is title: aspyr media inc sims 2 nightlife. manufacturer: . price: 33.21. Are Product A and Product B the same? Yes\n\nProduct A is title: lego star wars ( dvd ) ( mac ). manufacturer: aspyr media. price: 29.99. Product B is title: xbox : lego star wars. manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: rollercoaster tycoon 3 ( mac ). manufacturer: aspyr media. price: 49.99. Product B is title: aspyr roller coaster tycoon 3 mac os x. manufacturer: . price: 45.99. 
Are Product A and Product B the same? Yes\n\nProduct A is title: rollercoaster tycoon 3 ( mac ). manufacturer: aspyr media. price: 49.99. Product B is title: roller coaster tycoon 3 for pc. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: sims 2 pets expansion pack. manufacturer: aspyr media. price: 34.99. Product B is title: the sims expansion collection vol 1. manufacturer: . price: 8.72. Are Product A and Product B the same? No\n\nProduct A is title: civilization iv warlords ( mac ). manufacturer: aspyr media. price: 29.99. Product B is title: civilization iv warlords expansion for mac. manufacturer: . price: 29.99. Are Product A and Product B the same? Yes\n\n", "final draft": "Product A is title: final draft 7/syd field 's screenwriting workshop dvd bundle. manufacturer: final draft. price: 249.99. Product B is title: final-draft final draft 7 screen writing software mac/win screen writing software. manufacturer: . price: 249.95. Are Product A and Product B the same? No\n\nProduct A is title: final draft 7 professional scriptwriting win/mac. manufacturer: final draft. price: 289.99. Product B is title: final-draft scriptwriter 's suite 2.5 screenwriting software mac/win screen writing software. manufacturer: . price: 279.95. Are Product A and Product B the same? No\n\nProduct A is title: final draft 7 professional scriptwriting win/mac. manufacturer: final draft. price: 289.99. Product B is title: final-draft final draft 7.0 with syd fields workshop bundle screen writing software. manufacturer: . price: 247.95. Are Product A and Product B the same? No\n\nProduct A is title: final draft 7 professional scriptwriting win/mac. manufacturer: final draft. price: 289.99. Product B is title: final-draft final draft av 2.5 screenwriting software mac/win screen writing software. manufacturer: . price: 199.95. Are Product A and Product B the same? No\n\nProduct A is title: final draft 7/syd field 's screenwriting workshop dvd bundle. manufacturer: final draft. price: 249.99. Product B is title: final-draft final draft 7.0 with syd fields workshop bundle screen writing software. manufacturer: . price: 247.95. Are Product A and Product B the same? Yes\n\nProduct A is title: final draft 7/syd field 's screenwriting workshop dvd bundle. manufacturer: final draft. price: 249.99. Product B is title: final-draft final draft av 2.5 screenwriting software mac/win screen writing software. manufacturer: . price: 199.95. Are Product A and Product B the same? No\n\n", "sony-pictures-digital-entertainment": "Product A is title: sony sound forge audio studio 8. manufacturer: sony-pictures-digital-entertainment. price: 69.99. Product B is title: sony creative software sound forge audio studio v. 8.0 complete product. manufacturer: . price: 55.29. Are Product A and Product B the same? No\n\nProduct A is title: cinescore professional soundtrack edition. manufacturer: sony-pictures-digital-entertainment. price: 249.95. Product B is title: microsoft windows xp professional edition ( upgrade ). manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: sony vegas 6 + dvd [ old version ]. manufacturer: sony-pictures-digital-entertainment. price: 899.95. Product B is title: vegas + dvd sony. manufacturer: sony-pictures-digital-entertainment. price: 899.99. Are Product A and Product B the same? Yes\n\nProduct A is title: cinescore professional soundtrack edition. manufacturer: sony-pictures-digital-entertainment. price: 249.95. 
Product B is title: sony media software cinescore soundtrack creation software audio software for video. manufacturer: sony-pictures-digital-entertainment. price: 179.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sony sound forge audio studio 8. manufacturer: sony-pictures-digital-entertainment. price: 69.99. Product B is title: sony media software cinescore soundtrack creation software audio software for video. manufacturer: sony-pictures-digital-entertainment. price: 179.95. Are Product A and Product B the same? No\n\nProduct A is title: sony sound forge audio studio 8. manufacturer: sony-pictures-digital-entertainment. price: 69.99. Product B is title: sony media software sound forge 9 digital audio production suite music production software. manufacturer: . price: 299.95. Are Product A and Product B the same? No\n\nProduct A is title: sony sound effects series vol 1 through 10. manufacturer: sony-pictures-digital-entertainment. price: . Product B is title: madison media software ssda3000 sony sound effects series vol 1 through 10. manufacturer: sony-pictures-digital-entertainment. price: 370.97. Are Product A and Product B the same? Yes\n\nProduct A is title: sony acid pro 6 professional music workstation. manufacturer: sony-pictures-digital-entertainment. price: 499.95. Product B is title: diskeeper corporation diskeeper 2007 professional edition complete package 1 workstation academic. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: upgrade acid pro 6 boxed. manufacturer: sony-pictures-digital-entertainment. price: 305.18. Product B is title: sony media software acid pro 6 software upgrade from acid pro 5 music production software. manufacturer: sony-pictures-digital-entertainment. price: 129.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sony acid music studio. manufacturer: sony-pictures-digital-entertainment. price: 69.99. Product B is title: acid music studio 6. manufacturer: sony-pictures-digital-entertainment. price: 59.95. Are Product A and Product B the same? Yes\n\n", "phantom efx": "Product A is title: reel deal casino shuffle master edition. manufacturer: phantom efx. price: 19.99. Product B is title: phantom efx reel deal casino shuffle master edition. manufacturer: . price: 17.24. Are Product A and Product B the same? Yes\n\nProduct A is title: reel deal slots 2.0 for pc. manufacturer: phantom efx. price: 9.99. Product B is title: phantom efx reel deal slots mystic forest. manufacturer: . price: 17.24. Are Product A and Product B the same? No\n\nProduct A is title: reel deal casino shuffle master edition. manufacturer: phantom efx. price: 19.99. Product B is title: mastercook deluxe low carb edition. manufacturer: . price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: reel deal casino shuffle master edition. manufacturer: phantom efx. price: 19.99. Product B is title: reel deal casino championship edition ( win 98 me nt 2000 xp ). manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: reel deal slots 2.0 for pc. manufacturer: phantom efx. price: 9.99. Product B is title: phantom efx reel deal slots v 2.0. manufacturer: . price: 7.36. Are Product A and Product B the same? Yes\n\nProduct A is title: reel deal slots 2.0 for pc. manufacturer: phantom efx. price: 9.99. Product B is title: phantom efx reel deal slots v 2.0. manufacturer: . price: 7.36. Are Product A and Product B the same? 
Yes\n\nProduct A is title: reel deal casino quest. manufacturer: phantom efx. price: 19.99. Product B is title: reel deal casino championship edition ( win 98 me nt 2000 xp ). manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: reel deal casino championship edition. manufacturer: phantom efx. price: 19.99. Product B is title: reel deal casino championship edition ( win 98 me nt 2000 xp ). manufacturer: . price: 19.95. Are Product A and Product B the same? Yes\n\nProduct A is title: reel deal casino championship edition. manufacturer: phantom efx. price: 19.99. Product B is title: reel deal casino quest ( win 95 98 me 2000 xp ). manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\n", "panda software": "Product A is title: panda antivirus 2007. manufacturer: panda software. price: 29.95. Product B is title: symantec norton anti-virus 2007 windows. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: panda software antivirus & firewall 2008 windows. manufacturer: panda software. price: . Product B is title: intuit quicken home and business 2007 software for windows tax & finance software. manufacturer: . price: 89.95. Are Product A and Product B the same? No\n\nProduct A is title: panda antivirus 2007. manufacturer: panda software. price: 29.95. Product B is title: norton antivirus 2007 3 user pack. manufacturer: . price: 49.95. Are Product A and Product B the same? No\n\nProduct A is title: panda internet security 2007 3-user. manufacturer: panda software. price: 69.95. Product B is title: panda software panda internet security 2007. manufacturer: . price: 69.99. Are Product A and Product B the same? Yes\n\nProduct A is title: panda antivirus 2007. manufacturer: panda software. price: 29.95. Product B is title: sos aggregation company panda antivirus 2007. manufacturer: . price: 23.72. Are Product A and Product B the same? Yes\n\nProduct A is title: panda internet security 2007 3-user. manufacturer: panda software. price: 69.95. Product B is title: norton internet security 2008 10 user. manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: panda antivirus 2007. manufacturer: panda software. price: 29.95. Product B is title: panda software panda internet security 2007. manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\n", "macsoft": "Product A is title: rise of nations : gold ( mac ). manufacturer: macsoft. price: 49.99. Product B is title: destineer rise of nations gold edition. manufacturer: . price: 45.99. Are Product A and Product B the same? Yes\n\nProduct A is title: halo ( mac ). manufacturer: macsoft. price: 29.99. Product B is title: adobe illustrator cs3 ( mac ). manufacturer: . price: 599.0. Are Product A and Product B the same? No\n\nProduct A is title: railroad tycoon 3 ( mac ). manufacturer: macsoft. price: 19.99. Product B is title: destineer railroad tycoon 3. manufacturer: . price: 21.99. Are Product A and Product B the same? Yes\n\nProduct A is title: halo ( mac ). manufacturer: macsoft. price: 29.99. Product B is title: apple appleworks 6.2.9 ( mac ). manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: rise of nations : gold ( mac ). manufacturer: macsoft. price: 49.99. Product B is title: microsoft rise of nations : rise of legends for pc. manufacturer: . price: 16.99. Are Product A and Product B the same? 
No\n\n", "hewlett-packard - ( consumables )": "Product A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: firewall feature set complete package. manufacturer: . price: 506.58. Are Product A and Product B the same? No\n\nProduct A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: vp-wf04-00s1 kofax virtualrescan plus upgrade kit v. 4.0 complete package 1 user. manufacturer: . price: 879.93. Are Product A and Product B the same? No\n\nProduct A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: 9002 refviz v. 2 complete package 1 user mac. manufacturer: . price: 160.88. Are Product A and Product B the same? No\n\nProduct A is title: hp sp linux we 50 lic/cd 3.0 c t3586a ). manufacturer: hewlett-packard - ( consumables ). price: . Product B is title: sp linux we 50 lic/cd 3.0 c. manufacturer: . price: 69216.95. Are Product A and Product B the same? Yes\n\nProduct A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: prw photo explosion deluxe v. 3.0 complete package 1 user cd win. manufacturer: . price: 47.6. Are Product A and Product B the same? No\n\nProduct A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: sony creative software sound forge audio studio v. 8.0 complete product. manufacturer: . price: 46.47. Are Product A and Product B the same? No\n\nProduct A is title: hp dss software v. 4.0 complete package t1936aa ua0 ). manufacturer: hewlett-packard - ( consumables ). price: 630.36. Product B is title: hp t1936aa uao digital sending software 4.0 ( 10 device license ). manufacturer: hp. price: 369.99. Are Product A and Product B the same? Yes\n\nProduct A is title: hp sp linux we 50 lic/cd 3.0 c t3586a ). manufacturer: hewlett-packard - ( consumables ). price: . Product B is title: sp linux we 50 lic/cd 3.0 c. manufacturer: hewlett-packard - ( consumables ). price: 69216.95. Are Product A and Product B the same? Yes\n\n", "global-software-publishing": "Product A is title: studyworks ! teaching pro : middle school math & science ( win/mac ). manufacturer: global-software-publishing. price: 29.99. Product B is title: middle school life science classification & ecology ( pc/mac ) fogware. manufacturer: . price: 9.99. Are Product A and Product B the same? No\n\nProduct A is title: dk smart steps 3rd grade. manufacturer: global-software-publishing. price: 9.99. Product B is title: smart steps 4th grade ( jc ). manufacturer: dorling-kindersley-multimedia - ( dk ). price: 11.95. Are Product A and Product B the same? No\n\nProduct A is title: rainbow fish and the whale ( win/mac ). manufacturer: global-software-publishing. price: 9.99. Product B is title: world of rainbow fish ( a9839m2h ). manufacturer: global-software-publishing. price: 19.19. Are Product A and Product B the same? No\n\nProduct A is title: world of rainbow fish. manufacturer: global-software-publishing. price: 19.99. Product B is title: world of rainbow fish ( a9839m2h ). manufacturer: global-software-publishing. price: 19.19. Are Product A and Product B the same? Yes\n\nProduct A is title: kakuro puzzle addict unlimited edition. 
manufacturer: global-software-publishing. price: 9.99. Product B is title: global software a2960jcw sudoku puzzle addict : unlimited edition win 98 me 2000 xp. manufacturer: . price: 8.79. Are Product A and Product B the same? No\n\nProduct A is title: rainbow fish and the whale ( win/mac ). manufacturer: global-software-publishing. price: 9.99. Product B is title: global software a2649m2h rainbow fish & the whale win 95 98 me xp/mac 8.6-9.x ( classic ) x v10 .1 or higher. manufacturer: global-software-publishing. price: 8.79. Are Product A and Product B the same? Yes\n\nProduct A is title: kakuro puzzle addict unlimited edition. manufacturer: global-software-publishing. price: 9.99. Product B is title: global software a7163jcw kakuro puzzle addict unlimited edition dvd movie. manufacturer: global-software-publishing. price: 8.79. Are Product A and Product B the same? Yes\n\nProduct A is title: world of rainbow fish. manufacturer: global-software-publishing. price: 19.99. Product B is title: world of rainbow fish ( a9839m2h ). manufacturer: global-software-publishing. price: 19.19. Are Product A and Product B the same? Yes\n\nProduct A is title: i love the usa. manufacturer: global-software-publishing. price: 9.99. Product B is title: global software a1055 i love the usa. manufacturer: global software. price: 11.99. Are Product A and Product B the same? Yes\n\nProduct A is title: dk smart steps 3rd grade. manufacturer: global-software-publishing. price: 9.99. Product B is title: smart steps 4th grade ( jc ). manufacturer: dorling-kindersley-multimedia - ( dk ). price: 11.95. Are Product A and Product B the same? No\n\n", "fogware publishing": "Product A is title: poetry fiction and drama ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: geometry ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? Yes\n\nProduct A is title: geometry ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: student home learning system for pc/mac. manufacturer: fogware publishing. price: 139.99. Product B is title: math learning system 2007. manufacturer: . price: 15.95. Are Product A and Product B the same? No\n\nProduct A is title: the universe ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: elementary school space the universe ( pc/mac ) fogware. manufacturer: . price: 9.99. Are Product A and Product B the same? Yes\n\nProduct A is title: foreign policy & reform ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: fogware publishing 10356 high school us history 2 foreign policy & reform. manufacturer: . price: 6.84. Are Product A and Product B the same? Yes\n\nProduct A is title: english literature basics ( win/mac ) ( jewel case ). manufacturer: fogware publishing. price: 9.99. Product B is title: elementary school english literature basics ( pc/mac ) fogware. manufacturer: . price: 9.99. Are Product A and Product B the same? 
Yes\n\nProduct A is title: student home learning system for pc/mac. manufacturer: fogware publishing. price: 139.99. Product B is title: student home learning system. manufacturer: . price: 109.99. Are Product A and Product B the same? Yes\n\nProduct A is title: history ( dvd ) ( pc & mac ). manufacturer: fogware publishing. price: . Product B is title: starry night galaxy explorer ( pc & mac ) ( snge-5-c1u ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: places ( pc & mac ). manufacturer: fogware publishing. price: 29.99. Product B is title: cosmi imusictools ( pc ). manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "makemusic !": "Product A is title: emedia print music 2006 win/mac. manufacturer: makemusic !. price: 69.99. Product B is title: emedia music corp emedia piano & keyboard method. manufacturer: . price: 49.21. Are Product A and Product B the same? No\n\n", "sibelius-software-ltd .": "Product A is title: sibelius 4 professional edition. manufacturer: sibelius-software-ltd .. price: 599.99. Product B is title: zipmagic personal edition. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: sibelius 5 professional edition. manufacturer: sibelius-software-ltd .. price: 599.0. Product B is title: systemsuite 6 ( tm ) professional mini. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: sibelius 5 professional edition. manufacturer: sibelius-software-ltd .. price: 599.0. Product B is title: avid xpress pro video editing software for mac and windows professional editing software. manufacturer: . price: 1449.0. Are Product A and Product B the same? No\n\nProduct A is title: sibelius compass. manufacturer: sibelius-software-ltd .. price: 159.99. Product B is title: sibelius compass music composition teaching software music production software. manufacturer: sibelius-software-ltd .. price: 119.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sibelius 4 professional edition. manufacturer: sibelius-software-ltd .. price: 599.99. Product B is title: sibelius sibelius 5 software music production software. manufacturer: sibelius-software-ltd .. price: 449.95. Are Product A and Product B the same? No\n\nProduct A is title: sibelius 4 professional edition. manufacturer: sibelius-software-ltd .. price: 599.99. Product B is title: sibelius sibelius 4 software music production software. manufacturer: sibelius-software-ltd .. price: 449.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sibelius 5 professional edition. manufacturer: sibelius-software-ltd .. price: 599.0. Product B is title: sibelius sibelius 5 software music production software. manufacturer: sibelius-software-ltd .. price: 449.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sibelius 4 professional edition. manufacturer: sibelius-software-ltd .. price: 599.99. Product B is title: sibelius sibelius 5 software music production software. manufacturer: sibelius-software-ltd .. price: 449.95. Are Product A and Product B the same? No\n\nProduct A is title: sibelius 5 educational edition. manufacturer: sibelius-software-ltd .. price: 329.0. Product B is title: sibelius sibelius 5 software educational discount music production software. manufacturer: sibelius-software-ltd .. price: 248.95. Are Product A and Product B the same? Yes\n\n", "imsi design": "Product A is title: instant architect design suite. 
manufacturer: imsi design. price: 29.99. Product B is title: punch software pro home design suite. manufacturer: . price: 61.8. Are Product A and Product B the same? No\n\nProduct A is title: instant architect design suite. manufacturer: imsi design. price: 29.99. Product B is title: encore inc 3d home architect home design deluxe version 9. manufacturer: . price: 25.49. Are Product A and Product B the same? No\n\nProduct A is title: instant architect design suite. manufacturer: imsi design. price: 29.99. Product B is title: instant home design 1 ( jc ). manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\nProduct A is title: clip art and more 250 000 ( jewel case ). manufacturer: imsi design. price: . Product B is title: poetry fiction and drama ( win/mac ) ( jewel case ) ( 10172 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: clip art and more 250 000 ( jewel case ). manufacturer: imsi design. price: . Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: clip art and more 250 000 ( jewel case ). manufacturer: imsi design. price: . Product B is title: clipart & more 250000 ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? Yes\n\n", "encore software": "Product A is title: reader rabbit learning system 1st grade. manufacturer: encore software. price: 29.99. Product B is title: dr. seuss reading learning system 2008 encore. manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: mavis beacon typing 17 ( win/mac ). manufacturer: encore software. price: 19.99. Product B is title: broderbund mavis beacon teaches typing standard17. manufacturer: . price: 22.99. Are Product A and Product B the same? No\n\nProduct A is title: aquarium 3-in-one collection ( win/mac ). manufacturer: encore software. price: 19.99. Product B is title: aquarium 3-in-one limited edition ( win 95 98 me 2000 xp/mac 10.1 or higher ). manufacturer: . price: 19.95. Are Product A and Product B the same? Yes\n\nProduct A is title: midway arcade treasures ( jewel case ). manufacturer: encore software. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: printmaster gold v 17.0. manufacturer: encore software. price: 19.99. Product B is title: print master platinum v17. manufacturer: . price: 29.9. Are Product A and Product B the same? No\n\nProduct A is title: spongebob squarepants typing 2008 win/mac. manufacturer: encore software. price: 19.95. Product B is title: encore inc spongebob squarepants typing fun. manufacturer: . price: 17.1. Are Product A and Product B the same? Yes\n\nProduct A is title: jewels of cleopatra. manufacturer: encore software. price: 19.95. Product B is title: encore software 11861 jewels of cleopatra win xp vista. manufacturer: . price: 18.97. Are Product A and Product B the same? Yes\n\nProduct A is title: 3d home architect landscape designer v8. manufacturer: encore software. price: 29.99. Product B is title: encore inc 3d home architect home design deluxe version 9. manufacturer: . price: 25.49. Are Product A and Product B the same? No\n\nProduct A is title: clickart fonts ( 2006 ) by encore. manufacturer: encore software. price: 19.99. Product B is title: encore software 10463 clickart fonts ( 2006 ) by encore. 
manufacturer: . price: 17.97. Are Product A and Product B the same? Yes\n\nProduct A is title: hasbro family game collection 2006. manufacturer: encore software. price: 19.99. Product B is title: encore software 11273 hasbro family collection 2006 sb cs by hasbro. manufacturer: . price: 17.97. Are Product A and Product B the same? Yes\n\n", "broderbund": "Product A is title: mavis beacon teaches typing 16. manufacturer: broderbund. price: 19.99. Product B is title: mavis beacon teaches typing 16 deluxe. manufacturer: . price: 34.9. Are Product A and Product B the same? No\n\nProduct A is title: mavis beacon teaches typing deluxe 16. manufacturer: broderbund. price: 39.99. Product B is title: mavis beacon teaches typing v17 deluxe pc/mac. manufacturer: . price: 29.9. Are Product A and Product B the same? No\n\nProduct A is title: mavis beacon teaches typing 16. manufacturer: broderbund. price: 19.99. Product B is title: mavis beacon teaches typing 17 standard ( pc/mac ) encore. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: the printshop 20 professional publisher. manufacturer: broderbund. price: 69.99. Product B is title: the print shop ( r ) 22. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: the printshop 20. manufacturer: broderbund. price: 29.99. Product B is title: the printshop v22 deluxe. manufacturer: . price: 29.9. Are Product A and Product B the same? No\n\nProduct A is title: mavis beacon teaches typing 16. manufacturer: broderbund. price: 19.99. Product B is title: mavis beacon 16. manufacturer: . price: 17.9. Are Product A and Product B the same? Yes\n\n", "encore": "Product A is title: hoyle kids ' games jc. manufacturer: encore. price: 9.99. Product B is title: hoyle card games. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: the print shop 22 deluxe. manufacturer: encore. price: 49.99. Product B is title: encore software 11941 the print shop zoom. manufacturer: . price: 17.97. Are Product A and Product B the same? No\n\nProduct A is title: hoyle casino ( 2007 ). manufacturer: encore. price: 19.99. Product B is title: hoyle ( r ) card games 2007. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: chicken hunter wanted jc. manufacturer: encore. price: 9.99. Product B is title: encore software 10760 chicken hunter wanted win 98 me 2000 xp. manufacturer: . price: 7.69. Are Product A and Product B the same? Yes\n\nProduct A is title: hoyle : classic collection 2006. manufacturer: encore. price: 19.99. Product B is title: encore software 11052 hoyle : classic collection 2006 win 98 me 2000 xp. manufacturer: . price: 18.97. Are Product A and Product B the same? Yes\n\nProduct A is title: tlc arthur 's kindergarten learning system 2008. manufacturer: encore. price: 19.99. Product B is title: encore software 13871 tlc reader rabbit 1st grade learning system 2008. manufacturer: . price: 17.97. Are Product A and Product B the same? No\n\nProduct A is title: hoyle : classic collection 2006. manufacturer: encore. price: 19.99. Product B is title: encore software 11052 hoyle : classic collection 2006 win 98 me 2000 xp. manufacturer: . price: 18.97. Are Product A and Product B the same? Yes\n\nProduct A is title: monopoly here & now. manufacturer: encore. price: 19.95. Product B is title: encore software 11180 monopoly here & now win 98 me 2000 xp. manufacturer: . price: 18.97. 
Are Product A and Product B the same? Yes\n\nProduct A is title: hoyle kids ' games jc. manufacturer: encore. price: 9.99. Product B is title: hoyle card games. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: the print shop 22 deluxe. manufacturer: encore. price: 49.99. Product B is title: the printshop v22 deluxe. manufacturer: . price: 29.9. Are Product A and Product B the same? Yes\n\n", "corel": "Product A is title: intervideo dvd copy 5 platinum. manufacturer: corel. price: 49.99. Product B is title: corel intervideo dvd copy 5 platinum software for windows authoring software. manufacturer: . price: 49.95. Are Product A and Product B the same? Yes\n\nProduct A is title: corel dvd moviefactory 6.0 plus. manufacturer: corel. price: 79.99. Product B is title: corel ulead dvd moviefactory 6 software for windows authoring software. manufacturer: . price: 49.95. Are Product A and Product B the same? No\n\nProduct A is title: corel wordperfect office 12 standard upgrade. manufacturer: corel. price: 149.99. Product B is title: microsoft office 2004 std upgrade. manufacturer: . price: 219.99. Are Product A and Product B the same? No\n\nProduct A is title: ulead videostudio 11. manufacturer: corel. price: 89.99. Product B is title: corel ulead videostudio 11 plus software for win educational license authoring software. manufacturer: . price: 89.95. Are Product A and Product B the same? No\n\nProduct A is title: ulead videostudio 11. manufacturer: corel. price: 89.99. Product B is title: corel ulead videostudio 11 video editing and dvd authoring software for windows authoring software. manufacturer: . price: 89.95. Are Product A and Product B the same? Yes\n\nProduct A is title: corel painter x win/mac. manufacturer: corel. price: 429.0. Product B is title: corel painter x. manufacturer: . price: 395.99. Are Product A and Product B the same? Yes\n\nProduct A is title: corel dvd moviefactory 6.0 plus. manufacturer: corel. price: 79.99. Product B is title: ulead ( r ) dvd moviefactory ( r ) 5. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: corel dvd moviefactory 6.0. manufacturer: corel. price: 49.99. Product B is title: corel ulead dvd moviefactory 6 software for windows authoring software. manufacturer: . price: 49.95. Are Product A and Product B the same? Yes\n\nProduct A is title: corel dvd moviefactory 6.0 plus. manufacturer: corel. price: 79.99. Product B is title: corel ulead dvd moviefactory plus 6 software for windows authoring software. manufacturer: . price: 79.95. Are Product A and Product B the same? Yes\n\nProduct A is title: corel wordperfect office 12 standard upgrade. manufacturer: corel. price: 149.99. Product B is title: adobe cs3 design standard upgrade. manufacturer: . price: 413.99. Are Product A and Product B the same? No\n\n", "microspot ltd.": "Product A is title: microspot interiors. manufacturer: microspot ltd.. price: 99.95. Product B is title: microspot interiors ( mac 10.2 or later ). manufacturer: . price: 99.95. Are Product A and Product B the same? Yes\n\nProduct A is title: microspot macdraft professional ( mac ). manufacturer: microspot ltd.. price: 349.99. Product B is title: adobe acrobat v8 .0 professional for mac. manufacturer: . price: 405.99. Are Product A and Product B the same? No\n\nProduct A is title: microspot macdraft pe ( mac ). manufacturer: microspot ltd.. price: 99.95. Product B is title: microsoft office 2004 for students and teachers ( mac ). 
manufacturer: . price: 134.99. Are Product A and Product B the same? No\n\nProduct A is title: microspot macdraft professional ( mac ). manufacturer: microspot ltd.. price: 349.99. Product B is title: adobe acrobat v8 .0 professional for mac. manufacturer: . price: 405.99. Are Product A and Product B the same? No\n\nProduct A is title: microspot macdraft pe ( mac ). manufacturer: microspot ltd.. price: 99.95. Product B is title: microspot macdraft pe personal edition. manufacturer: . price: 85.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microspot interiors ( mac ). manufacturer: microspot ltd.. price: 99.95. Product B is title: microspot interiors version 3.6. manufacturer: . price: 85.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microspot interiors ( mac ). manufacturer: microspot ltd.. price: 99.95. Product B is title: microsoft office 2004 for students and teachers ( mac ). manufacturer: . price: 134.99. Are Product A and Product B the same? No\n\nProduct A is title: microspot macdraft professional ( mac ). manufacturer: microspot ltd.. price: 349.99. Product B is title: microspot macdraft professional edition. manufacturer: . price: 249.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microspot macdraft professional ( mac ). manufacturer: microspot ltd.. price: 349.99. Product B is title: microsoft academic microsoft office project 2003 professional. manufacturer: . price: 176.99. Are Product A and Product B the same? No\n\n", "victory multimedia": "Product A is title: sales skills 2.0 ages 10 +. manufacturer: victory multimedia. price: 19.95. Product B is title: simply magazine sales skills. manufacturer: . price: 3.1. Are Product A and Product B the same? Yes\n\n", "sonic systems inc. .": "Product A is title: sonicwall comprehensive gateway security suite 1 year upgrade plan 01-ssc-5845 ). manufacturer: sonic systems inc. .. price: . Product B is title: sonicwall comprehensive gateway security for tz 150 series 25 node sonicwall -. manufacturer: . price: 107.01. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall global vpn client license 01-ssc-5310 ). manufacturer: sonic systems inc. .. price: . Product B is title: rfipsc2-1 ipsec vpn client license 1 user. manufacturer: . price: 57.2. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall global vpn client license 01-ssc-5310 ). manufacturer: sonic systems inc. .. price: . Product B is title: procurve vpn client software unlimited client licenses. manufacturer: . price: 2598.96. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall global vpn client license 01-ssc-5310 ). manufacturer: sonic systems inc. .. price: . Product B is title: 01-ssc-6993 sonicwall client/server anti-virus suite subscription license 3 years 50. manufacturer: . price: 3992.98. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall global vpn client license 01-ssc-5310 ). manufacturer: sonic systems inc. .. price: . Product B is title: sonicwall 01-ssc-2574. manufacturer: sonicwall. price: 561.65. Are Product A and Product B the same? No\n\n", "nova development": "Product A is title: business plan writer deluxe 8.0 2005. manufacturer: nova development. price: 99.99. Product B is title: palo alto software business plan pro 2007. manufacturer: palo-alto-software. price: 88.83. Are Product A and Product B the same? No\n\nProduct A is title: humorous greeting card factory. manufacturer: nova development. 
price: 29.99. Product B is title: nova development ghw humorous greeting card factory. manufacturer: nova development. price: 29.99. Are Product A and Product B the same? Yes\n\nProduct A is title: art explosion 600 000 images. manufacturer: nova development. price: 79.99. Product B is title: nova development corp art explosion 600000 images. manufacturer: . price: 72.86. Are Product A and Product B the same? Yes\n\nProduct A is title: printmaster gold v 17.0. manufacturer: nova development. price: 19.95. Product B is title: printmaster gold v 17.0 ( win 98 me 2000 xp ). manufacturer: . price: 19.95. Are Product A and Product B the same? Yes\n\nProduct A is title: business card factory deluxe 3.0. manufacturer: nova development. price: 29.99. Product B is title: nova greeting card factory for mac. manufacturer: . price: 41.99. Are Product A and Product B the same? No\n\nProduct A is title: humorous greeting card factory. manufacturer: nova development. price: 29.99. Product B is title: nova development corp humorous greeting card factory. manufacturer: . price: 28.3. Are Product A and Product B the same? Yes\n\nProduct A is title: hallmark blank greeting cards half-fold matte premium 20 count. manufacturer: nova development. price: 12.99. Product B is title: hallmark half-fold matte prem . blank greeting cards ( 20 pack ) ( paper ). manufacturer: . price: 10.99. Are Product A and Product B the same? Yes\n\nProduct A is title: photo explosion deluxe 3.0. manufacturer: nova development. price: 49.99. Product B is title: photoshow deluxe. manufacturer: . price: 39.92. Are Product A and Product B the same? No\n\nProduct A is title: photo explosion deluxe 3.0. manufacturer: nova development. price: 49.99. Product B is title: photostudio ( r ) expressions 2.0. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: photo objects 150 000 for macintosh. manufacturer: nova development. price: 49.99. Product B is title: bjmt art explosion photo objects 150000 complete package 1 user dvd mac. manufacturer: . price: 43.22. Are Product A and Product B the same? No\n\n", "marware": "Product A is title: project x project management software. manufacturer: marware. price: 199.95. Product B is title: practical software maintenance : best practices for managing your software. manufacturer: . price: 90.0. Are Product A and Product B the same? No\n\nProduct A is title: project x project management software. manufacturer: marware. price: 199.95. Product B is title: software. manufacturer: . price: 274.99. Are Product A and Product B the same? No\n\nProduct A is title: project x project management software. manufacturer: marware. price: 199.95. Product B is title: cakewalk project5 version 2 software music production software. manufacturer: . price: 199.0. Are Product A and Product B the same? No\n\nProduct A is title: project x project management software. manufacturer: marware. price: 199.95. Product B is title: now software nuw45e1k now up-to-date & contact complete product content management 1 user ( s ) complete product standard pc. manufacturer: . price: 107.97. Are Product A and Product B the same? No\n\nProduct A is title: project x project management software. manufacturer: marware. price: 199.95. Product B is title: marware project x project management software. manufacturer: . price: 179.99. Are Product A and Product B the same? Yes\n\nProduct A is title: project x project management software. manufacturer: marware. price: 199.95. 
Product B is title: software management approaches : project management estimation and life cycle. manufacturer: . price: 64.95. Are Product A and Product B the same? No\n\n", "sage software": "Product A is title: peachtree by sage premium accounting 2007 accountants ' edition. manufacturer: sage software. price: 499.99. Product B is title: sage ( ptree ) premium accounting accountants edition 2007. manufacturer: . price: 492.01. Are Product A and Product B the same? Yes\n\nProduct A is title: peachtree by sage premium accounting 2007. manufacturer: sage software. price: 499.99. Product B is title: sage ( ptree ) vernfp2007rt premium accounting for nonprofits 2007. manufacturer: . price: 590.35. Are Product A and Product B the same? No\n\nProduct A is title: peachtree by sage premium accounting 2007. manufacturer: sage software. price: 499.99. Product B is title: sage ( ptree ) vernfp2007rt premium accounting for nonprofits 2007. manufacturer: . price: 590.35. Are Product A and Product B the same? No\n\nProduct A is title: peachtree by sage complete accounting 2007. manufacturer: sage software. price: 269.99. Product B is title: sage ( ptree ) premium accounting for manufacturing 2007 multi-user. manufacturer: . price: 1180.37. Are Product A and Product B the same? No\n\nProduct A is title: peachtree by sage premium accounting for manufacturing 2007 multi user ( up to 5 users ). manufacturer: sage software. price: 1199.99. Product B is title: sage ( ptree ) premium accounting for manufacturing 2007 multi-user. manufacturer: . price: 1180.37. Are Product A and Product B the same? Yes\n\nProduct A is title: peachtree by sage premium accounting 2007. manufacturer: sage software. price: 499.99. Product B is title: peachtree ( r ) compatible continuous checks-office accounting 12.0. manufacturer: . price: 91.59. Are Product A and Product B the same? No\n\nProduct A is title: peachtree by sage complete accounting 2007. manufacturer: sage software. price: 269.99. Product B is title: sage software peachtree complete accounting 2007 software for windows tax & finance software. manufacturer: . price: 249.95. Are Product A and Product B the same? Yes\n\nProduct A is title: peachtree by sage premium accounting 2007. manufacturer: sage software. price: 499.99. Product B is title: laser checks for peachtree ( r ) accounting for windows 8.0-2005. manufacturer: . price: 106.29. Are Product A and Product B the same? No\n\n", "intego inc": "Product A is title: internet security barrier x4 antispam edition. manufacturer: intego inc. price: . Product B is title: zonealarm ( r ) internet security suite. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: internet security barrier x4 antispam edition. manufacturer: intego inc. price: . Product B is title: norton internet security ( v3 .0 ) mac symantec 10069614-in. manufacturer: . price: 83.43. Are Product A and Product B the same? No\n\nProduct A is title: internet security barrier x4 antispam edition. manufacturer: intego inc. price: . Product B is title: internet security suite 2007 home protection pack xl box. manufacturer: . price: 61.06. Are Product A and Product B the same? No\n\nProduct A is title: internet security barrier x4 antispam edition. manufacturer: intego inc. price: . Product B is title: intego inc internet security barrier x4 antispam edition. manufacturer: . price: 71.75. Are Product A and Product B the same? Yes\n\nProduct A is title: internet security barrier x4 antispam edition. 
manufacturer: intego inc. price: . Product B is title: pc-cillin ( tm ) internet security 2007. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: internet security barrier x4 antispam edition. manufacturer: intego inc. price: . Product B is title: intego contentbarrier x4 10.4. manufacturer: . price: 54.99. Are Product A and Product B the same? No\n\n", "micromat": "Product A is title: micromat techtool protege with firewire device. manufacturer: micromat. price: . Product B is title: micromat techtool protege utility with 1gb firewire flash drive. manufacturer: . price: 219.99. Are Product A and Product B the same? Yes\n\nProduct A is title: micromat techtool pro 4 ( mac ). manufacturer: micromat. price: 99.99. Product B is title: micromat tech tool pro 4.5. manufacturer: . price: 82.99. Are Product A and Product B the same? Yes\n\nProduct A is title: micromat protogo. manufacturer: micromat. price: . Product B is title: micromat techtool protogo. manufacturer: . price: 109.99. Are Product A and Product B the same? Yes\n\nProduct A is title: micromat podlock ( mac ). manufacturer: micromat. price: 69.0. Product B is title: micromat podlock ipod utility software. manufacturer: . price: 39.99. Are Product A and Product B the same? Yes\n\nProduct A is title: micromat techtool protege with firewire device. manufacturer: micromat. price: . Product B is title: micromat tech tool pro 4.5. manufacturer: . price: 82.99. Are Product A and Product B the same? No\n\nProduct A is title: micromat protogo. manufacturer: micromat. price: . Product B is title: micromat inc. diskstudio. manufacturer: . price: 43.36. Are Product A and Product B the same? No\n\nProduct A is title: micromat podlock ( mac ). manufacturer: micromat. price: 69.0. Product B is title: micromat inc. diskstudio. manufacturer: . price: 43.36. Are Product A and Product B the same? No\n\nProduct A is title: micromat techtool protege with firewire device. manufacturer: micromat. price: . Product B is title: micromat techtool protogo. manufacturer: . price: 109.99. Are Product A and Product B the same? No\n\nProduct A is title: micromat podlock ( mac ). manufacturer: micromat. price: 69.0. Product B is title: microspot interiors ( mac 10.2 or later ). manufacturer: . price: 99.95. Are Product A and Product B the same? No\n\n", "microsoft corporation": "Product A is title: microsoft office mac 2004 english 3pk dsp 731-01163 ). manufacturer: microsoft corporation. price: . Product B is title: microsoft word 2004. manufacturer: . price: 209.99. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office mac 2004 english 3pk dsp 731-01163 ). manufacturer: microsoft corporation. price: . Product B is title: microsoft ( r ) office word 2004 for mac full version. manufacturer: . price: 231.29. Are Product A and Product B the same? No\n\nProduct A is title: microsoft upg win small business svr cal t74-01215 ). manufacturer: microsoft corporation. price: 330.14. Product B is title: cabprenaen150 ca business protection suite for microsoft small business server premium edition. manufacturer: . price: 666.65. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office mac 2004 english 3pk dsp 731-01163 ). manufacturer: microsoft corporation. price: . Product B is title: microsoft office 2004 software suite oem mac apple & mac software. manufacturer: . price: 239.95. Are Product A and Product B the same? 
Yes\n\nProduct A is title: microsoft office mac 2004 english 3pk dsp 731-01163 ). manufacturer: microsoft corporation. price: . Product B is title: microsoft academic microsoft office project 2003 standard. manufacturer: . price: 69.95. Are Product A and Product B the same? No\n\nProduct A is title: microsoft office mac 2004 english 3pk dsp 731-01163 ). manufacturer: microsoft corporation. price: . Product B is title: microsoft ( r ) office word 2004 for mac full version. manufacturer: . price: 231.29. Are Product A and Product B the same? No\n\n", "adobe systems inc": "Product A is title: adobe photoshop elements 4.0 plus adobe premiere elements 2.0 complete package 1 user cd mini-box win canadian french 29180162 ). manufacturer: adobe systems inc. price: . Product B is title: adobe premiere elements 3 and photoshop elements 5 software for windows consumer video editing software. manufacturer: . price: 149.95. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop elements 4.0 plus adobe premiere elements 2.0 complete package 1 user cd mini-box win canadian french 29180162 ). manufacturer: adobe systems inc. price: . Product B is title: prw photo explosion deluxe v. 3.0 complete package 1 user cd win. manufacturer: . price: 47.6. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop elements 4.0 plus adobe premiere elements 2.0 complete package 1 user cd mini-box win canadian french 29180162 ). manufacturer: adobe systems inc. price: . Product B is title: adobe photoshop cs3 extended complete package 1 user academic cd mac. manufacturer: . price: 279.0. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop elements 4.0 plus adobe premiere elements 2.0 complete package 1 user cd mini-box win canadian french 29180162 ). manufacturer: adobe systems inc. price: . Product B is title: adobe photoshop cs3 extended complete package 1 user academic cd mac. manufacturer: . price: 279.0. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop elements 4.0 plus adobe premiere elements 2.0 complete package 1 user cd mini-box win canadian french 29180162 ). manufacturer: adobe systems inc. price: . Product B is title: adobe dreamweaver cs3 complete package 1 user academic cd mac. manufacturer: . price: 189.0. Are Product A and Product B the same? No\n\n", "iplaymusic": "Product A is title: play music together mac. manufacturer: iplaymusic. price: 39.99. Product B is title: play music together dvd by iplaymusic mac. manufacturer: . price: 32.99. Are Product A and Product B the same? Yes\n\n", "avid technology": "Product A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: software. manufacturer: . price: 153.99. Are Product A and Product B the same? No\n\nProduct A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: software. manufacturer: . price: 46.99. Are Product A and Product B the same? No\n\nProduct A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: avid technology inc. 70102024701 software avid liquid chrome xe. manufacturer: . price: 840.97. Are Product A and Product B the same? No\n\nProduct A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: avid liquid 7.0 professional video editing software for windows professional editing software. manufacturer: . price: 439.95. 
Are Product A and Product B the same? Yes\n\nProduct A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: onone software smartscale software. manufacturer: . price: 189.99. Are Product A and Product B the same? No\n\nProduct A is title: avid liquid vs 7 software. manufacturer: avid technology. price: 888.75. Product B is title: macspeech ilisten software. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\n", "mindjet": "", "cosmi": "Product A is title: cosmi talking math & more for kids ( windows ). manufacturer: cosmi. price: . Product B is title: learning quickbooks ( r ) for windows ( r ). manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: cosmi rom03558 print perfect clip art deluxe. manufacturer: cosmi. price: . Product B is title: cosmi print perfect clip art deluxe. manufacturer: . price: 6.99. Are Product A and Product B the same? Yes\n\nProduct A is title: cosmi rom07524 print perfect business cards dvd. manufacturer: cosmi. price: . Product B is title: cosmi print perfect clip art deluxe. manufacturer: . price: 6.99. Are Product A and Product B the same? No\n\nProduct A is title: cosmi rom07524 print perfect business cards dvd. manufacturer: cosmi. price: . Product B is title: encore inc printshop business card maker. manufacturer: riverdeep-learning-company. price: 8.01. Are Product A and Product B the same? No\n\n", "hewlett packard": "Product A is title: procurve vpn client software unlimited client licenses. manufacturer: hewlett packard. price: . Product B is title: prosafe vpn client software single user netgear inc. vpn01l. manufacturer: . price: 27.78. Are Product A and Product B the same? No\n\nProduct A is title: procurve vpn client software unlimited client licenses. manufacturer: hewlett packard. price: . Product B is title: sr2116010 safenet softremote vpn client license 5 users win. manufacturer: . price: 184.57. Are Product A and Product B the same? No\n\nProduct A is title: red hat enterprise linux ws v. 4 update 4 license 1 workstation rl296aa ). manufacturer: hewlett packard. price: . Product B is title: 83076 smarterm enterprise suite v. 12.1 upgrade license 1 user win. manufacturer: . price: 146.42. Are Product A and Product B the same? No\n\nProduct A is title: procurve vpn client software unlimited client licenses. manufacturer: hewlett packard. price: . Product B is title: d-link vpn client software ( 1-user license ) ds-601 ds-601. manufacturer: . price: 42.99. Are Product A and Product B the same? No\n\nProduct A is title: procurve vpn client software unlimited client licenses. manufacturer: hewlett packard. price: . Product B is title: d-link vpn client software ( 5-user license ) ds-605 ds-605. manufacturer: . price: 173.99. Are Product A and Product B the same? No\n\nProduct A is title: procurve vpn client software unlimited client licenses. manufacturer: hewlett packard. price: . Product B is title: procurve vpn client software unlimited client licenses. manufacturer: . price: 2598.96. Are Product A and Product B the same? Yes\n\nProduct A is title: hp storageworks secure path for windows workgroup edition v. 4.0 c license 213076-b26 ). manufacturer: hewlett packard. price: . Product B is title: hewlett packard 213076-b26 secure path v4 .0 c win wkgp ed 1 lic/cd. manufacturer: . price: 1888.39. Are Product A and Product B the same? Yes\n\n", "webroot software": "Product A is title: spy sweeper with antivirus/child safe. 
manufacturer: webroot software. price: 39.95. Product B is title: webroot software inc 52310 spy sweeper with antivirus/child safe 1-pc win 2000 xp vista. manufacturer: . price: 37.97. Are Product A and Product B the same? Yes\n\nProduct A is title: spy sweeper spanish. manufacturer: webroot software. price: 29.95. Product B is title: webroot ( r ) spy sweeper ( tm ) 2006. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: spy sweeper w/antivirus family edition. manufacturer: webroot software. price: 49.95. Product B is title: spy sweeper ( small box ). manufacturer: . price: 22.99. Are Product A and Product B the same? No\n\nProduct A is title: webroot spy sweeper antispyware 5.x. manufacturer: webroot software. price: 29.95. Product B is title: webroot software inc 31250 spy sweeper spanish win 2000 xp. manufacturer: . price: 25.97. Are Product A and Product B the same? No\n\nProduct A is title: webroot spysweeper antispyware 3 user. manufacturer: webroot software. price: 39.99. Product B is title: webroot software 65210 spy sweeper 3 pc. manufacturer: webroot software. price: 31.99. Are Product A and Product B the same? Yes\n\nProduct A is title: webroot desktop firewall. manufacturer: webroot software. price: 29.99. Product B is title: webroot software 54210 desktop firewall. manufacturer: webroot software. price: 26.99. Are Product A and Product B the same? Yes\n\nProduct A is title: spy sweeper [ lb ]. manufacturer: webroot software. price: 29.99. Product B is title: spy sweeper ( small box ). manufacturer: . price: 22.99. Are Product A and Product B the same? No\n\nProduct A is title: webroot spy sweeper with antivirus family edition 3 pcs. manufacturer: webroot software. price: 49.95. Product B is title: webroot software inc 50210 spy sweeper w/antivirus family edition ( mass merchant ) win 2000 xp. manufacturer: . price: 41.97. Are Product A and Product B the same? No\n\nProduct A is title: webroot spy sweeper antispyware 5.x. manufacturer: webroot software. price: 29.95. Product B is title: webroot software inc 66410 spy sweeper ( small box ) ( win 98 me 2000 xp ). manufacturer: . price: 25.97. Are Product A and Product B the same? Yes\n\nProduct A is title: webroot desktop firewall. manufacturer: webroot software. price: 29.99. Product B is title: webroot software inc 54210 webroot desktop firewall firewall pc. manufacturer: . price: 25.97. Are Product A and Product B the same? Yes\n\n", "individual software": "Product A is title: resumemaker professional 12.0. manufacturer: individual software. price: 29.99. Product B is title: individual software resume maker professional deluxe 12.0. manufacturer: . price: 29.99. Are Product A and Product B the same? Yes\n\nProduct A is title: systemsuite 8 professional. manufacturer: individual software. price: 49.99. Product B is title: iolo system mechanic 7 professional ( pc ). manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: systemsuite 8 professional. manufacturer: individual software. price: 49.99. Product B is title: sibelius sibelius photoscore professional 4. manufacturer: . price: 145.7. Are Product A and Product B the same? No\n\nProduct A is title: resumemaker professional 12.0. manufacturer: individual software. price: 29.99. Product B is title: resumemaker ( r ) professional ultimate. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: resumemaker professional 12.0. manufacturer: individual software. 
price: 29.99. Product B is title: individual software prm-r12 resumemaker professional 12.0. manufacturer: individual software. price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: professor teaches windows xp. manufacturer: individual software. price: 19.99. Product B is title: professor teaches windows xp individual software. manufacturer: . price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: total 3d home & landscape design suite version 9. manufacturer: individual software. price: 39.99. Product B is title: individual software inc total 3d home & landscape deluxe 9. manufacturer: . price: 33.5. Are Product A and Product B the same? Yes\n\nProduct A is title: total 3d home & landscape design suite version 9. manufacturer: individual software. price: 39.99. Product B is title: punch software 85100 punch!master landscape pro v10 and home design. manufacturer: . price: 59.97. Are Product A and Product B the same? No\n\nProduct A is title: professor teaches excel & word. manufacturer: individual software. price: 19.99. Product B is title: individual software professor teaches excel & word. manufacturer: . price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: systemsuite 8 professional. manufacturer: individual software. price: 49.99. Product B is title: individual software inc systemsuite 8 professional ( us pos ). manufacturer: . price: 46.42. Are Product A and Product B the same? Yes\n\n", "ibm": "Product A is title: ibm rational clearcase lt v. 6.0.0 media vol passport passport adva. manufacturer: ibm. price: . Product B is title: bt00nna ibm rational clearcase lt v. 6.0.0 media cd win hp-ux solaris en. manufacturer: . price: 108.95. Are Product A and Product B the same? No\n\n", "bling software": "", "nuance communications inc.": "Product A is title: pdf converter professional v4 english. manufacturer: nuance communications inc.. price: 99.99. Product B is title: scansoft pdf converter 4. manufacturer: . price: 37.95. Are Product A and Product B the same? No\n\nProduct A is title: scansoft paperport professional 11 5 user license. manufacturer: nuance communications inc.. price: 949.99. Product B is title: resume maker professional 12. manufacturer: . price: 17.95. Are Product A and Product B the same? No\n\nProduct A is title: pdf converter professional v4 english. manufacturer: nuance communications inc.. price: 99.99. Product B is title: resume maker professional 12. manufacturer: . price: 17.95. Are Product A and Product B the same? No\n\nProduct A is title: scansoft paperport professional 11 5 user license. manufacturer: nuance communications inc.. price: 949.99. Product B is title: orgchart professional 1.3. manufacturer: . price: 39.95. Are Product A and Product B the same? No\n\nProduct A is title: scansoft paperport professional 11 5 user license. manufacturer: nuance communications inc.. price: 949.99. Product B is title: m109a-g00-4 .0 scansoft pdf converter professional v. 4.0 complete package 1 user w. manufacturer: . price: 87.79. Are Product A and Product B the same? No\n\n", "wasp technologies": "Product A is title: wasp bar code wasp bar code labeler complete package 3 users std cd -. manufacturer: wasp technologies. price: . Product B is title: prw photo explosion deluxe v. 3.0 complete package 1 user cd win. manufacturer: . price: 47.6. Are Product A and Product B the same? No\n\nProduct A is title: wasp bar code wasp bar code labeler complete package 3 users std cd -. 
manufacturer: wasp technologies. price: . Product B is title: 298881 quickbooks customer manager v. 2.5 complete package 1 user cd win. manufacturer: . price: 69.38. Are Product A and Product B the same? No\n\nProduct A is title: wasp bar code wasp bar code labeler complete package 3 users std cd -. manufacturer: wasp technologies. price: . Product B is title: 24100 punch ! 5 in 1 home design complete package 1 user cd win. manufacturer: . price: 36.28. Are Product A and Product B the same? No\n\nProduct A is title: wasp bar code wasp bar code labeler complete package 3 users std cd -. manufacturer: wasp technologies. price: . Product B is title: m009a-l00-4 .0 scansoft pdf create ! v. 4.0 complete package 1 user cd win engli. manufacturer: . price: 43.21. Are Product A and Product B the same? No\n\nProduct A is title: wasp bar code wasp bar code labeler complete package 3 users std cd -. manufacturer: wasp technologies. price: . Product B is title: snt-040 ntracker complete package 1 user win. manufacturer: . price: 26.59. Are Product A and Product B the same? No\n\n", "dantz": "Product A is title: retrospect 6.0 desktop mac upgrade. manufacturer: dantz. price: 59.99. Product B is title: emc insignia retrospect workgroup 6 upgrade. manufacturer: . price: 162.99. Are Product A and Product B the same? No\n\nProduct A is title: emc retrospect 7.5 professional for windows upgrade. manufacturer: dantz. price: . Product B is title: avid liquid pro 7.0 video editing hardware/software bundle for windows professional editing software. manufacturer: . price: 899.0. Are Product A and Product B the same? No\n\nProduct A is title: emc retrospect 7.5 professional for windows upgrade. manufacturer: dantz. price: . Product B is title: adobe after effects professional 7 software for windows effects software. manufacturer: . price: 999.0. Are Product A and Product B the same? No\n\nProduct A is title: emc retrospect 7.5 svr 1-client windows. manufacturer: dantz. price: 249.0. Product B is title: emc insignia retrospect server client v. 7.5 complete package. manufacturer: . price: 181.97. Are Product A and Product B the same? Yes\n\nProduct A is title: retrospect 6.0 desktop mac upgrade. manufacturer: dantz. price: 59.99. Product B is title: bu24a600000 emc insignia retrospect desktop v. 6.1 upgrade package 2 clients cd. manufacturer: . price: 51.85. Are Product A and Product B the same? No\n\nProduct A is title: upgrade emc retrospect 7.5 multi server windows only. manufacturer: dantz. price: . Product B is title: retrospect 7.5 prof win upg only pz24a0075. manufacturer: emc. price: 54.68. Are Product A and Product B the same? No\n\n", "prosoft engineering": "Product A is title: prosoft tunetech for ipod ( mac ). manufacturer: prosoft engineering. price: 74.99. Product B is title: valusoft tune transfer for ipod. manufacturer: . price: 21.99. Are Product A and Product B the same? No\n\nProduct A is title: prosoft tunetech for ipod ( mac ). manufacturer: prosoft engineering. price: 74.99. Product B is title: prosoft 27100 tunetech for ipod by prosoft 0794038271005. manufacturer: . price: 51.11. Are Product A and Product B the same? No\n\n", "valusoft": "Product A is title: ultimate puzzle games. manufacturer: valusoft. price: 19.99. Product B is title: ultimate pinball. manufacturer: . price: 6.95. Are Product A and Product B the same? No\n\nProduct A is title: pokemon team turbo ( jewel case ). manufacturer: valusoft. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). 
manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: ultimate puzzle games. manufacturer: valusoft. price: 19.99. Product B is title: ultimate fonts. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: sea scene : living underwater worlds ( jewel case ). manufacturer: valusoft. price: 9.99. Product B is title: sea scene : living underwater worlds. manufacturer: . price: 19.55. Are Product A and Product B the same? Yes\n\nProduct A is title: print workshop 2007. manufacturer: valusoft. price: 19.99. Product B is title: print shop deluxe 21. manufacturer: . price: 44.9. Are Product A and Product B the same? No\n\nProduct A is title: mastercook deluxe low carb. manufacturer: valusoft. price: 14.99. Product B is title: mastercook deluxe low carb edition. manufacturer: . price: 17.9. Are Product A and Product B the same? Yes\n\nProduct A is title: ultimate pinball ( jewel case ). manufacturer: valusoft. price: 9.99. Product B is title: ultimate fonts. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "onone software": "Product A is title: genuine fractals print pro 4.1 sharpener pro 2.0. manufacturer: onone software. price: . Product B is title: onone software inc. gpp-55211 genuine fractals print pro 5 1u. manufacturer: . price: 274.97. Are Product A and Product B the same? No\n\nProduct A is title: onone software pxl smartscale windows and macintosh ). manufacturer: onone software. price: . Product B is title: onone software inc. mpr-40212. manufacturer: onone software inc.. price: 66.27. Are Product A and Product B the same? No\n\nProduct A is title: onone software intellihance pro 4.1. manufacturer: onone software. price: . Product B is title: onone software plug-in suite full. manufacturer: . price: 393.68. Are Product A and Product B the same? No\n\nProduct A is title: onone plug-in suite 3 for adobe photoshop. manufacturer: onone software. price: 399.95. Product B is title: onone software plug-in suite full. manufacturer: . price: 393.68. Are Product A and Product B the same? Yes\n\nProduct A is title: onone software pxl smartscale windows and macintosh ). manufacturer: onone software. price: . Product B is title: pxl smartscale for mac/win. manufacturer: . price: 159.95. Are Product A and Product B the same? Yes\n\nProduct A is title: onone essentials for adobe photoshop elements. manufacturer: onone software. price: 59.95. Product B is title: onone software ele-10211 on1 essentials v. 1.0 for photoshop elements complete product graphics/designing 1 user complete product standard english pc mac. manufacturer: . price: 50.97. Are Product A and Product B the same? Yes\n\nProduct A is title: onone essentials for adobe photoshop elements version 2 full. manufacturer: onone software. price: 59.95. Product B is title: adobe photoshop cs3 extended for mac. manufacturer: . price: 935.99. Are Product A and Product B the same? No\n\nProduct A is title: genuine fractals print pro 5 1u. manufacturer: onone software. price: 299.95. Product B is title: onone software inc. gpp-55211 genuine fractals print pro 5 1u. manufacturer: . price: 274.97. Are Product A and Product B the same? Yes\n\nProduct A is title: onone plug-in suite 3 for adobe photoshop. manufacturer: onone software. price: 399.95. Product B is title: onone software pps-30211 on1 plug-in suite v. 3.0 for adobe photoshop complete product image collection/editing/archive complete product pc mac intel-based mac. manufacturer: . 
price: 365.97. Are Product A and Product B the same? Yes\n\nProduct A is title: onone software pxl smartscale windows and macintosh ). manufacturer: onone software. price: . Product B is title: onone software inc. mpr-40212. manufacturer: onone software inc.. price: 66.27. Are Product A and Product B the same? No\n\n", "the learning company": "Product A is title: reader rabbit kindergarten version 1.1. manufacturer: the learning company. price: 9.99. Product B is title: reader rabbit reading learning system 2007. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit kindergarten version 1.1. manufacturer: the learning company. price: 9.99. Product B is title: jumpstart kindergarten ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit playtime for baby and toddler. manufacturer: the learning company. price: 19.99. Product B is title: reader rabbit playtime for baby & toddler. manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: cluefinders 3rd grade. manufacturer: the learning company. price: . Product B is title: brain play 1st-3rd grade. manufacturer: . price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit kindergarten version 1.1. manufacturer: the learning company. price: 9.99. Product B is title: reader rabbit learn to read with phonics ! preschool & kindergarten. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\n", "emedia": "Product A is title: emedia rock guitar method. manufacturer: emedia. price: 39.99. Product B is title: emedia music corp guitar pro v 5.1 ( hybrid ). manufacturer: . price: 51.92. Are Product A and Product B the same? No\n\nProduct A is title: emedia bass method win/mac. manufacturer: emedia. price: 59.99. Product B is title: emedia eg03014 bass method os x. manufacturer: emedia. price: 53.99. Are Product A and Product B the same? Yes\n\nProduct A is title: finale 2007 academic ( win/mac ). manufacturer: emedia. price: 350.0. Product B is title: makemusic finale 2007 academic create edit and print sheet music. manufacturer: . price: 269.0. Are Product A and Product B the same? Yes\n\nProduct A is title: emedia blues guitar legends. manufacturer: emedia. price: 29.94. Product B is title: emedia essential bass guitar dvd. manufacturer: . price: 16.95. Are Product A and Product B the same? No\n\nProduct A is title: my guitar. manufacturer: emedia. price: 29.95. Product B is title: emedia music corp my guitar. manufacturer: . price: 24.81. Are Product A and Product B the same? Yes\n\nProduct A is title: emedia intermediate guitar method os x. manufacturer: emedia. price: 59.99. Product B is title: emedia music corp emedia intermediate guitar method. manufacturer: . price: 49.21. Are Product A and Product B the same? No\n\nProduct A is title: my guitar. manufacturer: emedia. price: 29.95. Product B is title: mike dowling homespun swing guitar. manufacturer: . price: 36.67. Are Product A and Product B the same? No\n\nProduct A is title: emedia piano and keyboard method volume 1. manufacturer: emedia. price: 59.99. Product B is title: emedia music corp emedia piano & keyboard method. manufacturer: . price: 49.21. Are Product A and Product B the same? Yes\n\nProduct A is title: emedia rock guitar method. manufacturer: emedia. price: 39.99. Product B is title: emedia music corp emedia blues guitar legends. manufacturer: . price: 24.81. 
Are Product A and Product B the same? No\n\nProduct A is title: emedia guitarpro 5 win/mac. manufacturer: emedia. price: 59.99. Product B is title: emedia music corp guitar pro v 5.1 ( hybrid ). manufacturer: . price: 51.92. Are Product A and Product B the same? Yes\n\n", "symantec": "Product A is title: symc backup exec sbs std 11d win small business server standard. manufacturer: symantec. price: 811.21. Product B is title: 10759333 symantec backup exec 11d for windows servers central admin server option compl. manufacturer: . price: 1219.72. Are Product A and Product B the same? No\n\nProduct A is title: symc backup exec sbs prm 11d win small business server. manufacturer: symantec. price: 885.83. Product B is title: e17887d symantec backup exec for netware servers remote agent for windows or netware ser. manufacturer: . price: 197.65. Are Product A and Product B the same? No\n\nProduct A is title: symc backup exec sbs aws 11d win sbs agent for windows systems. manufacturer: symantec. price: 447.74. Product B is title: 10759328 symantec backup exec 11d for windows servers agent for db2 on windows servers -. manufacturer: . price: 644.84. Are Product A and Product B the same? No\n\nProduct A is title: norton internet security mac 3.0 [ antivirus firewall privacy controls iclean ]. manufacturer: symantec. price: 99.95. Product B is title: symantec norton internet security 3.0 software for macintosh apple & mac software. manufacturer: . price: 89.95. Are Product A and Product B the same? Yes\n\nProduct A is title: norton antivirus 2007. manufacturer: symantec. price: 39.99. Product B is title: symantec norton anti-virus 2007 windows. manufacturer: . price: 39.99. Are Product A and Product B the same? Yes\n\nProduct A is title: norton internet security 2008 10 user. manufacturer: symantec. price: 199.99. Product B is title: norton internet security 2008 10 user. manufacturer: . price: 199.99. Are Product A and Product B the same? Yes\n\nProduct A is title: symantec client security 3.1 with groupware protection business pack 50 user. manufacturer: symantec. price: 3732.0. Product B is title: 10517940 symantec client security with groupware protection business pack v. 3.1 -. manufacturer: . price: 3323.92. Are Product A and Product B the same? No\n\nProduct A is title: norton antivirus mac 10.0 ( mac ). manufacturer: symantec. price: 69.95. Product B is title: symantec norton anti-virus v. 10 mac. manufacturer: . price: 64.99. Are Product A and Product B the same? Yes\n\nProduct A is title: norton internet security 2008 10 user. manufacturer: symantec. price: 199.99. Product B is title: symantec norton internet security 3.0 software for macintosh apple & mac software. manufacturer: . price: 89.95. Are Product A and Product B the same? No\n\n", "apple computer": "Product A is title: apple final cut express hd 3.5 ( mac ). manufacturer: apple computer. price: 299.0. Product B is title: apple final cut studio 2 upgrade from final cut pro. manufacturer: . price: 699.99. Are Product A and Product B the same? No\n\nProduct A is title: apple final cut express hd 3.5 ( mac ). manufacturer: apple computer. price: 299.0. Product B is title: apple software m9372z/a final cut express 2.0 upgrade. manufacturer: apple software. price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: logic express 7. manufacturer: apple computer. price: 299.0. Product B is title: apple/emagic logic express 7.2 software music production software. manufacturer: . price: 299.0. 
Are Product A and Product B the same? Yes\n\nProduct A is title: apple final cut express hd 3.5 ( mac ). manufacturer: apple computer. price: 299.0. Product B is title: ma265zm/a final cut express hd v. 3.5 media volume dvd mac multi-country. manufacturer: . price: 12.98. Are Product A and Product B the same? No\n\nProduct A is title: apple ilife '06 ( mac dvd ) [ older version ]. manufacturer: apple computer. price: 79.0. Product B is title: ilife '06 mac apple ma166z/a. manufacturer: . price: 47.8. Are Product A and Product B the same? Yes\n\nProduct A is title: apple mac os x tiger 10.4.6 family pack ( mac ). manufacturer: apple computer. price: 199.0. Product B is title: apple shake 4.1 mac os x. manufacturer: . price: 499.99. Are Product A and Product B the same? No\n\nProduct A is title: apple mac os x tiger 10.4.6 family pack ( mac ). manufacturer: apple computer. price: 199.0. Product B is title: apple mac os x 10.4.6 tiger family pack. manufacturer: . price: 185.99. Are Product A and Product B the same? Yes\n\nProduct A is title: apple ilife '06 family pack ( mac dvd ) [ older version ]. manufacturer: apple computer. price: 99.0. Product B is title: apple iwork '06 family pack. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\nProduct A is title: logic express 7. manufacturer: apple computer. price: 299.0. Product B is title: apple logic express 7.2. manufacturer: . price: 299.99. Are Product A and Product B the same? Yes\n\nProduct A is title: apple final cut express hd 3.5 ( mac ). manufacturer: apple computer. price: 299.0. Product B is title: apple final cut express hd 3.5 video editing software for mac av production software. manufacturer: . price: 299.0. Are Product A and Product B the same? Yes\n\n", "school-zone-interactive": "Product A is title: school zone pencil-pal software big phonics ( cd-rom & book ). manufacturer: school-zone-interactive. price: . Product B is title: school zone interactive shapes on track software. manufacturer: . price: 9.45. Are Product A and Product B the same? No\n\nProduct A is title: school zone pencil-pal software big phonics ( cd-rom & book ). manufacturer: school-zone-interactive. price: . Product B is title: school zone interactive math 2 on track software. manufacturer: . price: 9.45. Are Product A and Product B the same? No\n\n", "panicware": "Product A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. Product B is title: adobe acrobat professional 7. manufacturer: . price: 350.0. Are Product A and Product B the same? No\n\nProduct A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. Product B is title: system mechanic 6 professional. manufacturer: . price: 36.9. Are Product A and Product B the same? No\n\nProduct A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. Product B is title: system care ? professional. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. Product B is title: csdc pop-up stopper professional panicware. manufacturer: . price: 26.14. Are Product A and Product B the same? Yes\n\nProduct A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. Product B is title: avanquest usa 1461 system suit 7 professional. manufacturer: avanquest usa. price: 44.99. Are Product A and Product B the same? No\n\nProduct A is title: pop-up stopper professional. manufacturer: panicware. price: 29.99. 
Product B is title: orgchart professional 1.3. manufacturer: . price: 39.95. Are Product A and Product B the same? No\n\n", "avanquest": "Product A is title: system care professional. manufacturer: avanquest. price: 49.95. Product B is title: adobe premiere pro 2.0 software professional editing software. manufacturer: . price: 848.95. Are Product A and Product B the same? No\n\nProduct A is title: vcomm system suite professional 7. manufacturer: avanquest. price: 49.95. Product B is title: adobe cs3 flash professional. manufacturer: . price: 724.99. Are Product A and Product B the same? No\n\nProduct A is title: encyclopedia britannica deluxe 2008 win/mac. manufacturer: avanquest. price: 29.95. Product B is title: britannica deluxe 2008. manufacturer: . price: 26.5. Are Product A and Product B the same? Yes\n\nProduct A is title: vcomm system suite professional 7. manufacturer: avanquest. price: 49.95. Product B is title: avanquest usa 1461 system suit 7 professional. manufacturer: avanquest usa. price: 44.99. Are Product A and Product B the same? No\n\nProduct A is title: vcomm system suite professional 7. manufacturer: avanquest. price: 49.95. Product B is title: individual software inc systemsuite 8 professional ( us pos ). manufacturer: . price: 46.42. Are Product A and Product B the same? No\n\nProduct A is title: transfer my dvd. manufacturer: avanquest. price: 29.95. Product B is title: transfer my dvd ( 6601 ). manufacturer: . price: 28.75. Are Product A and Product B the same? No\n\n", "netopia": "Product A is title: timbuktu remote control softwre 10 licenses for windows. manufacturer: netopia. price: . Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: now software. price: 114.99. Are Product A and Product B the same? No\n\nProduct A is title: timbuktu remote control softwre 2 licenses for windows & mac. manufacturer: netopia. price: 199.99. Product B is title: netopia timbuktu pro for mac and windows 2 pack. manufacturer: . price: 207.99. Are Product A and Product B the same? Yes\n\nProduct A is title: timbuktu remote control softwre 10 licenses for windows. manufacturer: netopia. price: . Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: power-on-software. price: 114.99. Are Product A and Product B the same? No\n\nProduct A is title: timbuktu remote control softwre 2 licenses for windows & mac. manufacturer: netopia. price: 199.99. Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: now software. price: 114.99. Are Product A and Product B the same? No\n\n", "m-audio": "Product A is title: m-audio pro tools m-powered 7.3 m-audio compatible world-class production software. manufacturer: m-audio. price: 299.99. Product B is title: adobe soundbooth cs3 audio editing software windows music production software. manufacturer: . price: 198.95. Are Product A and Product B the same? No\n\nProduct A is title: m-audio pro tools m-powered 7.3 m-audio compatible world-class production software. manufacturer: m-audio. price: 299.99. Product B is title: cakewalk sonar 5 studio edition music production software. manufacturer: . price: 229.95. Are Product A and Product B the same? No\n\nProduct A is title: m-audio pro tools m-powered 7.3 m-audio compatible world-class production software. manufacturer: m-audio. price: 299.99. Product B is title: apple/emagic logic pro 7.2 software music production software. manufacturer: . price: 999.0. 
Are Product A and Product B the same? No\n\nProduct A is title: m-audio pro tools m-powered 7.3 m-audio compatible world-class production software. manufacturer: m-audio. price: 299.99. Product B is title: make finale allegro software music production software. manufacturer: . price: 164.95. Are Product A and Product B the same? No\n\nProduct A is title: m-audio pro tools m-powered 7.3 m-audio compatible world-class production software. manufacturer: m-audio. price: 299.99. Product B is title: cakewalk sonar home studio 6 software music production software. manufacturer: . price: 99.0. Are Product A and Product B the same? No\n\n", "valuesoft": "Product A is title: valuesoft tune transfer for ipod software for windows xp. manufacturer: valuesoft. price: 29.95. Product B is title: pinnacle mobile media converter software for windows ipod software. manufacturer: . price: 29.95. Are Product A and Product B the same? No\n\nProduct A is title: valuesoft tune transfer for ipod software for windows xp. manufacturer: valuesoft. price: 29.95. Product B is title: pinnacle mobile media converter software for windows ipod software. manufacturer: . price: 29.95. Are Product A and Product B the same? No\n\nProduct A is title: valuesoft tune transfer for ipod software for windows xp. manufacturer: valuesoft. price: 29.95. Product B is title: individual software professor teaches windows xp. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: valuesoft tune transfer for ipod software for windows xp. manufacturer: valuesoft. price: 29.95. Product B is title: adobe captivate 2 software for windows presentation software. manufacturer: . price: 598.95. Are Product A and Product B the same? No\n\nProduct A is title: valuesoft tune transfer for ipod software for windows xp. manufacturer: valuesoft. price: 29.95. Product B is title: adobe after effects professional 7 software for windows effects software. manufacturer: . price: 999.0. Are Product A and Product B the same? No\n\n", "autodesk": "Product A is title: autodesk discreet combustion 4 windows ). manufacturer: autodesk. price: . Product B is title: autodesk autodesk combustion 4 motion graphics software compositing software. manufacturer: . price: 994.95. Are Product A and Product B the same? No\n\n", "cakewalk": "Product A is title: pc cakewalk guitar tracks pro 3. manufacturer: cakewalk. price: 149.99. Product B is title: cakewalk guitar tracks pro 3 music production software. manufacturer: . price: 99.0. Are Product A and Product B the same? Yes\n\nProduct A is title: cakewalk sonar home studio 6. manufacturer: cakewalk. price: 139.0. Product B is title: cakewalk sonar 5 producer edition music production software. manufacturer: . price: 379.95. Are Product A and Product B the same? No\n\nProduct A is title: cakewalk sonar home studio 6 xl. manufacturer: cakewalk. price: 209.0. Product B is title: cakewalk sonar 5 studio edition music production software. manufacturer: . price: 229.95. Are Product A and Product B the same? No\n\nProduct A is title: cakewalk sonar 6 producer ed. manufacturer: cakewalk. price: 799.0. Product B is title: cakewalk sonar 6 producers edition software music production software. manufacturer: . price: 459.95. Are Product A and Product B the same? Yes\n\nProduct A is title: cakewalk project5 version 2. manufacturer: cakewalk. price: 259.0. Product B is title: cakewalk sonar 6 producers edition software music production software. manufacturer: . price: 459.95. 
Are Product A and Product B the same? No\n\nProduct A is title: cakewalk sonar home studio 6 xl. manufacturer: cakewalk. price: 209.0. Product B is title: cakewalk sonar home studio 6 software music production software. manufacturer: . price: 99.0. Are Product A and Product B the same? No\n\nProduct A is title: cakewalk project5 version 2. manufacturer: cakewalk. price: 259.0. Product B is title: cakewalk project5 version 2 software music production software. manufacturer: . price: 199.0. Are Product A and Product B the same? Yes\n\nProduct A is title: cakewalk sonar 6 producer ed. manufacturer: cakewalk. price: 799.0. Product B is title: cakewalk sonar 6 studio edition software music production software. manufacturer: . price: 279.95. Are Product A and Product B the same? No\n\nProduct A is title: cakewalk sonar home studio 6 xl. manufacturer: cakewalk. price: 209.0. Product B is title: cakewalk sonar home studio 6 xl software music production software. manufacturer: . price: 159.0. Are Product A and Product B the same? Yes\n\n", "intego": "Product A is title: contentbarrier x4 10.4 single user ( mac ). manufacturer: intego. price: 49.99. Product B is title: virusbarrier x4 10.4 for mac-10 users ( vbx4-10u ). manufacturer: . price: 316.75. Are Product A and Product B the same? No\n\nProduct A is title: virusbarrier x4 10.4 for mac-10 users. manufacturer: intego. price: 329.95. Product B is title: virusbarrier x4 10.4 for mac-10 users ( vbx4-10u ). manufacturer: . price: 316.75. Are Product A and Product B the same? Yes\n\n", "megasystems": "Product A is title: human body 360. manufacturer: megasystems. price: 29.99. Product B is title: topics presents : human body. manufacturer: . price: 15.9. Are Product A and Product B the same? No\n\n", "activision": "Product A is title: hidden expedition : titanic. manufacturer: activision. price: 19.99. Product B is title: hidden expedition : titanic. manufacturer: . price: 15.5. Are Product A and Product B the same? Yes\n\nProduct A is title: dora the explorer : dora 's world adventures. manufacturer: activision. price: 19.99. Product B is title: dora the explorer : dora world adventures pc cd-rom. manufacturer: . price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: dora the explorer : dora 's world adventures. manufacturer: activision. price: 19.99. Product B is title: clifford the big red dog thinking adventures. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\n", "intuit inc.": "Product A is title: quickbooks premier non-profit edition 2005. manufacturer: intuit inc.. price: 499.95. Product B is title: intuit inc 284216 qckbks prem nonprofit ed 2005. manufacturer: . price: 404.0. Are Product A and Product B the same? Yes\n\nProduct A is title: quickbooks pos : pro multistore 6.0. manufacturer: intuit inc.. price: 1399.95. Product B is title: quickbooks ( r ). manufacturer: . price: 103.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks pos : pro multistore 6.0. manufacturer: intuit inc.. price: 1399.95. Product B is title: quickbooks ( r ) pro 2007 edition 3-user. manufacturer: . price: 499.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks premier non-profit edition 2005. manufacturer: intuit inc.. price: 499.95. Product B is title: quickbooks ( r ). manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks pos : pro multistore 6.0. manufacturer: intuit inc.. price: 1399.95. 
Product B is title: quickbooks ( r ) pro 2007 edition 3-user. manufacturer: . price: 499.99. Are Product A and Product B the same? No\n\nProduct A is title: quickbooks premier non-profit edition 2005. manufacturer: intuit inc.. price: 499.95. Product B is title: quickbooks ( r ). manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\n", "e-mu": "Product A is title: e-mu em8685 beat garden x emulator x2/proteus x2 soundset. manufacturer: e-mu. price: 129.99. Product B is title: e-mu modern symphonic orchestra for emulator x and proteus x sounds for software. manufacturer: . price: 329.99. Are Product A and Product B the same? No\n\n", "zone labs": "Product A is title: zonealarm internet security suite. manufacturer: zone labs. price: 49.99. Product B is title: panda software panda internet security 2007. manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: zonealarm pro firewall. manufacturer: zone labs. price: 39.99. Product B is title: zonealarm wireless security 2005. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: zonealarm internet security suite. manufacturer: zone labs. price: 49.99. Product B is title: panda software panda internet security 2007. manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: zonealarm internet security suite. manufacturer: zone labs. price: 49.99. Product B is title: pc-cillin ( tm ) internet security 2007. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: zonealarm internet security suite. manufacturer: zone labs. price: 49.99. Product B is title: zonealarm internet security suite 2007 ( pc ) zonelabs. manufacturer: . price: 49.99. Are Product A and Product B the same? Yes\n\nProduct A is title: zonealarm internet security suite. manufacturer: zone labs. price: 49.99. Product B is title: symantec 10725608 norton internet security 2007 internet security 1 user ( s ) pc. manufacturer: . price: 64.97. Are Product A and Product B the same? No\n\nProduct A is title: zonealarm pro firewall. manufacturer: zone labs. price: 39.99. Product B is title: zone labs zonealarm pro. manufacturer: . price: 30.17. Are Product A and Product B the same? Yes\n\n", "dataviz": "", "roxio": "Product A is title: roxio toast 8 titanium. manufacturer: roxio. price: 99.95. Product B is title: roxio toast 8 titanium cdr software. manufacturer: . price: 82.99. Are Product A and Product B the same? Yes\n\nProduct A is title: roxio easy media creator 9. manufacturer: roxio. price: 99.95. Product B is title: podmediacreator. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: roxio easy media creator 9. manufacturer: roxio. price: 99.95. Product B is title: roxio easy media creator 9 suite software for windows authoring software. manufacturer: . price: 98.95. Are Product A and Product B the same? Yes\n\nProduct A is title: stopzilla 4.0. manufacturer: roxio. price: 47.33. Product B is title: stopzilla ! 4.0 ( pc ) roxio. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: roxio popcorn 2 ( mac ). manufacturer: roxio. price: 49.95. Product B is title: roxio popcorn 2 video conversion software. manufacturer: . price: 44.99. Are Product A and Product B the same? Yes\n\n", "dk": "Product A is title: dk rainbow fish most beautiful fish. manufacturer: dk. price: 9.99. Product B is title: rainbow fish and the whale. 
manufacturer: . price: 6.95. Are Product A and Product B the same? No\n\n", "power production": "Product A is title: power production storyboard quick. manufacturer: power production. price: . Product B is title: power production power production storyboard artist v4 .2 software animation software. manufacturer: . price: 749.0. Are Product A and Product B the same? No\n\nProduct A is title: power production storyboard artist 4. manufacturer: power production. price: . Product B is title: power production storyboard quick 5 software for mac and windows animation software. manufacturer: . price: 299.0. Are Product A and Product B the same? No\n\nProduct A is title: power production storyboard artist 4. manufacturer: power production. price: . Product B is title: power production power production storyboard artist software animation software. manufacturer: . price: 498.95. Are Product A and Product B the same? Yes\n\nProduct A is title: power production storyboard quick. manufacturer: power production. price: . Product B is title: power production power production storyboard artist software animation software. manufacturer: . price: 498.95. Are Product A and Product B the same? No\n\nProduct A is title: power production storyboard quick. manufacturer: power production. price: . Product B is title: power production storyboard quick 5 software for mac and windows animation software. manufacturer: . price: 299.0. Are Product A and Product B the same? Yes\n\n", "on-hand-software": "Product A is title: best of card games ( jewel case ). manufacturer: on-hand-software. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: best of card games ( jewel case ). manufacturer: on-hand-software. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: best of card games ( jewel case ). manufacturer: on-hand-software. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: wild photo effects ( jewel case ). manufacturer: on-hand-software. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: best of card games ( jewel case ). manufacturer: on-hand-software. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\n", "csdc": "Product A is title: more brain games. manufacturer: csdc. price: 9.99. Product B is title: brain games : solitaire. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: more brain games. manufacturer: csdc. price: 9.99. Product B is title: freeverse software big bang brain games mac. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\nProduct A is title: more brain games. manufacturer: csdc. price: 9.99. Product B is title: hoyle card games. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: route 66 route canada ( french ). manufacturer: csdc. price: . Product B is title: csdc route 66 route canada ( french ). manufacturer: . price: 35.98. Are Product A and Product B the same? 
Yes\n\nProduct A is title: macjournal v4 ( mac ). manufacturer: csdc. price: 39.95. Product B is title: mariner macjournal 4 journal blog podcast software. manufacturer: . price: 38.99. Are Product A and Product B the same? Yes\n\nProduct A is title: ilisten with headset microphone 1.7. manufacturer: csdc. price: . Product B is title: macspeech ilisten with headset and microphone. manufacturer: . price: 149.99. Are Product A and Product B the same? Yes\n\nProduct A is title: omnioutliner professional 3.0. manufacturer: csdc. price: 69.95. Product B is title: diskeeper 2007 professional. manufacturer: . price: 46.95. Are Product A and Product B the same? No\n\nProduct A is title: marketcircle daylite v3 .0 ( mac ). manufacturer: csdc. price: 149.0. Product B is title: marketcircle daylite 3 customer relationship management crm. manufacturer: . price: 125.99. Are Product A and Product B the same? Yes\n\nProduct A is title: omnioutliner professional 3.0. manufacturer: csdc. price: 69.95. Product B is title: adobe cs3 flash professional academic. manufacturer: . price: 239.99. Are Product A and Product B the same? No\n\nProduct A is title: modeless liquid ledger personal finance software ( mac ). manufacturer: csdc. price: 79.99. Product B is title: modeless software liquid ledger personal finance. manufacturer: . price: 71.99. Are Product A and Product B the same? Yes\n\n", "sony media": "Product A is title: american idol extreme music creator. manufacturer: sony media. price: 49.95. Product B is title: sony media software american idol extreme music creator software music production software. manufacturer: . price: 49.95. Are Product A and Product B the same? Yes\n\nProduct A is title: sound forge 9.0. manufacturer: sony media. price: 399.96. Product B is title: sony creative software sfas9000 sound forge audio studio 9. manufacturer: . price: 53.97. Are Product A and Product B the same? No\n\nProduct A is title: sound forge audio studio 9. manufacturer: sony media. price: 69.59. Product B is title: sound forge audio studio 9. manufacturer: . price: 66.89. Are Product A and Product B the same? Yes\n\nProduct A is title: sound forge 9.0. manufacturer: sony media. price: 399.96. Product B is title: sound studio 3 for mac. manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: acid music studio 7. manufacturer: sony media. price: 78.95. Product B is title: acid music studio 6. manufacturer: sony-pictures-digital-entertainment. price: 59.95. Are Product A and Product B the same? No\n\nProduct A is title: vegas movie studiodvd +8 plat. manufacturer: sony media. price: 129.95. Product B is title: vegas movie studiodvd +8 plat. manufacturer: sony creative software. price: 92.99. Are Product A and Product B the same? Yes\n\nProduct A is title: sony vegas movie studio + dvd 8 platinum edition. manufacturer: sony media. price: 129.95. Product B is title: sony vegas 6 video editing software professional editing software. manufacturer: sony-pictures-digital-entertainment. price: 99.0. Are Product A and Product B the same? No\n\nProduct A is title: sony sound forge 9 boxed upgrade. manufacturer: sony media. price: 232.05. Product B is title: sound forge audio studio 9. manufacturer: . price: 66.89. Are Product A and Product B the same? No\n\nProduct A is title: vegas movie studiodvd +8 plat. manufacturer: sony media. price: 129.95. Product B is title: sony vegas movie studio + dvd v8 platinum edition video editing software professional editing software. 
manufacturer: . price: 129.95. Are Product A and Product B the same? Yes\n\n", "blizzard entertainment": "Product A is title: world of warcraft 60 day pre-paid time card. manufacturer: blizzard entertainment. price: 29.99. Product B is title: world of wild cats. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft. manufacturer: blizzard entertainment. price: 19.99. Product B is title: starcraft ghost. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft 60 day pre-paid time card. manufacturer: blizzard entertainment. price: 29.99. Product B is title: vivendi blizzard world of warcraft. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft. manufacturer: blizzard entertainment. price: 19.99. Product B is title: vivendi blizzard world of warcraft. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft 60 day pre-paid time card. manufacturer: blizzard entertainment. price: 29.99. Product B is title: world of warcraft burning crusade. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft 60 day pre-paid time card. manufacturer: blizzard entertainment. price: 29.99. Product B is title: vivendi-universal games inc 72305 world of warcraft 60 day prepaid. manufacturer: . price: 27.99. Are Product A and Product B the same? Yes\n\nProduct A is title: world of warcraft expansion : burning crusade. manufacturer: blizzard entertainment. price: 39.99. Product B is title: world of warcraft burning crusade. manufacturer: . price: 39.99. Are Product A and Product B the same? Yes\n\n", "sony-creative-software": "Product A is title: station access collection. manufacturer: sony-creative-software. price: 29.99. Product B is title: adobe cs3 master collection academic. manufacturer: . price: 939.99. Are Product A and Product B the same? No\n\nProduct A is title: station access collection. manufacturer: sony-creative-software. price: 29.99. Product B is title: adobe cs3 master collection. manufacturer: . price: 2499.99. Are Product A and Product B the same? No\n\nProduct A is title: station access collection. manufacturer: sony-creative-software. price: 29.99. Product B is title: ultimate puzzle collection. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: station access collection. manufacturer: sony-creative-software. price: 29.99. Product B is title: rayman 10th anniversary collection. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: station access collection. manufacturer: sony-creative-software. price: 29.99. Product B is title: fallout collection ( dvd-rom ). manufacturer: . price: 24.49. Are Product A and Product B the same? No\n\nProduct A is title: everquest : titanium. manufacturer: sony-creative-software. price: 19.95. Product B is title: sony online entertainment inc 41032 everquest : titanium win 2000 xp. manufacturer: sony-creative-software. price: 23.09. Are Product A and Product B the same? Yes\n\n", "mcafee": "Product A is title: mcafee internet security suite 2007 3 users. manufacturer: mcafee. price: 89.99. Product B is title: pc-cillin internet security 2007 3-user ( win me 2000 xp ). manufacturer: . price: 49.95. Are Product A and Product B the same? No\n\nProduct A is title: mcafee total protection 2007 3 users. 
manufacturer: mcafee. price: 99.99. Product B is title: mtp07emb3rua mcafee total protection 2007 complete package 3 users cd mini-box wi. manufacturer: . price: 86.94. Are Product A and Product B the same? No\n\nProduct A is title: mcafee internet security suite 2007 3 users. manufacturer: mcafee. price: 89.99. Product B is title: f-secure internet security 2007 3 pcs ( global marketing partners ). manufacturer: global-marketing-partners. price: 63.35. Are Product A and Product B the same? No\n\nProduct A is title: mcafee internet security suite 2007 3 users. manufacturer: mcafee. price: 89.99. Product B is title: f-secure internet security 2007 3 pcs ( global marketing partners ). manufacturer: global-marketing-partners. price: 63.35. Are Product A and Product B the same? No\n\nProduct A is title: mcafee total protection 2007 3 users. manufacturer: mcafee. price: 99.99. Product B is title: mcafee inc total protection 2007 3-user. manufacturer: . price: 92.51. Are Product A and Product B the same? Yes\n\nProduct A is title: mcafee virusscan plus 3 user. manufacturer: mcafee. price: 59.99. Product B is title: mcafee inc mcafee virusscan plus sub 1:1 3u mb en. manufacturer: . price: 56.38. Are Product A and Product B the same? Yes\n\nProduct A is title: mcafee internet security suite 2007 3 users. manufacturer: mcafee. price: 89.99. Product B is title: symantec 10725608 norton internet security 2007 internet security 1 user ( s ) pc. manufacturer: . price: 64.97. Are Product A and Product B the same? No\n\n", "adobe systems": "Product A is title: adobe acrobat distiller svr v6-cd linux u/u 42050142 ). manufacturer: adobe systems. price: . Product B is title: adobe 42050106 acrobat distiller svr v6-cd sun 100u. manufacturer: adobe. price: 5399.99. Are Product A and Product B the same? No\n\n", "zero-g": "Product A is title: zero-g pro pack for garageband ( appleloops ). manufacturer: zero-g. price: 169.95. Product B is title: apple garageband jam pack : symphony orchestra. manufacturer: . price: 95.99. Are Product A and Product B the same? No\n\nProduct A is title: zero-g pro pack for garageband ( appleloops ). manufacturer: zero-g. price: 169.95. Product B is title: apple garageband jam pack : rhythm section. manufacturer: . price: 95.99. Are Product A and Product B the same? No\n\nProduct A is title: zero-g pro pack for garageband ( appleloops ). manufacturer: zero-g. price: 169.95. Product B is title: apple garageband jam pack : symphony orchestra. manufacturer: . price: 95.99. Are Product A and Product B the same? No\n\nProduct A is title: zero-g pro pack for garageband ( appleloops ). manufacturer: zero-g. price: 169.95. Product B is title: apple garageband jam pack : world music. manufacturer: . price: 95.99. Are Product A and Product B the same? No\n\n", "dreamcatcher interactive": "Product A is title: sentinel : descendants in time. manufacturer: dreamcatcher interactive. price: 19.99. Product B is title: dreamcatcher interactive sentinel : descendants in time. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: the guild 2 pirates of the seas. manufacturer: dreamcatcher interactive. price: 29.99. Product B is title: imagination international corp pirates of the carribean. manufacturer: . price: 21.26. Are Product A and Product B the same? No\n\nProduct A is title: 3 great games for guys. manufacturer: dreamcatcher interactive. price: 29.99. Product B is title: dreamcatcher interactive 3 great games for guys. manufacturer: . price: 26.14. 
Are Product A and Product B the same? Yes\n\nProduct A is title: the guild 2 pirates of the seas. manufacturer: dreamcatcher interactive. price: 29.99. Product B is title: guild 2 : pirates of the seas. manufacturer: . price: 23.99. Are Product A and Product B the same? Yes\n\n", "findex": "Product A is title: quickverse mac bible study 2007 gold box. manufacturer: findex. price: 349.95. Product B is title: findex.com inc quickverse bible study 2007 bible suite. manufacturer: . price: 24.89. Are Product A and Product B the same? No\n\nProduct A is title: findex quickverse bible study 2007 bible-suite. manufacturer: findex. price: . Product B is title: findex.com inc quickverse mac bible study 2007 white box. manufacturer: . price: 37.18. Are Product A and Product B the same? No\n\nProduct A is title: quickverse mac bible study 2007 gold box. manufacturer: findex. price: 349.95. Product B is title: findex.com inc quickverse mac bible study 2007 gold box. manufacturer: . price: 215.42. Are Product A and Product B the same? Yes\n\nProduct A is title: findex quickverse bible study 2007 bible-suite. manufacturer: findex. price: . Product B is title: findex.com inc quickverse bible study 2007 bible suite. manufacturer: . price: 24.89. Are Product A and Product B the same? Yes\n\nProduct A is title: quickverse mac bible study 2007 gold box. manufacturer: findex. price: 349.95. Product B is title: findex.com inc quickverse mac bible study 2007 white box. manufacturer: . price: 37.18. Are Product A and Product B the same? No\n\nProduct A is title: findex quickverse bible study 2007 bible-suite. manufacturer: findex. price: . Product B is title: findex.com quickverse bible study 2007 mobile deluxe. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\nProduct A is title: quickverse mac bible study 2007 white box. manufacturer: findex. price: 59.95. Product B is title: findex.com inc quickverse mac bible study 2007 gold box. manufacturer: . price: 215.42. Are Product A and Product B the same? No\n\n", "diskeeper corporation": "Product A is title: diskeeper 2007 professional single lic pack. manufacturer: diskeeper corporation. price: 79.21. Product B is title: resume maker professional 12. manufacturer: . price: 17.95. Are Product A and Product B the same? No\n\nProduct A is title: diskeeper 2007 professional single lic pack. manufacturer: diskeeper corporation. price: 79.21. Product B is title: csdc pop-up stopper professional panicware. manufacturer: . price: 26.14. Are Product A and Product B the same? No\n\nProduct A is title: diskeeper 2007 pro premier 5-lic pack. manufacturer: diskeeper corporation. price: 736.66. Product B is title: quicken 2007 premier win cd. manufacturer: intuit. price: 73.99. Are Product A and Product B the same? No\n\nProduct A is title: diskeeper 2007 professional single lic pack. manufacturer: diskeeper corporation. price: 79.21. Product B is title: diskeeper 2007 professional. manufacturer: . price: 46.95. Are Product A and Product B the same? Yes\n\nProduct A is title: diskeeper 2007 professional single lic pack. manufacturer: diskeeper corporation. price: 79.21. Product B is title: microsoft office accounting professional 2007 ( pc ). manufacturer: . price: 229.95. Are Product A and Product B the same? No\n\nProduct A is title: diskeeper 2007 pro premier 10-lic pack. manufacturer: diskeeper corporation. price: 1372.21. Product B is title: quicken 2007 premier win cd. manufacturer: intuit. price: 73.99. Are Product A and Product B the same? 
No\n\n", "total training": "Product A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: 19500220 adobe creative suite 3 design premium product upgrade package 1 user upgra. manufacturer: . price: 1687.48. Are Product A and Product B the same? No\n\nProduct A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: adobe production premium cs3 software suite for windows professional editing software. manufacturer: . price: 1698.95. Are Product A and Product B the same? No\n\nProduct A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: adobe production premium cs3 software suite for mac av production software. manufacturer: . price: 1698.95. Are Product A and Product B the same? No\n\nProduct A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: adobe creative suite 3 design standard complete package 1 user academic windows. manufacturer: . price: 369.0. Are Product A and Product B the same? No\n\nProduct A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: total training for adobe creative suite 3 design premium. manufacturer: . price: 279.95. Are Product A and Product B the same? Yes\n\nProduct A is title: total training for adobe creative suite 3 : design premium bundle. manufacturer: total training. price: 299.99. Product B is title: total training for advanced flash 8 action script. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\n", "scholastic": "Product A is title: math missions : the amazing arcade adventure grades 3-5. manufacturer: scholastic. price: 19.99. Product B is title: cluefinders 3rd grade adventures with adapt. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: clifford reading pack ( dvd box ). manufacturer: scholastic. price: 19.99. Product B is title: clifford thinking. manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\nProduct A is title: math missions : the amazing arcade adventure grades 3-5. manufacturer: scholastic. price: 19.99. Product B is title: math missions : the race to spectacle city arcade ( grades k-2 ). manufacturer: . price: 8.49. Are Product A and Product B the same? No\n\nProduct A is title: clifford reading pack ( dvd box ). manufacturer: scholastic. price: 19.99. Product B is title: clifford the big red dog reading pack. manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: math missions : the race to spectacle city arcade ( grades k-2 ). manufacturer: scholastic. price: 29.95. Product B is title: math missions : the amazing arcade adventure ( grades 3-5 ). manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\n", "watchguard license": "", "zend-technologies-inc .": "Product A is title: zend studio 5.1 ( win/mac ). manufacturer: zend-technologies-inc .. price: 299.0. Product B is title: zend studio 5.1 php integrated development environment. manufacturer: zend-technologies-inc .. price: 249.0. Are Product A and Product B the same? Yes\n\nProduct A is title: zend studio 5.1 ( win/mac ). manufacturer: zend-technologies-inc .. price: 299.0. 
Product B is title: sony acid music studio ( pc ). manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\n", "sony-media-software": "Product A is title: discrete drums : vol .1. manufacturer: sony-media-software. price: 59.95. Product B is title: sony media software discrete drums : vol .1 sony sound series. manufacturer: sony-media-software. price: 24.91. Are Product A and Product B the same? Yes\n\nProduct A is title: sony super duper music looper. manufacturer: sony-media-software. price: 19.99. Product B is title: sony acid music studio ( pc ). manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: photo go. manufacturer: sony-media-software. price: 24.95. Product B is title: books to go. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "yamaha": "Product A is title: mike garson jazzical suite. manufacturer: yamaha. price: 34.95. Product B is title: mike garson the burt bacharach songbook. manufacturer: . price: 25.77. Are Product A and Product B the same? No\n\nProduct A is title: the little mermaid/beauty and the beast. manufacturer: yamaha. price: 34.95. Product B is title: phillip keveren disney 's the little mermaid/beauty and the beast. manufacturer: . price: 23.59. Are Product A and Product B the same? Yes\n\nProduct A is title: academy award winners. manufacturer: yamaha. price: 31.95. Product B is title: bobby walters academy award winners. manufacturer: . price: 23.59. Are Product A and Product B the same? Yes\n\nProduct A is title: hollywood volume 1. manufacturer: yamaha. price: 31.95. Product B is title: phillip keveren 501056 hollywood volume 1. manufacturer: . price: 23.59. Are Product A and Product B the same? Yes\n\nProduct A is title: sunset boulevard. manufacturer: yamaha. price: 31.95. Product B is title: mike garson andrew lloyd webber andrew lloyd webber favorites featuring sunset boulevard. manufacturer: . price: 23.59. Are Product A and Product B the same? Yes\n\nProduct A is title: big tiny little a tiny little christmas. manufacturer: yamaha. price: 34.95. Product B is title: big tiny little big tiny little a tiny little christmas. manufacturer: . price: 23.59. Are Product A and Product B the same? Yes\n\n", "global software": "Product A is title: global software smart steps 2nd grade windows macintosh ). manufacturer: global software. price: . Product B is title: global marketing partners art text belight software. manufacturer: . price: 43.29. Are Product A and Product B the same? No\n\nProduct A is title: global software smart steps 2nd grade windows macintosh ). manufacturer: global software. price: . Product B is title: global software a0447jch dk smart steps 2nd grade win 95 98 me 2000 xp vista. manufacturer: . price: 9.08. Are Product A and Product B the same? Yes\n\nProduct A is title: global software smart steps 2nd grade windows macintosh ). manufacturer: global software. price: . Product B is title: ilford studio software for pc ( windows 2000 pro xp pro ). manufacturer: . price: 699.99. Are Product A and Product B the same? No\n\nProduct A is title: global software smart steps 2nd grade windows macintosh ). manufacturer: global software. price: . Product B is title: phase one capture-one pro dslr raw image editing software for macintosh & windows .. manufacturer: . price: 399.95. Are Product A and Product B the same? No\n\nProduct A is title: global software smart steps 1st grade windows macintosh ). manufacturer: global software. price: . 
Product B is title: global software a0454jch dk smart steps 3rd grade win 95 98 me 2000 xp vista. manufacturer: global-software-publishing. price: 9.08. Are Product A and Product B the same? No\n\nProduct A is title: global software smart steps 1st grade windows macintosh ). manufacturer: global software. price: . Product B is title: phase one capture-one pro dslr raw image editing software for macintosh & windows .. manufacturer: . price: 399.95. Are Product A and Product B the same? No\n\n", "stomp inc": "Product A is title: data protection suite. manufacturer: stomp inc. price: 69.99. Product B is title: arcserve bkup protection suite exch r11 .5 upg-v prod only. manufacturer: . price: 2249.99. Are Product A and Product B the same? No\n\n", "avanquest-publishing-usa-inc .": "Product A is title: power director 3. manufacturer: avanquest-publishing-usa-inc .. price: 79.95. Product B is title: cyberlink power director 3. manufacturer: avanquest-publishing-usa-inc .. price: 9.99. Are Product A and Product B the same? Yes\n\n", "adobe-education-box": "Product A is title: photoshop elements 4 edu mac 1u. manufacturer: adobe-education-box. price: 69.0. Product B is title: adobe systems inc 29180248 adobe photoshop elements 5.0 / premiere elements 3.0. manufacturer: . price: 139.51. Are Product A and Product B the same? No\n\nProduct A is title: dreamweaver cs3 9 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe dreamweaver cs3 complete package 1 user academic cd mac. manufacturer: . price: 189.0. Are Product A and Product B the same? No\n\nProduct A is title: photoshop elements 4 edu mac 1u. manufacturer: adobe-education-box. price: 69.0. Product B is title: adobe photoshop elements 4.0 mac academic. manufacturer: adobe-education-box. price: 69.99. Are Product A and Product B the same? Yes\n\nProduct A is title: dreamweaver cs3 9 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe dreamweaver cs3 academic. manufacturer: adobe-education-box. price: 195.99. Are Product A and Product B the same? Yes\n\nProduct A is title: illustrator cs3 13 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe illustrator cs3 for mac academic. manufacturer: adobe-education-box. price: 199.99. Are Product A and Product B the same? Yes\n\nProduct A is title: audition 2 win ed ue 1u. manufacturer: adobe-education-box. price: 149.0. Product B is title: adobe 22011152 audition2 win retail ue 1u. manufacturer: . price: 296.2. Are Product A and Product B the same? No\n\nProduct A is title: illustrator cs3 13 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe illustrator cs3 for mac academic. manufacturer: adobe-education-box. price: 199.99. Are Product A and Product B the same? Yes\n\nProduct A is title: illustrator cs3 13 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe illustrator cs3 ( mac ). manufacturer: . price: 599.0. Are Product A and Product B the same? No\n\nProduct A is title: dreamweaver cs3 9 mac ed 1u. manufacturer: adobe-education-box. price: 199.0. Product B is title: adobe dreamweaver cs3. manufacturer: . price: 415.99. Are Product A and Product B the same? No\n\n", "global marketing partners": "Product A is title: readiris pro 11. manufacturer: global marketing partners. price: 129.99. Product B is title: readiris pro 11 corporate edition for pc. manufacturer: . price: 349.99. Are Product A and Product B the same? 
No\n\nProduct A is title: readiris pro 11. manufacturer: global marketing partners. price: 129.99. Product B is title: iris inc readiris pro 11 corporate edition. manufacturer: . price: 430.58. Are Product A and Product B the same? No\n\nProduct A is title: cadopia 6 standard edition by cadopia. manufacturer: global marketing partners. price: 295.0. Product B is title: goldmine ( r ) standard edition 6.7. manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\n", "knowledge adventure": "Product A is title: jumpstart advanced preschool v2 .0. manufacturer: knowledge adventure. price: 19.99. Product B is title: knowledge adventure inc. jumpstart advanced preschool v2 .0. manufacturer: . price: 16.93. Are Product A and Product B the same? Yes\n\nProduct A is title: jumpstart reading with karaoke. manufacturer: knowledge adventure. price: 29.99. Product B is title: knowledge adventure 20033 jumpstart reading with karaoke. manufacturer: knowledge adventure. price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: jumpstart world 2nd grade. manufacturer: knowledge adventure. price: 19.99. Product B is title: jumpstart kindergarten ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: jumpstart world 1st grade. manufacturer: knowledge adventure. price: 19.99. Product B is title: knowledge adventure inc. 20223 jumpstart world 1st grade mini box. manufacturer: . price: 16.93. Are Product A and Product B the same? Yes\n\nProduct A is title: jumpstart advanced 1st grade v2 .0. manufacturer: knowledge adventure. price: 19.99. Product B is title: knowledge adventure inc. 20223 jumpstart world 1st grade mini box. manufacturer: . price: 16.93. Are Product A and Product B the same? No\n\nProduct A is title: jumpstart 4th-6th grade v 2.0 b43. manufacturer: knowledge adventure. price: 19.99. Product B is title: knowledge adventure inc. jumpstart 4th-6th grade v 2.0 b43. manufacturer: . price: 16.93. Are Product A and Product B the same? Yes\n\nProduct A is title: jumpstart advanced 3rd grade v2 .0. manufacturer: knowledge adventure. price: 19.99. Product B is title: jumpstart advanced 3rd grade ( pc/mac ) knowledge adventure. manufacturer: . price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: jumpstart world 2nd grade. manufacturer: knowledge adventure. price: 19.99. Product B is title: jumpstart 1st grade ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: jumpstart advanced preschool v2 .0. manufacturer: knowledge adventure. price: 19.99. Product B is title: jumpstart advanced 3rd grade ( pc/mac ) knowledge adventure. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: jumpstart advanced preschool v2 .0. manufacturer: knowledge adventure. price: 19.99. Product B is title: jumpstart advanced 2nd grade ( pc/mac ) knowledge adventure. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "aladdin": "", "global-marketing-partners": "Product A is title: video patrol 5.0 by honest technology. manufacturer: global-marketing-partners. price: 50.86. Product B is title: global marketing partners video patrol 5.0 honest technology. manufacturer: global-marketing-partners. price: 25.2. Are Product A and Product B the same? Yes\n\nProduct A is title: swift publisher for mac. manufacturer: global-marketing-partners. price: 39.99. Product B is title: belight swift publisher page layout software. 
manufacturer: global-marketing-partners. price: 39.99. Are Product A and Product B the same? Yes\n\nProduct A is title: f-secure internet security 2007 3 pcs. manufacturer: global-marketing-partners. price: 79.99. Product B is title: symantec 10725608 norton internet security 2007 internet security 1 user ( s ) pc. manufacturer: . price: 64.97. Are Product A and Product B the same? No\n\nProduct A is title: gmp data rescue license 24300 ). manufacturer: global-marketing-partners. price: 349.0. Product B is title: prosoft engineering data rescue universal license. manufacturer: global-marketing-partners. price: 236.34. Are Product A and Product B the same? Yes\n\nProduct A is title: f-secure internet security 2007 3 pcs. manufacturer: global-marketing-partners. price: 79.99. Product B is title: symantec norton internet security 3.0 software for macintosh apple & mac software. manufacturer: . price: 89.95. Are Product A and Product B the same? No\n\nProduct A is title: drive genius by prosoft engineering inc. manufacturer: global-marketing-partners. price: 129.0. Product B is title: prosoft engineering data rescue universal license. manufacturer: global-marketing-partners. price: 236.34. Are Product A and Product B the same? No\n\nProduct A is title: easy video editor 2.0 by honest technology. manufacturer: global-marketing-partners. price: 50.88. Product B is title: global marketing partners easy video editor 2.0 honest technology. manufacturer: global-marketing-partners. price: 25.21. Are Product A and Product B the same? Yes\n\nProduct A is title: swift publisher for mac. manufacturer: global-marketing-partners. price: 39.99. Product B is title: allume poser 6 for mac. manufacturer: curious-labs-inc .. price: 169.99. Are Product A and Product B the same? No\n\nProduct A is title: f-secure internet security 2007 3 pcs. manufacturer: global-marketing-partners. price: 79.99. Product B is title: mcafee internet security 7.0. manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\n", "sierra home": "Product A is title: hallmark card studio 2006. manufacturer: sierra home. price: 29.99. Product B is title: hallmark card studio 2004. manufacturer: . price: 29.97. Are Product A and Product B the same? No\n\nProduct A is title: hallmark card studio 2005 deluxe. manufacturer: sierra home. price: 49.99. Product B is title: hallmark card studio 2004. manufacturer: . price: 29.97. Are Product A and Product B the same? No\n\nProduct A is title: hallmark card studio 2005 deluxe. manufacturer: sierra home. price: 49.99. Product B is title: hgw hallmark card studio 2005 deluxe complete package 1 user cd win. manufacturer: . price: 47.43. Are Product A and Product B the same? No\n\nProduct A is title: hallmark card studio 2006. manufacturer: sierra home. price: 29.99. Product B is title: sierrahome hse hallmark card studio special edition win 98 me 2000 xp. manufacturer: sierrahome. price: 19.26. Are Product A and Product B the same? No\n\nProduct A is title: hallmark card studio 2005 deluxe. manufacturer: sierra home. price: 49.99. Product B is title: sierrahome hse hallmark card studio special edition win 98 me 2000 xp. manufacturer: sierrahome. price: 19.26. Are Product A and Product B the same? No\n\n", "viva media": "Product A is title: learn to play chess with fritz and chesster ( mac ). manufacturer: viva media. price: 29.99. Product B is title: viva media llc learn to play chess with fritz & chesster 2. manufacturer: . price: 26.14. Are Product A and Product B the same? 
No\n\nProduct A is title: my sim aquarium. manufacturer: viva media. price: 19.99. Product B is title: marine aquarium & goldfish aquarium. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: learn to play chess with fritz and chesster ( mac ). manufacturer: viva media. price: 29.99. Product B is title: viva media llc learn to play chess with fritz & chesster. manufacturer: . price: 26.14. Are Product A and Product B the same? Yes\n\n", "palo-alto-software": "Product A is title: palo alto marketing plan pro 9.0. manufacturer: palo-alto-software. price: 179.0. Product B is title: palo alto software business plan pro 2007. manufacturer: palo-alto-software. price: 88.83. Are Product A and Product B the same? No\n\nProduct A is title: palo alto business plan pro premier 2007. manufacturer: palo-alto-software. price: 199.0. Product B is title: quicken ( r ) premier 2007. manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: palo alto business plan pro premier 2007. manufacturer: palo-alto-software. price: 199.0. Product B is title: business plan pro ( r ) 2004. manufacturer: . price: 99.87. Are Product A and Product B the same? No\n\nProduct A is title: palo alto marketing plan pro 9.0. manufacturer: palo-alto-software. price: 179.0. Product B is title: palo alto software mpd9 palo alto marketing plan v. 9.0 pro complete product business planning 1 user complete product standard pc. manufacturer: palo-alto-software. price: 149.97. Are Product A and Product B the same? Yes\n\nProduct A is title: palo alto business plan pro premier 2007. manufacturer: palo-alto-software. price: 199.0. Product B is title: palo alto software bpdx palo alto business plan 2007 pro premier edition complete product business planning 1 user complete product standard pc. manufacturer: palo-alto-software. price: 184.97. Are Product A and Product B the same? Yes\n\nProduct A is title: palo alto business plan pro premier 2007. manufacturer: palo-alto-software. price: 199.0. Product B is title: myob premier accounting for small business 2007-windows. manufacturer: . price: 289.99. Are Product A and Product B the same? No\n\nProduct A is title: palo alto business plan pro 2007. manufacturer: palo-alto-software. price: 99.95. Product B is title: palo alto software bppx palo alto business plan 2007 pro complete product business planning 1 user complete product standard pc. manufacturer: . price: 83.97. Are Product A and Product B the same? Yes\n\nProduct A is title: palo alto business plan pro 2007. manufacturer: palo-alto-software. price: 99.95. Product B is title: palo alto software mpd9 palo alto marketing plan v. 9.0 pro complete product business planning 1 user complete product standard pc. manufacturer: palo-alto-software. price: 149.97. Are Product A and Product B the same? No\n\nProduct A is title: palo alto business plan pro 2007. manufacturer: palo-alto-software. price: 99.95. Product B is title: palo alto software business plan pro 2007. manufacturer: palo-alto-software. price: 88.83. Are Product A and Product B the same? Yes\n\n", "ubi soft": "Product A is title: ubi soft star wars activity center windows ). manufacturer: ubi soft. price: 29.99. Product B is title: the beginners bible : noah 's ark activity center : activity center. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: rayman 10th anniversary collection. manufacturer: ubi soft. price: 19.99. 
Product B is title: rayman 10th anniversary collection. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: ubi soft star wars activity center windows ). manufacturer: ubi soft. price: 29.99. Product B is title: star wars episode iii activity center. manufacturer: . price: 10.95. Are Product A and Product B the same? No\n\nProduct A is title: rayman 10th anniversary collection. manufacturer: ubi soft. price: 19.99. Product B is title: ultimate puzzle collection. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\n", "nuance-communications-inc .": "Product A is title: dragon naturally speaking standard v9. manufacturer: nuance-communications-inc .. price: 99.99. Product B is title: a209s-gd4-9 .0 dragon naturallyspeaking professional v. 9 complete package 1 user c. manufacturer: . price: 786.91. Are Product A and Product B the same? No\n\nProduct A is title: ibm viavoice advanced v. 10. manufacturer: nuance-communications-inc .. price: . Product B is title: nuance communications inc. ibm viavoice advanced v. 10. manufacturer: nuance-communications-inc .. price: 57.54. Are Product A and Product B the same? Yes\n\nProduct A is title: textbridge pro 11. manufacturer: nuance-communications-inc .. price: 79.99. Product B is title: scansoft textbridge pro v. 11. manufacturer: nuance-communications-inc .. price: 68.99. Are Product A and Product B the same? Yes\n\n", "cisco-systems-enterprise": "Product A is title: cisco security server agent for windows and solaris license 100 agents win solaris csa-b100-srvr-k9 ). manufacturer: cisco-systems-enterprise. price: . Product B is title: cisco csa-b250-dtop-k9 cisco security desktop agent win + sol 250 agent bundle cisco security desktop agent license 250 agents 0746320832500. manufacturer: . price: 10267.54. Are Product A and Product B the same? No\n\n", "carry-a-tune technologies": "", "computer associates": "Product A is title: ca arcserve bkup protection suite exch r11 .5 upgrade prod only. manufacturer: computer associates. price: . Product B is title: arcserve bkup protection suite exch r11 .5 upg-v prod only. manufacturer: . price: 2249.99. Are Product A and Product B the same? Yes\n\nProduct A is title: ca arcserve bkup r11 .5 win agent mac os x upgrade prod only. manufacturer: computer associates. price: . Product B is title: arcserve bkup protection suite exch r11 .5 upg-v prod only. manufacturer: . price: 2249.99. Are Product A and Product B the same? No\n\nProduct A is title: ca arcserve bkup r11 .5 win client vss software snap-shot - prod only. manufacturer: computer associates. price: . Product B is title: computer associates babwbr1151s33 brightstor arcserve backup client for vss software snap-shot v. 11.5 for windows with service pack 1 add-on backup & recovery 1 server ( s ) complete product standard multilingual pc. manufacturer: . price: 799.97. Are Product A and Product B the same? Yes\n\nProduct A is title: ca internet security suite 2007 3 user. manufacturer: computer associates. price: 69.95. Product B is title: norton internet security ( v3 .0 ) mac symantec 10069614-in. manufacturer: . price: 83.43. Are Product A and Product B the same? No\n\nProduct A is title: ca brightstor arcserve backup r11 .1 for netware tape library option service pack. manufacturer: computer associates. price: . Product B is title: e17886d symantec backup exec for netware servers san shared storage option v. 9.2 ). manufacturer: . price: 654.28. Are Product A and Product B the same? 
No\n\nProduct A is title: ca antivirus 2007. manufacturer: computer associates. price: 39.95. Product B is title: ca antivirus 2007. manufacturer: . price: 32.28. Are Product A and Product B the same? Yes\n\nProduct A is title: ca internet security suite 2007 3 user. manufacturer: computer associates. price: 69.95. Product B is title: quickbooks pro 2007 3 user. manufacturer: intuit. price: 398.99. Are Product A and Product B the same? No\n\nProduct A is title: ca brightstor arcserve backup r11 .1 for netware upgrade from arcserve 7 for netware. manufacturer: computer associates. price: . Product B is title: babwbr1151s04 brightstor arcserve backup serverless backup option for windows w/sp1 v. 11 .. manufacturer: . price: 1031.41. Are Product A and Product B the same? No\n\n", "netgear": "Product A is title: netgear vpn01l vpn client software. manufacturer: netgear. price: 89.99. Product B is title: prosafe vpn client software single user netgear inc. vpn01l. manufacturer: . price: 27.78. Are Product A and Product B the same? No\n\nProduct A is title: netgear vpn01l vpn client software. manufacturer: netgear. price: 89.99. Product B is title: software. manufacturer: . price: 112.95. Are Product A and Product B the same? No\n\nProduct A is title: netgear vpn01l vpn client software. manufacturer: netgear. price: 89.99. Product B is title: software. manufacturer: . price: 46.99. Are Product A and Product B the same? No\n\nProduct A is title: netgear vpn01l vpn client software. manufacturer: netgear. price: 89.99. Product B is title: d-link vpn client software ( 5-user license ) ds-605 ds-605. manufacturer: . price: 173.99. Are Product A and Product B the same? No\n\nProduct A is title: netgear vpn01l vpn client software. manufacturer: netgear. price: 89.99. Product B is title: procurve vpn client software unlimited client licenses. manufacturer: . price: 2598.96. Are Product A and Product B the same? No\n\n", "century software": "Product A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . Product B is title: century software ttpl-1 century tinyterm plus v. 4.0 complete product connectivity/data acess 1 user ( s ) complete product standard english pc. manufacturer: . price: 188.97. Are Product A and Product B the same? Yes\n\nProduct A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . Product B is title: encore software 25005 encore marine aquarium v. 2.0 complete product screen saver 1 user ( s ) complete product standard pc mac. manufacturer: . price: 18.97. Are Product A and Product B the same? No\n\nProduct A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . Product B is title: onone software inc. mpr-40211 on1 mask pro v. 4.0 complete product graphics/designing 1 user complete product standard pc intel-based mac mac. manufacturer: . price: 148.97. Are Product A and Product B the same? No\n\nProduct A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . Product B is title: laplink software pafglle104000p0rtpen laplink everywhere v. 4.0 retail edition remote management complete product remote management complete product pc. manufacturer: . price: 39.97. Are Product A and Product B the same? No\n\nProduct A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . 
Product B is title: encore software 10033 encore the print shop v. 21.0 complete product print management 1 user ( s ) complete product standard pc. manufacturer: . price: 25.97. Are Product A and Product B the same? No\n\nProduct A is title: century software tinyterm plus v. 4.0 complete product. manufacturer: century software. price: . Product B is title: century software tt-5 century tinyterm v. 4.2 x complete product emulation 5 user ( s ) complete product standard pc. manufacturer: . price: 395.97. Are Product A and Product B the same? No\n\n", "pinnacle": "Product A is title: pinnacle mobile media converter. manufacturer: pinnacle. price: 29.99. Product B is title: pinnacle mobile media converter software for windows ipod software. manufacturer: . price: 29.95. Are Product A and Product B the same? Yes\n\nProduct A is title: studio movieboard plus pci hardware. manufacturer: pinnacle. price: 251.55. Product B is title: studio plus version 11. manufacturer: pinnacle systems. price: 90.99. Are Product A and Product B the same? No\n\nProduct A is title: pinnacle studio plus titanium edition v10 .5. manufacturer: pinnacle. price: 69.99. Product B is title: pinnacle studio ultimate v11 video editing software for windows consumer video editing software. manufacturer: . price: 129.95. Are Product A and Product B the same? No\n\nProduct A is title: pinnacle mobile media organizer. manufacturer: pinnacle. price: 49.99. Product B is title: pinnacle mobile media converter software for windows ipod software. manufacturer: . price: 29.95. Are Product A and Product B the same? No\n\nProduct A is title: pinnacle mobile media converter. manufacturer: pinnacle. price: 29.99. Product B is title: pinnacle mobile media organizer software for windows ipod software. manufacturer: . price: 39.95. Are Product A and Product B the same? No\n\n", "stompsoft inc.": "Product A is title: digital vault. manufacturer: stompsoft inc.. price: 29.99. Product B is title: digital video & audio prod book. manufacturer: sony-pictures-digital-entertainment. price: 27.5. Are Product A and Product B the same? No\n\nProduct A is title: digital vault. manufacturer: stompsoft inc.. price: 29.99. Product B is title: digital video & audio prod book. manufacturer: sony-pictures-digital-entertainment. price: 27.5. Are Product A and Product B the same? No\n\nProduct A is title: stompsoft digital file shredder pro. manufacturer: stompsoft inc.. price: 24.99. Product B is title: migo software digital file shredder 2005. manufacturer: . price: 24.58. Are Product A and Product B the same? Yes\n\n", "riverdeep": "Product A is title: riverdeep adventure workshop 4th-6th grade 8th edition. manufacturer: riverdeep. price: . Product B is title: encore software 10119 adventure workshop 4th-6th grade 7th edition win xp. manufacturer: . price: 18.97. Are Product A and Product B the same? No\n\nProduct A is title: riverdeep adventure workshop 4th-6th grade 8th edition. manufacturer: riverdeep. price: . Product B is title: adventure workshop 1st-3rd grade . 8th edition ( pc ) encore. manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: riverdeep adventure workshop 4th-6th grade 8th edition. manufacturer: riverdeep. price: . Product B is title: adventure workshop preschool-1st grade 8th edition win. manufacturer: . price: 18.36. Are Product A and Product B the same? No\n\nProduct A is title: riverdeep arthur 's kindergarten. manufacturer: riverdeep. price: . 
Product B is title: tlc arthur 's kindergarten learning system ( pc ) encore. manufacturer: . price: 19.95. Are Product A and Product B the same? Yes\n\nProduct A is title: riverdeep the print shop zoom dvd. manufacturer: riverdeep. price: . Product B is title: encore software 11941 the print shop zoom. manufacturer: . price: 17.97. Are Product A and Product B the same? Yes\n\nProduct A is title: riverdeep arthur 's kindergarten. manufacturer: riverdeep. price: . Product B is title: jumpstart kindergarten ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: treasure mountain ! ( jewel case ) ages 5-9 for win/mac. manufacturer: riverdeep. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\n", "laughing bird": "Product A is title: the logo creator ( mac ). manufacturer: laughing bird. price: 39.99. Product B is title: laughingbird the logo creator. manufacturer: . price: 34.99. Are Product A and Product B the same? Yes\n\nProduct A is title: net ad creator. manufacturer: laughing bird. price: 39.99. Product B is title: laughingbird the logo creator. manufacturer: . price: 34.99. Are Product A and Product B the same? No\n\nProduct A is title: net ad creator. manufacturer: laughing bird. price: 39.99. Product B is title: laughingbird the logo creator. manufacturer: . price: 34.99. Are Product A and Product B the same? No\n\n", "absolute software": "Product A is title: computrace lojack for laptops 3 year license ( mac ). manufacturer: absolute software. price: 99.95. Product B is title: absolute software lfl-m2-36 computrace lojack for laptops 3 year license mac 10.2 or higher. manufacturer: . price: 80.97. Are Product A and Product B the same? Yes\n\nProduct A is title: computrace lojack for laptops 4 year subscription. manufacturer: absolute software. price: 119.99. Product B is title: absolute software lfl-n2-36 computrace lojack for laptops 3 year license ( win 2000 xp ). manufacturer: . price: 85.97. Are Product A and Product B the same? No\n\nProduct A is title: computrace lojack for laptops 4 year subscription. manufacturer: absolute software. price: 119.99. Product B is title: absolute software lfl-n2-48 computrace lojack for laptops 4 year license. manufacturer: . price: 105.68. Are Product A and Product B the same? Yes\n\nProduct A is title: computrace lojack for laptops 1 year license ( mac ). manufacturer: absolute software. price: 49.95. Product B is title: lfl-n2-12 computrace lojack for laptops subscription package 1 year 1 notebook m. manufacturer: . price: 39.95. Are Product A and Product B the same? No\n\nProduct A is title: computrace lojack for laptops : 3 year subscription. manufacturer: absolute software. price: 99.99. Product B is title: absolute software lfl-n2-36 computrace lojack for laptops 3 year license ( win 2000 xp ). manufacturer: . price: 85.97. Are Product A and Product B the same? Yes\n\nProduct A is title: computrace lojack for laptops 3 year license ( mac ). manufacturer: absolute software. price: 99.95. Product B is title: computrace lojack for laptops 3 yr subscription. manufacturer: . price: 89.99. Are Product A and Product B the same? Yes\n\nProduct A is title: computrace lojack for laptops 3 year license ( mac ). manufacturer: absolute software. price: 99.95. Product B is title: absolute software lfl-n2-36 computrace lojack for laptops 3 year license ( win 2000 xp ). manufacturer: . price: 85.97. 
Are Product A and Product B the same? No\n\nProduct A is title: computrace lojack for laptops 1 year license ( mac ). manufacturer: absolute software. price: 49.95. Product B is title: lfl-n2-12 computrace lojack for laptops subscription package 1 year 1 notebook m. manufacturer: . price: 39.95. Are Product A and Product B the same? No\n\nProduct A is title: computrace lojack for laptops : 3 year subscription. manufacturer: absolute software. price: 99.99. Product B is title: absolute software lfl-m2-12 computrace lojack for laptops 1 year license mac 10.2 or higher. manufacturer: . price: 44.97. Are Product A and Product B the same? No\n\nProduct A is title: computrace lojack for laptops : 1 year subscription. manufacturer: absolute software. price: 49.99. Product B is title: absolute software lfl-n2-12 computrace lojack for laptops 1 year license ( win 2000 xp ). manufacturer: . price: 41.97. Are Product A and Product B the same? Yes\n\n", "individual": "Product A is title: individual small business advantage deluxe 2006. manufacturer: individual. price: 49.99. Product B is title: high school success deluxe 2006. manufacturer: . price: 7.9. Are Product A and Product B the same? No\n\nProduct A is title: individual small business advantage deluxe 2006. manufacturer: individual. price: 49.99. Product B is title: individual software prm-bp5 business planmaker 2006 deluxe win 98 me 2000 xp. manufacturer: . price: 42.97. Are Product A and Product B the same? No\n\nProduct A is title: individual small business advantage deluxe 2006. manufacturer: individual. price: 49.99. Product B is title: individual software inc typing instructor deluxe j/c. manufacturer: . price: 6.84. Are Product A and Product B the same? No\n\nProduct A is title: individual small business advantage deluxe 2006. manufacturer: individual. price: 49.99. Product B is title: individual software anytime organizer deluxe 10. manufacturer: . price: 27.99. Are Product A and Product B the same? No\n\nProduct A is title: individual small business advantage deluxe 2006. manufacturer: individual. price: 49.99. Product B is title: elementary school success deluxe 2006. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\n", "avanquest software": "Product A is title: britannica deluxe. manufacturer: avanquest software. price: 29.95. Product B is title: britannica deluxe 2008. manufacturer: . price: 26.5. Are Product A and Product B the same? No\n\nProduct A is title: britannica deluxe. manufacturer: avanquest software. price: 29.95. Product B is title: business planmaker deluxe. manufacturer: . price: 49.99. Are Product A and Product B the same? No\n\nProduct A is title: britannica deluxe. manufacturer: avanquest software. price: 29.95. Product B is title: vhs to dvd 3.0 deluxe. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\nProduct A is title: britannica deluxe. manufacturer: avanquest software. price: 29.95. Product B is title: photoshow deluxe. manufacturer: . price: 39.92. Are Product A and Product B the same? No\n\nProduct A is title: britannica deluxe. manufacturer: avanquest software. price: 29.95. Product B is title: freeverse 3d bridge deluxe. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\n", "susteen": "Product A is title: cell phone software solution. manufacturer: susteen. price: 113.1. Product B is title: pcmover software utility. manufacturer: . price: 39.96. Are Product A and Product B the same? 
No\n\nProduct A is title: cell phone software solution. manufacturer: susteen. price: 113.1. Product B is title: freeverse software 005 solace. manufacturer: freeverse software. price: 18.99. Are Product A and Product B the same? No\n\nProduct A is title: datapilot cell phone data transfer suite universal. manufacturer: susteen. price: 79.99. Product B is title: susteen data pilot universal essentials. manufacturer: . price: 59.99. Are Product A and Product B the same? No\n\nProduct A is title: cell phone software solution. manufacturer: susteen. price: 113.1. Product B is title: freeverse software 005 solace. manufacturer: freeverse software. price: 18.99. Are Product A and Product B the same? No\n\n", "h & r block": "Product A is title: h & r block taxcut 2006 premium federal + state + efile. manufacturer: h & r block. price: 59.99. Product B is title: h & r block taxcut home and business with state and e-file efile software for windows tax & finance software. manufacturer: . price: 73.95. Are Product A and Product B the same? No\n\nProduct A is title: h & r block taxcut 2006 premium federal + state + efile. manufacturer: h & r block. price: 59.99. Product B is title: h & r block taxcut home and business with state e-file efile and sage act ! 8 win tax & finance software. manufacturer: . price: 73.95. Are Product A and Product B the same? No\n\nProduct A is title: h & r block taxcut 2006 home & business + state. manufacturer: h & r block. price: 79.99. Product B is title: h & r block taxcut premium federal and state with e-file efile software mac/win tax & finance software. manufacturer: . price: 54.95. Are Product A and Product B the same? No\n\nProduct A is title: h & r block taxcut 2006 premium federal + state with usb 256m flashdrive. manufacturer: h & r block. price: 39.99. Product B is title: h & r block taxcut premium federal and state with e-file efile software mac/win tax & finance software. manufacturer: . price: 54.95. Are Product A and Product B the same? No\n\nProduct A is title: h & r block taxcut 2006 home & business + state. manufacturer: h & r block. price: 79.99. Product B is title: h & r block taxcut home and business with state and e-file efile software for windows tax & finance software. manufacturer: . price: 73.95. Are Product A and Product B the same? Yes\n\nProduct A is title: h & r block taxcut 2006 premium federal + state with usb 256m flashdrive. manufacturer: h & r block. price: 39.99. Product B is title: h & r block taxcut home and business with state e-file efile and sage act ! 8 win tax & finance software. manufacturer: . price: 73.95. Are Product A and Product B the same? No\n\nProduct A is title: h & r block taxcut 2006 premium federal + state + efile. manufacturer: h & r block. price: 59.99. Product B is title: h & r block taxcut premium federal and state with e-file efile software mac/win tax & finance software. manufacturer: . price: 54.95. Are Product A and Product B the same? Yes\n\n", "bias": "Product A is title: bias peak le 5. manufacturer: bias. price: 129.0. Product B is title: bias peak pro 5 software music production software. manufacturer: . price: 499.0. Are Product A and Product B the same? No\n\nProduct A is title: bias peak pro 5 macintosh ). manufacturer: bias. price: 599.0. Product B is title: bias peak pro 5 software music production software. manufacturer: . price: 499.0. Are Product A and Product B the same? Yes\n\nProduct A is title: bias deck le 3.5 macintosh cd. manufacturer: bias. price: 99.0. 
Product B is title: bias deck 3.5 software music production software. manufacturer: . price: 299.0. Are Product A and Product B the same? No\n\n", "motu": "Product A is title: motu digital performer 5 digital audio software ( mac only ). manufacturer: motu. price: 795.0. Product B is title: motu digital performer dp5 software music production software. manufacturer: . price: 319.95. Are Product A and Product B the same? No\n\nProduct A is title: motu digital performer 5 digital audio software competitive upgrade ( mac only ). manufacturer: motu. price: 395.0. Product B is title: apple shake 4.1 digital compositing software for mac os x effects software. manufacturer: . price: 498.95. Are Product A and Product B the same? No\n\nProduct A is title: motu digital performer 5 digital audio software competitive upgrade ( mac only ). manufacturer: motu. price: 395.0. Product B is title: steinberg wavelab studio 6 audio editing software competitive crossgrade music production software. manufacturer: . price: 199.95. Are Product A and Product B the same? No\n\nProduct A is title: motu digital performer 5 digital audio software competitive upgrade ( mac only ). manufacturer: motu. price: 395.0. Product B is title: motu digital performer dp5 software music production software. manufacturer: . price: 319.95. Are Product A and Product B the same? Yes\n\nProduct A is title: motu digital performer 5 digital audio software competitive upgrade ( mac only ). manufacturer: motu. price: 395.0. Product B is title: motu digital performer dp5 software music production software. manufacturer: . price: 319.95. Are Product A and Product B the same? Yes\n\nProduct A is title: motu digital performer 5 digital audio software ( mac only ). manufacturer: motu. price: 795.0. Product B is title: migo software digital vault 2005. manufacturer: . price: 24.58. Are Product A and Product B the same? No\n\nProduct A is title: motu digital performer 5 digital audio software competitive upgrade ( mac only ). manufacturer: motu. price: 395.0. Product B is title: cakewalk sonar 5 producer edition competative upgrade music production software. manufacturer: . price: 299.0. Are Product A and Product B the same? No\n\n", "neso usa": "Product A is title: nero 7 ultra edition enhanced pc & home entrmnt soln. manufacturer: neso usa. price: 99.99. Product B is title: nero nero 7 ultra edition enhanced cd/dvd burning software for windows authoring software. manufacturer: . price: 79.99. Are Product A and Product B the same? Yes\n\nProduct A is title: nero 7 ultra edition enhanced pc & home entrmnt soln. manufacturer: neso usa. price: 99.99. Product B is title: ahead software 70115 nero 7 ultra edition enhanced mini pc & home entrmnt soln. manufacturer: . price: 83.97. Are Product A and Product B the same? No\n\nProduct A is title: nero 7 ultra edition enhanced pc & home entrmnt soln. manufacturer: neso usa. price: 99.99. Product B is title: 70009 nero ultra edition enhanced v. 7 complete package 1 user cd win. manufacturer: . price: 87.76. Are Product A and Product B the same? No\n\n", "watchguard technologies inc": "Product A is title: watchguard 1yr subs firebox x5 webblocker wg017196 ). manufacturer: watchguard technologies inc. price: 159.9. Product B is title: wg017449 watchguard firebox x5500e utm software suite subscription license 1 year +. manufacturer: . price: 3715.48. Are Product A and Product B the same? No\n\nProduct A is title: upg serverlock for solaris gold. manufacturer: watchguard technologies inc. price: . 
Product B is title: wg7569 watchguard livesecurity service gold watchguard serverlock for solaris technic. manufacturer: . price: 2279.02. Are Product A and Product B the same? No\n\nProduct A is title: watchguard 1yr subs firebox x5 webblocker wg017196 ). manufacturer: watchguard technologies inc. price: 159.9. Product B is title: wg017285 watchguard spamblocker subscription license renewal 1 year 1 appliance. manufacturer: . price: 1335.25. Are Product A and Product B the same? No\n\nProduct A is title: vpn manager five to ten firebox. manufacturer: watchguard technologies inc. price: 1995.0. Product B is title: vpn manager 10 fireboxes watchguard wg4010. manufacturer: . price: 831.36. Are Product A and Product B the same? No\n\n", "selectsoft publishing": "Product A is title: superstart ! science arcade. manufacturer: selectsoft publishing. price: 24.95. Product B is title: superstart ! fun with reading & writing !. manufacturer: . price: 8.49. Are Product A and Product B the same? No\n\nProduct A is title: quickstart pc cleaner xp. manufacturer: selectsoft publishing. price: 19.95. Product B is title: quickstart photo montage studio. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: quickstudy u.s. government. manufacturer: selectsoft publishing. price: 19.95. Product B is title: quickstudy us government. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: wacky bird hunter. manufacturer: selectsoft publishing. price: 19.95. Product B is title: wacky farm. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: pressure pop !. manufacturer: selectsoft publishing. price: 19.95. Product B is title: pressure pop !. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\n", "learning company": "Product A is title: super solvers mission t.h.i.n.k 25 user site license. manufacturer: learning company. price: 399.95. Product B is title: super solvers mission think 25 user site license. manufacturer: . price: 79.95. Are Product A and Product B the same? No\n\nProduct A is title: super solvers spellbound ! ! school edition for mac. manufacturer: learning company. price: 49.95. Product B is title: super solvers spellbound ! ! school edition for mac. manufacturer: . price: 17.95. Are Product A and Product B the same? No\n\nProduct A is title: bodyworks 6.0 25-user building site license. manufacturer: learning company. price: 399.95. Product B is title: bodyworks 6.0 25-user building site license. manufacturer: . price: 89.95. Are Product A and Product B the same? No\n\n", "m-and-r-technologies": "Product A is title: cross stitch design studio ( jewel case ). manufacturer: m-and-r-technologies. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? Yes\n\n", "emedia music": "Product A is title: emedia guitar basics. manufacturer: emedia music. price: . Product B is title: emedia music corp emedia blues guitar legends. manufacturer: . price: 24.81. Are Product A and Product B the same? No\n\nProduct A is title: emedia guitar basics. manufacturer: emedia music. price: . Product B is title: emedia music corp my guitar. manufacturer: . price: 24.81. Are Product A and Product B the same? No\n\nProduct A is title: emedia guitar basics. manufacturer: emedia music. price: . Product B is title: emedia essential bass guitar dvd. manufacturer: . price: 16.95. 
Are Product A and Product B the same? No\n\n", "sony-computer-entertainment": "Product A is title: sony psp media manager. manufacturer: sony-computer-entertainment. price: 24.99. Product B is title: personal imagemanager. manufacturer: smith-micro-software. price: 22.9. Are Product A and Product B the same? No\n\n", "d-link systems inc. .": "Product A is title: vpn client software 5 users. manufacturer: d-link systems inc. .. price: . Product B is title: rfipsc2-1 ipsec vpn client license 1 user. manufacturer: . price: 57.2. Are Product A and Product B the same? No\n\nProduct A is title: vpn client software 5 users. manufacturer: d-link systems inc. .. price: . Product B is title: sr2116010 safenet softremote vpn client license 5 users win. manufacturer: . price: 184.57. Are Product A and Product B the same? No\n\nProduct A is title: vpn client software 5 users. manufacturer: d-link systems inc. .. price: . Product B is title: d-link vpn client software ( 1-user license ) ds-601 ds-601. manufacturer: . price: 42.99. Are Product A and Product B the same? No\n\n", "destineer": "Product A is title: age of empires iii : the warchiefs expansion pack ( mac ). manufacturer: destineer. price: 34.95. Product B is title: the sims : hot date expansion pack. manufacturer: . price: 19.87. Are Product A and Product B the same? No\n\nProduct A is title: starship troopers/first to fight bundle. manufacturer: destineer. price: 19.95. Product B is title: destineer publishing coporation starship troopers/first to fight bundle. manufacturer: . price: 15.08. Are Product A and Product B the same? Yes\n\nProduct A is title: age of empires iii : the warchiefs expansion pack ( mac ). manufacturer: destineer. price: 34.95. Product B is title: age of empires iii : warchiefs expansion pack. manufacturer: . price: 34.95. Are Product A and Product B the same? Yes\n\nProduct A is title: age of empires iii : the warchiefs expansion pack ( mac ). manufacturer: destineer. price: 34.95. Product B is title: destineer inc age of empires iii : the warchiefs expansion pack. manufacturer: . price: 26.14. Are Product A and Product B the same? Yes\n\nProduct A is title: age of empires iii. manufacturer: destineer. price: 49.99. Product B is title: age of empires iii : warchiefs expansion pack. manufacturer: . price: 34.95. Are Product A and Product B the same? No\n\n", "smith-micro-software": "Product A is title: stuffit deluxe v 10.0 ( mac ). manufacturer: smith-micro-software. price: 79.99. Product B is title: allume stuffit deluxe 10.0 os x 10.4 or higher. manufacturer: smith-micro-software. price: 69.99. Are Product A and Product B the same? Yes\n\nProduct A is title: stuffit deluxe v 10.0 ( mac ). manufacturer: smith-micro-software. price: 79.99. Product B is title: allume systems inc stuffit deluxe 11. manufacturer: . price: 42.74. Are Product A and Product B the same? No\n\nProduct A is title: allume boost xp for windows. manufacturer: smith-micro-software. price: 29.99. Product B is title: abacus scrapbook for windows. manufacturer: . price: 26.14. Are Product A and Product B the same? No\n\nProduct A is title: internet cleanup 4.0 mac os 10.4 or higher. manufacturer: smith-micro-software. price: 29.99. Product B is title: allume stuffit deluxe 10.0 os x 10.4 or higher. manufacturer: smith-micro-software. price: 69.99. Are Product A and Product B the same? No\n\nProduct A is title: aquazone bass edition. manufacturer: smith-micro-software. price: 19.99. Product B is title: allume aquazone bass edition. 
manufacturer: smith-micro-software. price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: allume boost xp for windows. manufacturer: smith-micro-software. price: 29.99. Product B is title: act ! 2004 for windows. manufacturer: . price: 249.95. Are Product A and Product B the same? No\n\nProduct A is title: allume boost xp for windows. manufacturer: smith-micro-software. price: 29.99. Product B is title: powerhouse migo for ipod for windows. manufacturer: . price: 71.99. Are Product A and Product B the same? No\n\n", "serious magic": "Product A is title: serious magic ultra2 master sets library 2 windows ). manufacturer: serious magic. price: 276.5. Product B is title: serious magic visual communicator 2 web presentation software ( windows ) presentation software. manufacturer: . price: 189.95. Are Product A and Product B the same? No\n\nProduct A is title: serious magic ultra2 master sets library 2 windows ). manufacturer: serious magic. price: 276.5. Product B is title: serious magic ultra master sets library 1 effects software. manufacturer: . price: 339.95. Are Product A and Product B the same? Yes\n\n", "sony": "Product A is title: sony acid pro 5. manufacturer: sony. price: 399.95. Product B is title: sony media software acid pro 6 software upgrade from acid pro 5 music production software. manufacturer: sony-pictures-digital-entertainment. price: 129.95. Are Product A and Product B the same? No\n\nProduct A is title: sony acid pro 5. manufacturer: sony. price: 399.95. Product B is title: zone alarm pro 5. manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\nProduct A is title: sony acid pro 5. manufacturer: sony. price: 399.95. Product B is title: sony acid music studio ( pc ). manufacturer: . price: 69.99. Are Product A and Product B the same? No\n\n", "infogrames": "Product A is title: tonka search & rescue 2. manufacturer: infogrames. price: . Product B is title: abacus civil air patrol search & rescue. manufacturer: . price: 26.14. Are Product A and Product B the same? No\n\n", "iris inc.": "Product A is title: readiris pro 11 corporate edition. manufacturer: iris inc.. price: 399.99. Product B is title: midisoft corporation guitar tool worship edition. manufacturer: . price: 43.36. Are Product A and Product B the same? No\n\nProduct A is title: readiris pro 11 corporate edition. manufacturer: iris inc.. price: 399.99. Product B is title: iris inc readiris pro 11 corporate edition. manufacturer: . price: 430.58. Are Product A and Product B the same? No\n\nProduct A is title: readiris pro 11 corporate edition. manufacturer: iris inc.. price: 399.99. Product B is title: readiris pro 11 corporate edition for pc. manufacturer: . price: 349.99. Are Product A and Product B the same? Yes\n\n", "cisco systems ( ciscopro )": "Product A is title: cisco security desktop agent csa-b250-dtop-k9 ). manufacturer: cisco systems ( ciscopro ). price: . Product B is title: cisco csa-b100-srvr-k9 cisco security server agent win + sol 100 agentbundle cisco security server agent license 100 agents win solaris 0746320832586. manufacturer: . price: 55288.67. Are Product A and Product B the same? No\n\n", "whiptail": "", "microspot ltd": "Product A is title: microspot x-rip ( mac ). manufacturer: microspot ltd. price: 349.99. Product B is title: microsoft excel 2007 ( pc ). manufacturer: . price: 229.95. Are Product A and Product B the same? No\n\nProduct A is title: microspot x-rip ( mac ). manufacturer: microspot ltd. price: 349.99. 
Product B is title: microspot x-rip large format print driver for os x. manufacturer: . price: 335.99. Are Product A and Product B the same? Yes\n\nProduct A is title: microspot x-rip ( mac ). manufacturer: microspot ltd. price: 349.99. Product B is title: microsoft onenote 2007 ( pc ). manufacturer: . price: 99.95. Are Product A and Product B the same? No\n\nProduct A is title: microspot x-rip ( mac ). manufacturer: microspot ltd. price: 349.99. Product B is title: microspot macdraft pe personal edition. manufacturer: . price: 85.99. Are Product A and Product B the same? No\n\nProduct A is title: microspot x-rip ( mac ). manufacturer: microspot ltd. price: 349.99. Product B is title: microspot interiors ( ints3 .6 sb ). manufacturer: . price: 95.95. Are Product A and Product B the same? No\n\n", "sonic-systems-inc .": "Product A is title: upg sgms 1000 incremental node. manufacturer: sonic-systems-inc .. price: . Product B is title: sonicwall gms 1000 upgrade. manufacturer: . price: 63074.12. Are Product A and Product B the same? Yes\n\n", "simply-put-software": "Product A is title: simply put software data eliminator. manufacturer: simply-put-software. price: . Product B is title: spyware x-terminator. manufacturer: . price: 24.9. Are Product A and Product B the same? No\n\nProduct A is title: simply put software data eliminator. manufacturer: simply-put-software. price: . Product B is title: simply put software llc mozy 3.0. manufacturer: simply-put-software-llc. price: 43.3. Are Product A and Product B the same? No\n\nProduct A is title: simply put software got ta sing windows/macintosh ). manufacturer: simply-put-software. price: . Product B is title: webroot software window washer. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: simply put software got ta sing windows/macintosh ). manufacturer: simply-put-software. price: . Product B is title: phase one capture-one pro dslr raw image editing software for macintosh & windows .. manufacturer: . price: 399.95. Are Product A and Product B the same? No\n\nProduct A is title: simply put software got ta sing windows/macintosh ). manufacturer: simply-put-software. price: . Product B is title: simply put software llc gs905-s got ta sing ( win 95 98 me nt 2000 xp/mac 10.0 or higher ). manufacturer: simply-put-software. price: 34.97. Are Product A and Product B the same? Yes\n\n", "sega-of-america-inc .": "Product A is title: sonic mega collection plus. manufacturer: sega-of-america-inc .. price: 19.99. Product B is title: sega of america inc sonic mega collection. manufacturer: sega-of-america-inc .. price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: rome total war gold edition. manufacturer: sega-of-america-inc .. price: 19.99. Product B is title: destineer rise of nations gold edition. manufacturer: . price: 45.99. Are Product A and Product B the same? No\n\nProduct A is title: rome total war gold edition. manufacturer: sega-of-america-inc .. price: 19.99. Product B is title: ghost recon gold edition ( dvd-rom ). manufacturer: . price: 20.95. Are Product A and Product B the same? No\n\nProduct A is title: rome total war gold edition. manufacturer: sega-of-america-inc .. price: 19.99. Product B is title: xbox 360 : call of action 3 gold edition. manufacturer: . price: 59.99. Are Product A and Product B the same? No\n\n", "brighter-minds-media-inc .": "Product A is title: land before time : kindergarten. manufacturer: brighter-minds-media-inc .. price: 9.99. 
Product B is title: jumpstart kindergarten ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: land before time : preschool. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: the land before time : kindergarten adventure. manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\nProduct A is title: land before time : kindergarten. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: brighter minds land before time : kindergarten. manufacturer: brighter-minds-media-inc .. price: 9.99. Are Product A and Product B the same? Yes\n\nProduct A is title: caillou ready for school ( pc & mac ). manufacturer: brighter-minds-media-inc .. price: 19.99. Product B is title: caillou ready for school by brighter minds. manufacturer: brighter-minds-media-inc .. price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: land before time : kindergarten. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: the land before time : kindergarten adventure. manufacturer: . price: 12.9. Are Product A and Product B the same? Yes\n\nProduct A is title: caillou ready for school ( pc & mac ). manufacturer: brighter-minds-media-inc .. price: 19.99. Product B is title: weekly reader mastering elementary & middle school math ( pc/mac ) fogware. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: land before time : preschool. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: brighter minds land before time : kindergarten. manufacturer: brighter-minds-media-inc .. price: 9.99. Are Product A and Product B the same? No\n\nProduct A is title: caillou magical adventures. manufacturer: brighter-minds-media-inc .. price: 19.99. Product B is title: caillou magical adventures by brighter minds. manufacturer: brighter-minds-media-inc .. price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: land before time : preschool. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: reader rabbit preschool. manufacturer: the-learning-company. price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: land before time : preschool. manufacturer: brighter-minds-media-inc .. price: 9.99. Product B is title: the land before time : preschool adventure. manufacturer: brighter-minds-media-inc .. price: 12.9. Are Product A and Product B the same? Yes\n\n", "jungle software": "Product A is title: gorilla 4 standard. manufacturer: jungle software. price: 299.0. Product B is title: adobe cs3 web standard. manufacturer: . price: 1035.99. Are Product A and Product B the same? No\n\nProduct A is title: gorilla 4 standard. manufacturer: jungle software. price: 299.0. Product B is title: microsoft office 2004 standard. manufacturer: . price: 369.99. Are Product A and Product B the same? No\n\n", "imsi": "Product A is title: turbocad mac 3d for mac. manufacturer: imsi. price: 249.99. Product B is title: sims 2 pets for mac. manufacturer: . price: 34.99. Are Product A and Product B the same? No\n\nProduct A is title: imsi turbocad designer v. 11. manufacturer: imsi. price: 39.99. Product B is title: imsi turbocad designer 11. manufacturer: . price: 12.9. Are Product A and Product B the same? Yes\n\nProduct A is title: turbocad mac 3d for mac. manufacturer: imsi. price: 249.99. Product B is title: sound studio 3 for mac. manufacturer: . price: 79.99. 
Are Product A and Product B the same? No\n\nProduct A is title: turbocad mac 3d for mac. manufacturer: imsi. price: 249.99. Product B is title: imsi design llc turbocad mac 3d. manufacturer: . price: 204.67. Are Product A and Product B the same? Yes\n\nProduct A is title: imsi turbocad designer v. 11. manufacturer: imsi. price: 39.99. Product B is title: turbocad ( r ) designer 2d/3d version 9.1. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\n", "humongous entertainment": "Product A is title: backyard sports backyard basketball and backyard football. manufacturer: humongous entertainment. price: 19.99. Product B is title: backyard basketball & backyard hockey double pack. manufacturer: . price: 9.95. Are Product A and Product B the same? No\n\nProduct A is title: backyard sports backyard basketball and backyard football. manufacturer: humongous entertainment. price: 19.99. Product B is title: brighter minds backyard sports. manufacturer: . price: 19.99. Are Product A and Product B the same? Yes\n\nProduct A is title: blues clues preschool. manufacturer: humongous entertainment. price: . Product B is title: reader rabbit preschool. manufacturer: the-learning-company. price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: blues clues preschool. manufacturer: humongous entertainment. price: . Product B is title: brain play preschool 1st grade. manufacturer: . price: 25.99. Are Product A and Product B the same? No\n\nProduct A is title: blues clues preschool. manufacturer: humongous entertainment. price: . Product B is title: brain play preschool-1st grade. manufacturer: . price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: backyard sports backyard basketball and backyard football. manufacturer: humongous entertainment. price: 19.99. Product B is title: backyard sports : baseball 2007 for pc. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "disney interactive": "Product A is title: millionaire sports edition ( jewel case ). manufacturer: disney interactive. price: 9.99. Product B is title: who wants to be a millionaire sports edition. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: millionaire sports edition ( jewel case ). manufacturer: disney interactive. price: 9.99. Product B is title: poetry fiction and drama ( win/mac ) ( jewel case ) ( 10172 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: millionaire sports edition ( jewel case ). manufacturer: disney interactive. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: disney 2nd grade active leveling advantage w buzz ( jewel case ) ages 6-8. manufacturer: disney interactive. price: 9.99. Product B is title: disney 's buzz lightyear 2nd grade. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: millionaire sports edition ( jewel case ). manufacturer: disney interactive. price: 9.99. Product B is title: merriam-webster 's dictionary ( win/mac ) ( jewel case ) ( 10462 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\n", "nuance academic": "Product A is title: acad upgrade dragon naturallyspeaking pro solution 9.0 ( a289a-fd7-9 .0 ). manufacturer: nuance academic. price: 399.54. Product B is title: nuance dragon naturallyspeaking v. 9.0 professional upgrade. 
manufacturer: . price: 212.62. Are Product A and Product B the same? No\n\n", "digidesign": "Product A is title: digidesign music production toolkit ( pro tools le ). manufacturer: digidesign. price: 495.0. Product B is title: make finale 2007 software music production software. manufacturer: . price: 429.95. Are Product A and Product B the same? No\n\nProduct A is title: digidesign music production toolkit ( pro tools le ). manufacturer: digidesign. price: 495.0. Product B is title: make songwriter software music production software. manufacturer: . price: 37.95. Are Product A and Product B the same? No\n\nProduct A is title: digidesign music production toolkit ( pro tools le ). manufacturer: digidesign. price: 495.0. Product B is title: apple/emagic logic pro 7.2 software music production software. manufacturer: . price: 999.0. Are Product A and Product B the same? No\n\nProduct A is title: digidesign music production toolkit ( pro tools le ). manufacturer: digidesign. price: 495.0. Product B is title: digidesign music production toolkit music production software. manufacturer: . price: 495.0. Are Product A and Product B the same? Yes\n\nProduct A is title: digidesign music production toolkit ( pro tools le ). manufacturer: digidesign. price: 495.0. Product B is title: sony media software acid pro 5 music production software. manufacturer: . price: 199.95. Are Product A and Product B the same? No\n\n", "software cinema": "Product A is title: adobe photoshop cs2 fundamental techniques by julieanne kost. manufacturer: software cinema. price: . Product B is title: adobe photoshop cs3 extended for mac upgrade. manufacturer: . price: 329.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop cs2 advanced techniques by julieanne kost. manufacturer: software cinema. price: . Product B is title: adobe photoshop cs3 ( v10 .0 ) mac adobe 13102488. manufacturer: . price: 537.65. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop cs2 advanced techniques by julieanne kost. manufacturer: software cinema. price: . Product B is title: adobe photoshop lightroom. manufacturer: . price: 264.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop cs2 advanced techniques by julieanne kost. manufacturer: software cinema. price: . Product B is title: adobe photoshop cs3 extended for mac academic. manufacturer: . price: 295.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe photoshop cs2 fundamental techniques by julieanne kost. manufacturer: software cinema. price: . Product B is title: software cinema dvd-rom : dvdrom : photoshop cs2 fundamental techniques ( training ) photoshop software. manufacturer: . price: 159.95. Are Product A and Product B the same? Yes\n\n", "solidworks corporation": "", "adobe software": "Product A is title: adobe software acrobat pro upgrd pro-pro mac ( adbcd01798mc ). manufacturer: adobe software. price: 273.0.
Product B is title: adobe acrobat v8 .0 professional for mac upgrade. manufacturer: . price: 145.99. Are Product A and Product B the same? Yes\n\nProduct A is title: adobe software acrobat pro upgrd pro-pro mac ( adbcd01798mc ). manufacturer: adobe software. price: 273.0. Product B is title: adobe acrobat v8 .0 professional for mac. manufacturer: . price: 405.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe software acrobat pro upgrd pro-pro mac ( adbcd01798mc ). manufacturer: adobe software. price: 273.0. Product B is title: adobe acrobat 7.0 standard academic mac. manufacturer: . price: 98.99. Are Product A and Product B the same? No\n\nProduct A is title: adobe software acrobat pro upgrd pro-pro mac ( adbcd01798mc ). manufacturer: adobe software. price: 273.0. Product B is title: adobe acrobat ( v8 .0 ) pro win up from std adobe 22020452. manufacturer: . price: 151.27. Are Product A and Product B the same? No\n\nProduct A is title: adobe software acrobat pro upgrd pro-pro mac ( adbcd01798mc ). manufacturer: adobe software. price: 273.0. Product B is title: adobe acrobat v8 .0 professional for mac upsell from std-pro. manufacturer: . price: 145.99. Are Product A and Product B the same? No\n\n", "tri synergy": "Product A is title: lavasoft ad-aware plus edition. manufacturer: tri synergy. price: 29.99. Product B is title: lavasoft ad-aware plus edition. manufacturer: . price: 24.9. Are Product A and Product B the same? Yes\n\nProduct A is title: big oil : build an oil empire. manufacturer: tri synergy. price: 19.99. Product B is title: big oil : build an oil empire. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\nProduct A is title: oxford spanish dictionary. manufacturer: tri synergy. price: 19.99. Product B is title: palmspring software 523 oxford american desk dictionary. manufacturer: palmspring software. price: 24.2. Are Product A and Product B the same? No\n\n", "git corp": "Product A is title: captain america the complete comic collection win/mac. manufacturer: git corp. price: 49.95. Product B is title: sega of america inc sonic mega collection. manufacturer: sega-of-america-inc .. price: 17.55. Are Product A and Product B the same? No\n\nProduct A is title: amazing spider-man complete comic book collection win/mac. manufacturer: git corp. price: 49.99. Product B is title: git comic book library collector 's edition vol . 1. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\nProduct A is title: 40 years of the avengers. manufacturer: git corp. price: 49.99. Product B is title: git 40 years of marvel avengers dvd-rom. manufacturer: . price: 39.99. Are Product A and Product B the same? Yes\n\nProduct A is title: captain america the complete comic collection win/mac. manufacturer: git corp. price: 49.95. Product B is title: 11273 hasbro family game collection 2006 complete package 1 user pc cd win. manufacturer: . price: 14.51. Are Product A and Product B the same? No\n\nProduct A is title: 44 years of the fantastic four. manufacturer: git corp. price: 49.99. Product B is title: git 44 years of marvel fantastic 4 dvd-rom. manufacturer: . price: 42.99. Are Product A and Product B the same?
Yes\n\nProduct A is title: captain america the complete comic collection win/mac. manufacturer: git corp. price: 49.95. Product B is title: git amazing spider-man the complete collection dvd-rom. manufacturer: . price: 42.99. Are Product A and Product B the same? No\n\nProduct A is title: amazing spider-man complete comic book collection win/mac. manufacturer: git corp. price: 49.99. Product B is title: git amazing spider-man the complete collection dvd-rom. manufacturer: . price: 42.99. Are Product A and Product B the same? Yes\n\nProduct A is title: captain america the complete comic collection win/mac. manufacturer: git corp. price: 49.95. Product B is title: aspyr the sims complete collection. manufacturer: . price: 47.99. Are Product A and Product B the same? No\n\n", "adaptec": "Product A is title: snap enterprise data replicator advanced license for 10000 series. manufacturer: adaptec. price: . Product B is title: 5325302018 snap enterprise data replicator license 1 server linux unix win. manufacturer: . price: 663.22. Are Product A and Product B the same? No\n\nProduct A is title: snap enterprise data replicator advanced license for 10000 series. manufacturer: adaptec. price: . Product B is title: 5325301871 snap appliance snap care snap enterprise data replicator technical support c. manufacturer: . price: 377.06. Are Product A and Product B the same? No\n\n", "majesco entertainment": "", "business objects": "Product A is title: crystal reports xi professional edition. manufacturer: business objects. price: 495.0. Product B is title: business objects crystal reports xi. manufacturer: . price: 459.99. Are Product A and Product B the same? Yes\n\nProduct A is title: crystal reports xi developer. manufacturer: business objects. price: 595.0. Product B is title: upg crystal reports 11 pro french. manufacturer: . price: 264.99. Are Product A and Product B the same? No\n\nProduct A is title: upgrade crystal reports 11 pro french. manufacturer: business objects. price: 479.78. Product B is title: business objects crystal reports xi. manufacturer: . price: 459.99. Are Product A and Product B the same? No\n\nProduct A is title: crystal reports xi professional edition. manufacturer: business objects. price: 495.0. Product B is title: upg crystal reports 11 pro french. manufacturer: . price: 264.99. Are Product A and Product B the same? No\n\nProduct A is title: crystal reports xi professional edition. manufacturer: business objects. price: 495.0. Product B is title: microsoft ( r ) windows ( r ) xp professional edition with sp2. manufacturer: . price: 199.99. Are Product A and Product B the same? No\n\nProduct A is title: upgrade crystal reports 11 pro french. manufacturer: business objects. price: 479.78. Product B is title: upg crystal reports 11 pro french. manufacturer: . price: 264.99. Are Product A and Product B the same? Yes\n\nProduct A is title: crystal reports 11 reporting essentials trng cbt. manufacturer: business objects. price: 199.99. Product B is title: business objects t-59j-e-56-wx crystal rpt 11 report essentials cbt. manufacturer: . price: 178.49. Are Product A and Product B the same? Yes\n\nProduct A is title: crystal reports xi professional edition. manufacturer: business objects. price: 495.0. Product B is title: zipmagic personal edition. manufacturer: . price: 8.95. Are Product A and Product B the same? No\n\n", "dvo enterprises": "Product A is title: cook 'n low fat. manufacturer: dvo enterprises. price: 19.99. Product B is title: dvo enterprises inc. 
cookn low fat. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: dvo cook 'n deluxe 6.0 ultimate recipe organizer. manufacturer: dvo enterprises. price: 39.99. Product B is title: dvo enterprises inc. cookn deluxe 6.0. manufacturer: . price: 34.99. Are Product A and Product B the same? Yes\n\n", "macspeech inc.": "Product A is title: ilisten ( mac ). manufacturer: macspeech inc.. price: 99.99. Product B is title: iwork '06 ( mac ) apple. manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: ilisten ( mac ). manufacturer: macspeech inc.. price: 99.99. Product B is title: apple appleworks 6.2.9 ( mac ). manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: ilisten ( mac ). manufacturer: macspeech inc.. price: 99.99. Product B is title: . mac ( mac ) apple. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\n", "magix entertainment": "Product A is title: photostory on cd & dvd 5. manufacturer: magix entertainment. price: 19.99. Product B is title: magix entertainment corp. photostory on cd & dvd 5. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: web radio recorder. manufacturer: magix entertainment. price: . Product B is title: magix webradio recorder ( pc ) magix entertainment. manufacturer: . price: 24.99. Are Product A and Product B the same? Yes\n\nProduct A is title: xtreme photostory on cd and dvd 6. manufacturer: magix entertainment. price: 29.99. Product B is title: magix entertainment corp. photostory on cd & dvd 5. manufacturer: . price: 17.55. Are Product A and Product B the same? No\n\nProduct A is title: web radio recorder. manufacturer: magix entertainment. price: . Product B is title: mp3 radio recorder for ipod. manufacturer: . price: 22.9. Are Product A and Product B the same? No\n\nProduct A is title: photostory on cd & dvd 5. manufacturer: magix entertainment. price: 19.99. Product B is title: magix entertainment corp. xtreme photostory on cd and dvd 6. manufacturer: . price: 26.14. Are Product A and Product B the same? No\n\n", "riverdeep-learning-company": "Product A is title: the printshop business card maker ( jewel case ). manufacturer: riverdeep-learning-company. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: the printshop business card maker ( jewel case ). manufacturer: riverdeep-learning-company. price: 9.99. Product B is title: encore software 2137421 printshop business card maker ( win 98 me 2000 xp ). manufacturer: . price: 8.64. Are Product A and Product B the same? Yes\n\nProduct A is title: the printshop business card maker ( jewel case ). manufacturer: riverdeep-learning-company. price: 9.99. Product B is title: the printshop 20. manufacturer: . price: 19.9. Are Product A and Product B the same? No\n\nProduct A is title: creating keepsakes scrapbook designer version 2 platinum. manufacturer: riverdeep-learning-company. price: 39.99. Product B is title: encore software 10612 creating keepsakes scrapbook designer deluxe v. 3 sb cs. manufacturer: . price: 17.97. Are Product A and Product B the same? No\n\n", "nuance communications inc. .": "", "acclaim": "", "delorme": "Product A is title: delorme topo usa mapping software 6.0 west region. manufacturer: delorme. price: 49.95. Product B is title: delorme mapping topo usa 6.0 west region. 
manufacturer: . price: 43.36. Are Product A and Product B the same? Yes\n\nProduct A is title: street atlas usa 2007 plus dvd. manufacturer: delorme. price: 59.95. Product B is title: delorme mapping street atlas usa 2007 plus dvd xx. manufacturer: . price: 53.37. Are Product A and Product B the same? Yes\n\nProduct A is title: delorme topo usa mapping software 6.0 east region. manufacturer: delorme. price: 49.95. Product B is title: delorme mapping topo usa 6.0 west region. manufacturer: . price: 43.36. Are Product A and Product B the same? No\n\nProduct A is title: delorme topo usa mapping software 6.0 east region. manufacturer: delorme. price: 49.95. Product B is title: delorme mapping street atlas usa 2007 plus dvd xx. manufacturer: . price: 53.37. Are Product A and Product B the same? No\n\nProduct A is title: delorme topo usa mapping software 6.0 east region. manufacturer: delorme. price: 49.95. Product B is title: delorme mapping topo usa 6.0 east region. manufacturer: . price: 43.36. Are Product A and Product B the same? Yes\n\n", "selectmedia entertainment": "", "datawatch": "Product A is title: datawatch monarch v7 ntwk-starter cd most 8u fpm32c070-a08n ). manufacturer: datawatch. price: . Product B is title: monarch std ( v9 .0 ) network starter 4-user rtl box datawatch fpm32c090-a04n. manufacturer: . price: 1157.21. Are Product A and Product B the same? No\n\n", "netmanage": "Product A is title: netmanage rumba web as/400 edition 910523-002 ). manufacturer: netmanage. price: . Product B is title: 910523-002 rumba as/400 edition v. 7.2 upgrade package 1 processor tier p50 c. manufacturer: . price: 20069.43. Are Product A and Product B the same? No\n\nProduct A is title: netmanage upg rumba unix-hp edition level 902453-007-c ). manufacturer: netmanage. price: . Product B is title: 902453-007-b rumba unix-hp edition upgrade license 1 user volume level b 5-49 c. manufacturer: . price: 147.73. Are Product A and Product B the same? Yes\n\nProduct A is title: netmanage rumba unix-hp edition v. 7.3 upgrade license 902453-006 ). manufacturer: netmanage. price: . Product B is title: bhe3dus installanywhere enterprise edition v. 8 product upgrade license + 1 year. manufacturer: . price: 4746.16. Are Product A and Product B the same? No\n\nProduct A is title: netmanage rumba web as/400 edition 910584-001 ). manufacturer: netmanage. price: . Product B is title: 910519-004 rumba as/400 edition v. 7.4 upgrade license 1 processor tier p10 c. manufacturer: . price: 3223.26. Are Product A and Product B the same? No\n\nProduct A is title: netmanage rumba web as/400 edition 910584-001 ). manufacturer: netmanage. price: . Product B is title: 910584-001 rumba as/400 edition v. 7.2 upgrade package 1 processor tier p60 c. manufacturer: . price: 26759.24. Are Product A and Product B the same? No\n\nProduct A is title: netmanage upg rumba unix-hp edition level 902453-007-c ). manufacturer: netmanage. price: . Product B is title: 902453-007-e rumba unix-hp edition upgrade license 1 user volume level e 1000-1999. manufacturer: . price: 106.48. Are Product A and Product B the same? Yes\n\n", "eidos interactive": "Product A is title: startopia. manufacturer: eidos interactive. price: 39.95.
Product B is title: startopia. manufacturer: . price: 14.95. Are Product A and Product B the same? No\n\n", "atari": "Product A is title: home budget for dummies. manufacturer: atari. price: . Product B is title: home budget for dummies. manufacturer: . price: 10.95. Are Product A and Product B the same? No\n\nProduct A is title: tonka town ( jewel case ). manufacturer: atari. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: backyard skateboarding. manufacturer: atari. price: . Product B is title: backyard skateboarding. manufacturer: . price: 7.95. Are Product A and Product B the same? No\n\nProduct A is title: tonka town ( jewel case ). manufacturer: atari. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: encyclopedia britannica profiles : dinosaurs ( jewel case ). manufacturer: atari. price: 9.99. Product B is title: avanquest encyclopedia britannica 2006. manufacturer: . price: 29.0. Are Product A and Product B the same? No\n\n", "filemaker": "Product A is title: filemaker mobile 8 for palm os and pocket pc. manufacturer: filemaker. price: 69.0. Product B is title: resco explorer 2007 for pocketpc windows mobile software for pocket pc. manufacturer: . price: 29.95. Are Product A and Product B the same? No\n\nProduct A is title: filemaker mobile 8 for palm os and pocket pc. manufacturer: filemaker. price: 69.0. Product B is title: splashid windows mobile software for pocket pc. manufacturer: . price: 29.95. Are Product A and Product B the same? No\n\nProduct A is title: filemaker mobile 8 for palm os and pocket pc. manufacturer: filemaker. price: 69.0. Product B is title: filemaker mobile 8. manufacturer: . price: 65.99. Are Product A and Product B the same? No\n\nProduct A is title: filemaker mobile 8 for palm os and pocket pc. manufacturer: filemaker. price: 69.0. Product B is title: td824ll/a filemaker mobile v. 8 complete package 1 user cd win mac palm os. manufacturer: . price: 66.15. Are Product A and Product B the same? No\n\n", "oxford-university-press": "Product A is title: oxford spanish dictionary. manufacturer: oxford-university-press. price: 19.95. Product B is title: palmspring software 523 oxford american desk dictionary. manufacturer: palmspring software. price: 24.2. Are Product A and Product B the same? No\n\nProduct A is title: oxford spanish dictionary. manufacturer: oxford-university-press. price: 19.95. Product B is title: pop up oxford spanish / english dict. manufacturer: oxford-university-press. price: 13.95. Are Product A and Product B the same? Yes\n\n", "eagle games": "Product A is title: tournament poker 2005. manufacturer: eagle games. price: 20.99. Product B is title: tournament poker no limit texas holdem. manufacturer: . price: 19.99. Are Product A and Product B the same? No\n\n", "nero inc.": "Product A is title: nero 8 ultra edition. manufacturer: nero inc.. price: 99.95. Product B is title: nero inc nero 8 ultra edition. manufacturer: . price: 88.83. Are Product A and Product B the same? Yes\n\nProduct A is title: nero 7 ultra edition enhanced. manufacturer: nero inc.. price: 99.99. Product B is title: 70009 nero ultra edition enhanced v. 7 complete package 1 user cd win. manufacturer: . price: 87.76. Are Product A and Product B the same? No\n\nProduct A is title: nero 7 ultra edition enhanced. 
manufacturer: nero inc.. price: 99.99. Product B is title: nero nero 7 ultra edition enhanced cd/dvd burning software for windows authoring software. manufacturer: . price: 79.99. Are Product A and Product B the same? No\n\nProduct A is title: nero 8 ultra edition. manufacturer: nero inc.. price: 99.95. Product B is title: i copy dvds 2 ultra edition. manufacturer: . price: 29.9. Are Product A and Product B the same? No\n\nProduct A is title: nero 7 ultra edition enhanced. manufacturer: nero inc.. price: 99.99. Product B is title: ahead software 70115 nero 7 ultra edition enhanced mini pc & home entrmnt soln. manufacturer: . price: 83.97. Are Product A and Product B the same? Yes\n\n", "feral interactive": "Product A is title: lego star wars ii : the original trilogy for mac. manufacturer: feral interactive. price: 39.99. Product B is title: xbox : lego star wars. manufacturer: . price: 19.95. Are Product A and Product B the same? No\n\nProduct A is title: chessmaster 9000 ( mac ). manufacturer: feral interactive. price: 39.99. Product B is title: feral interactive limited chessmaster 9000. manufacturer: . price: 35.98. Are Product A and Product B the same? Yes\n\nProduct A is title: chessmaster 9000 ( mac ). manufacturer: feral interactive. price: 39.99. Product B is title: chessmaster 9000 : feral interactive. manufacturer: . price: 38.99. Are Product A and Product B the same? Yes\n\nProduct A is title: black & white platinum pack ( mac ). manufacturer: feral interactive. price: 29.99. Product B is title: feral interactive limited black & white platinum pack. manufacturer: . price: 27.37. Are Product A and Product B the same? Yes\n\nProduct A is title: black & white platinum pack ( mac ). manufacturer: feral interactive. price: 29.99. Product B is title: intervideo windvd 8 platinum ( pc ). manufacturer: . price: 59.99. Are Product A and Product B the same? No\n\nProduct A is title: lego star wars ii : the original trilogy for mac. manufacturer: feral interactive. price: 39.99. Product B is title: aspyr lego star wars ii. manufacturer: . price: 44.95. Are Product A and Product B the same? Yes\n\n", "nuance": "", "global marketing": "Product A is title: iview mediapro 2.5. manufacturer: global marketing. price: 199.99. Product B is title: iview mediapro 2.6 media management. manufacturer: . price: 155.99. Are Product A and Product B the same? Yes\n\n", "masque publishing": "Product A is title: sportsman 's double play. manufacturer: masque publishing. price: 19.95. Product B is title: masque publishing sportsmans double play with american billiards. manufacturer: . price: 17.55. Are Product A and Product B the same? No\n\nProduct A is title: solitaire antics deluxe. manufacturer: masque publishing. price: . Product B is title: masque publishing solitaire antics deluxe. manufacturer: . price: 8.46. Are Product A and Product B the same? Yes\n\nProduct A is title: best of slots 2 ( jewel case ). manufacturer: masque publishing. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: best of slots 2 ( jewel case ). manufacturer: masque publishing. price: 9.99. Product B is title: total 3d home deluxe ( jewel case ) ( jce-th6 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: sportsman 's double play. manufacturer: masque publishing. price: 19.95. Product B is title: masque publishing sportsmans double play. 
manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\n", "lenovo": "Product A is title: the computrace lojack for laptops computer tracking and loss control solution by. manufacturer: lenovo. price: . Product B is title: absolute software lfl-m2-36 computrace lojack for laptops 3 year license mac 10.2 or higher. manufacturer: . price: 80.97. Are Product A and Product B the same? No\n\nProduct A is title: the computrace lojack for laptops computer tracking and loss control solution by. manufacturer: lenovo. price: . Product B is title: computrace lojack for laptops 3 yr subscription. manufacturer: . price: 89.99. Are Product A and Product B the same? No\n\nProduct A is title: the computrace lojack for laptops computer tracking and loss control solution by. manufacturer: lenovo. price: . Product B is title: absolute software lfl-n2-36 computrace lojack for laptops 3 year license ( win 2000 xp ). manufacturer: . price: 85.97. Are Product A and Product B the same? No\n\nProduct A is title: the computrace lojack for laptops computer tracking and loss control solution by. manufacturer: lenovo. price: . Product B is title: absolute software lfl-n2-48 computrace lojack for laptops 4 year license. manufacturer: . price: 105.68. Are Product A and Product B the same? No\n\nProduct A is title: the computrace lojack for laptops computer tracking and loss control solution by. manufacturer: lenovo. price: . Product B is title: computrace lojack for laptops 1 yr subscription. manufacturer: . price: 45.99. Are Product A and Product B the same? No\n\n", "cisco systems enterprise": "Product A is title: cisco ios enterprise services complete package cd28-esk9 = ). manufacturer: cisco systems enterprise. price: . Product B is title: cd72-ar1k9 = cisco ios enterprise/snasw ipsec 3des complete package cd. manufacturer: . price: 10576.76. Are Product A and Product B the same? No\n\nProduct A is title: cisco 2600xm ser ios ip/fw/ids cd26xm-ch = ). manufacturer: cisco systems enterprise. price: . Product B is title: cd26xm-ch = cisco ios ip/fw/ids complete package cd. manufacturer: . price: 360.82. Are Product A and Product B the same? No\n\nProduct A is title: cisco ios enterprise services complete package cd28-esk9 = ). manufacturer: cisco systems enterprise. price: . Product B is title: 22774 vsi-fax v. 6 complete package 1 server cd linux. manufacturer: . price: 1573.45. Are Product A and Product B the same? No\n\nProduct A is title: cisco syst . minor release : requires existing cww-6 .1 - win-mr ). manufacturer: cisco systems enterprise. price: . Product B is title: cisco ciscoworks v. 6.1 for windows upgrade cww-6 .1 - win-mr. manufacturer: . price: 225.0. Are Product A and Product B the same? No\n\nProduct A is title: cisco ios enterprise services complete package cd28-esk9 = ). manufacturer: cisco systems enterprise. price: . Product B is title: cd28-esk9 = cisco ios enterprise services complete package cd. manufacturer: . price: 1001.38. Are Product A and Product B the same? No\n\n", "abacus": "Product A is title: aircraft collector 's second edition. manufacturer: abacus. price: 19.95. Product B is title: neil diamond the neil diamond collection. manufacturer: . price: 22.13. Are Product A and Product B the same? No\n\nProduct A is title: train sim modeler design studio. manufacturer: abacus. price: 39.99. Product B is title: abacus train sim modeler. manufacturer: . price: 29.84. Are Product A and Product B the same? 
Yes\n\nProduct A is title: national parks : enhanced scenery add-on for microsoft flight simulator 2002 & 2000. manufacturer: abacus. price: 19.99. Product B is title: abacus national parks enhanced scenery. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: usa/world extreme landscapes bundle. manufacturer: abacus. price: 39.95. Product B is title: abacus software s546 usa extreme landscapes for fs2004 and fs2002 3cd set. manufacturer: abacus software. price: 28.99. Are Product A and Product B the same? No\n\nProduct A is title: aircraft collector 's second edition. manufacturer: abacus. price: 19.95. Product B is title: abacus aircraft collectors second edition. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\nProduct A is title: usa/world extreme landscapes bundle. manufacturer: abacus. price: 39.95. Product B is title: abacus usa/world extreme landscapes bundle. manufacturer: . price: 38.43. Are Product A and Product B the same? Yes\n\nProduct A is title: aircraft collector 's second edition. manufacturer: abacus. price: 19.95. Product B is title: family tree maker 16 collectors edition. manufacturer: . price: 99.99. Are Product A and Product B the same? No\n\nProduct A is title: ultra sudoku/ultra kakuro. manufacturer: abacus. price: 29.95. Product B is title: abacus ultra sudoku/ultra kakuro. manufacturer: . price: 17.55. Are Product A and Product B the same? Yes\n\n", "imaginova": "Product A is title: starry night galaxy explorer ( pc & mac ). manufacturer: imaginova. price: 9.99. Product B is title: orion/imaginova starry night galaxy explorer. manufacturer: . price: 8.8. Are Product A and Product B the same? Yes\n\nProduct A is title: starry night galaxy explorer ( pc & mac ). manufacturer: imaginova. price: 9.99. Product B is title: starry night galaxy explorer ( pc & mac ) ( snge-5-c1u ). manufacturer: . price: 9.59. Are Product A and Product B the same? Yes\n\n", "freeverse software": "Product A is title: burning monkey solitaire 2005 edition. manufacturer: freeverse software. price: 24.99. Product B is title: freeverse burning monkey solitaire 3d. manufacturer: . price: 24.99. Are Product A and Product B the same? Yes\n\nProduct A is title: burning monkey solitaire 2005 edition. manufacturer: freeverse software. price: 24.99. Product B is title: freeverse burning monkey mahjong 3d. manufacturer: . price: 24.99. Are Product A and Product B the same? No\n\nProduct A is title: 3d bridge deluxe ( mac ). manufacturer: freeverse software. price: 24.99. Product B is title: freeverse software 4012 3d bridge deluxe ( retail box ) for mac. manufacturer: freeverse software. price: 18.99. Are Product A and Product B the same? No\n\nProduct A is title: 3d bridge deluxe ( mac ). manufacturer: freeverse software. price: 24.99. Product B is title: freeverse 3d bridge deluxe. manufacturer: . price: 24.99. Are Product A and Product B the same?
Yes\n\nProduct A is title: sound studio 3 : record edit add effects to audio ( mac ). manufacturer: freeverse software. price: 79.99. Product B is title: sound studio 3 for mac. manufacturer: . price: 79.99. Are Product A and Product B the same? Yes\n\nProduct A is title: burning monkey solitaire 4.0. manufacturer: freeverse software. price: 24.95. Product B is title: freeverse software 7001 burning monkey solitaire 4.0 mac 10.3.9 or later ub. manufacturer: . price: 22.97. Are Product A and Product B the same? No\n\nProduct A is title: burning monkey solitaire 4.0. manufacturer: freeverse software. price: 24.95. Product B is title: freeverse burning monkey solitaire 4.0. manufacturer: . price: 24.95. Are Product A and Product B the same? Yes\n\nProduct A is title: toysight gold for isight : includes 15 games ( mac ). manufacturer: freeverse software. price: 29.99. Product B is title: freeverse software 5014 toysight gold ( mac 10.2 or higher ). manufacturer: . price: 22.97. Are Product A and Product B the same? No\n\nProduct A is title: wingnuts 2 ( mac ). manufacturer: freeverse software. price: 29.99. Product B is title: wingnuts 2 : raina 's revenge. manufacturer: . price: 28.99. Are Product A and Product B the same? No\n\n", "kutoka": "Product A is title: mia 's math adventure : just in time. manufacturer: kutoka. price: 19.99. Product B is title: the land before time : kindergarten adventure. manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\nProduct A is title: mia 's math adventure : just in time. manufacturer: kutoka. price: 19.99. Product B is title: kutoka interactive 61208 mias math adventure ( just in time ) ( win 95 98 me 2000 xp/mac 8.6-9.x ( classic ) x v10 .1 or higher ). manufacturer: . price: 18.97. Are Product A and Product B the same? Yes\n\nProduct A is title: didi & ditto : kindergarten win/mac. manufacturer: kutoka. price: 19.99. Product B is title: jumpstart kindergarten ( jc ). manufacturer: . price: 9.9. Are Product A and Product B the same? No\n\nProduct A is title: didi & ditto : kindergarten win/mac. manufacturer: kutoka. price: 19.99. Product B is title: the land before time : kindergarten adventure. manufacturer: . price: 12.9. Are Product A and Product B the same? No\n\n", "x-oom": "Product A is title: x-oom movie clone 3 gold. manufacturer: x-oom. price: . Product B is title: x-oom internet movies 2. manufacturer: . price: 22.9. Are Product A and Product B the same? No\n\nProduct A is title: x-oom internet movies 2. manufacturer: x-oom. price: . Product B is title: feral interactive the movies. manufacturer: . price: 47.99. Are Product A and Product B the same? No\n\nProduct A is title: x-oom mp3 radio recorder windows ). manufacturer: x-oom. price: . Product B is title: mp3 radio recorder for ipod. manufacturer: . price: 22.9. Are Product A and Product B the same? Yes\n\nProduct A is title: x-oom movie clone 3 gold. manufacturer: x-oom. price: . Product B is title: xoom movie clone 3 gold. manufacturer: . price: 22.9. Are Product A and Product B the same? Yes\n\n", "electronic arts": "Product A is title: battlefield 1942 : deluxe edition. manufacturer: electronic arts. price: 29.99. Product B is title: mastercook deluxe low carb edition. manufacturer: . price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: battlefield 1942 : deluxe edition. manufacturer: electronic arts. price: 29.99. Product B is title: allume aquazone seven seas deluxe edition. manufacturer: . price: 24.99. 
Are Product A and Product B the same? No\n\nProduct A is title: battlefield 1942 : deluxe edition. manufacturer: electronic arts. price: 29.99. Product B is title: instant immersion language lab deluxe edition. manufacturer: . price: 14.9. Are Product A and Product B the same? No\n\n", "the-learning-company": "Product A is title: reader rabbit preschool. manufacturer: the-learning-company. price: 29.99. Product B is title: blue 's clues preschool. manufacturer: . price: 7.95. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit preschool. manufacturer: the-learning-company. price: 29.99. Product B is title: reader rabbit learn to read with phonics ! preschool & kindergarten. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit preschool. manufacturer: the-learning-company. price: 29.99. Product B is title: reader rabbit 1st grade. manufacturer: . price: 17.9. Are Product A and Product B the same? No\n\nProduct A is title: reader rabbit preschool. manufacturer: the-learning-company. price: 29.99. Product B is title: reader rabbit reading learning system 2007. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\nProduct A is title: treasure mathstorm. manufacturer: the-learning-company. price: 29.99. Product B is title: treasure mathstorm. manufacturer: the-learning-company. price: 9.9. Are Product A and Product B the same? Yes\n\n", "now software": "Product A is title: now up-to-date & contact v5 .0 ( mac ). manufacturer: now software. price: . Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: now software. price: 114.99. Are Product A and Product B the same? No\n\nProduct A is title: now up-to-date & contact v5 .0 ( mac ). manufacturer: now software. price: . Product B is title: now software nuw45e1k now up-to-date & contact complete product content management 1 user ( s ) complete product standard pc. manufacturer: . price: 107.97. Are Product A and Product B the same? No\n\nProduct A is title: now up-to-date & contact v5 .0 ( mac ). manufacturer: now software. price: . Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: power-on-software. price: 114.99. Are Product A and Product B the same? No\n\n", "destineer publishing": "", "topics-entertainment-software - ( consignment )": "Product A is title: learn2 training for microsoft excel. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: learn2 training for microsoft powerpoint ( win 95 98 me nt 2000 xp ). manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Are Product A and Product B the same? No\n\nProduct A is title: learn2 training for microsoft powerpoint. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: microsoft office and windows training professional. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: learn2 training for microsoft excel. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: microsoft ( r ) excel 2007. manufacturer: . price: 109.99. Are Product A and Product B the same?
No\n\nProduct A is title: learn2 training for microsoft powerpoint. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: topics entertainment learn2 training for microsoft excel. manufacturer: topics-entertainment-software - ( consignment ). price: 6.56. Are Product A and Product B the same? No\n\nProduct A is title: learn2 training for microsoft excel. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: topics entertainment learn2 training for microsoft excel. manufacturer: topics-entertainment-software - ( consignment ). price: 6.56. Are Product A and Product B the same? Yes\n\nProduct A is title: learn2 training for microsoft powerpoint. manufacturer: topics-entertainment-software - ( consignment ). price: 9.99. Product B is title: microsoft ( r ) powerpoint ( r ) 2002 upgrade. manufacturer: . price: 108.87. Are Product A and Product B the same? No\n\n", "sonicwall": "Product A is title: sonicwall gms standard edition 10 node license. manufacturer: sonicwall. price: . Product B is title: 01-ssc-7309 sonicwall cfs standard edition for sonicwall pro 1260/2040/3060 / 4060 subscript. manufacturer: . price: 2124.92. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall gms standard edition 10 node license. manufacturer: sonicwall. price: . Product B is title: 01-ssc-5670 sonicwall cfs standard edition for sonicwall pro 4100 subscription license 1. manufacturer: . price: 857.38. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall gms standard edition 10 node license. manufacturer: sonicwall. price: . Product B is title: 01-ssc-3363 sonicwall global management system standard edition license 10 nodes win. manufacturer: . price: 1835.31. Are Product A and Product B the same? No\n\nProduct A is title: sonicwall internet upgrade ( 50-node ). manufacturer: sonicwall. price: . Product B is title: sonicwall gms 1000 upgrade. manufacturer: . price: 63074.12. Are Product A and Product B the same? No\n\n", "alsoft": "Product A is title: alsoft diskwarrior : mac univeral binary. manufacturer: alsoft. price: . Product B is title: alsoft disk warrior v/5 cd. manufacturer: . price: 107.95. Are Product A and Product B the same? No\n\n", "arcmedia": "Product A is title: easy drums ( jewel case ). manufacturer: arcmedia. price: 9.99. Product B is title: cross stitch design studio ( jewel case ) ( 8006 ). manufacturer: m-and-r-technologies. price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: easy drums ( jewel case ). manufacturer: arcmedia. price: 9.99. Product B is title: poetry fiction and drama ( win/mac ) ( jewel case ) ( 10172 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: easy drums ( jewel case ). manufacturer: arcmedia. price: 9.99. Product B is title: geometry ( win/mac ) ( jewel case ) ( 10158 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\n", "re : launch": "Product A is title: piano wizard premier software & stickers & midi cable. manufacturer: re : launch. price: 149.95. Product B is title: intuit quicken premier 2007 software for windows tax & finance software. manufacturer: . 
price: 73.95. Are Product A and Product B the same? No\n\nProduct A is title: piano wizard premier software & stickers & midi cable. manufacturer: re : launch. price: 149.95. Product B is title: re : launch piano wizard premier software & stickers & midi cable. manufacturer: . price: 113.86. Are Product A and Product B the same? Yes\n\nProduct A is title: bryce 6. manufacturer: re : launch. price: 109.95. Product B is title: re : launch bryce 6. manufacturer: . price: 94.94. Are Product A and Product B the same? Yes\n\n", "grisoft": "Product A is title: grisoft avg anti-virus & anti-spyware 2 year subscription. manufacturer: grisoft. price: 39.95. Product B is title: 01-ssc-6993 sonicwall client/server anti-virus suite subscription license 3 years 50. manufacturer: . price: 3992.98. Are Product A and Product B the same? No\n\nProduct A is title: grisoft avg anti-virus & anti-spyware 2 year subscription. manufacturer: grisoft. price: 39.95. Product B is title: 01-ssc-6997 sonicwall client/server anti-virus suite subscription license 3 years 10. manufacturer: . price: 64740.21. Are Product A and Product B the same? No\n\nProduct A is title: grisoft avg anti-virus & anti-spyware 2 year subscription. manufacturer: grisoft. price: 39.95. Product B is title: 01-ssc-6991 sonicwall client/server anti-virus suite subscription license 3 years 10. manufacturer: . price: 863.87. Are Product A and Product B the same? No\n\nProduct A is title: grisoft avg anti-virus & anti-spyware 2 year subscription. manufacturer: grisoft. price: 39.95. Product B is title: grisoft avg anti-virus & anti-spyware. manufacturer: . price: 32.29. Are Product A and Product B the same? Yes\n\n", "mariner software": "Product A is title: mariner calc speadsheet ( mac ). manufacturer: mariner software. price: 59.99. Product B is title: mariner calc spreadsheet software. manufacturer: . price: 52.99. Are Product A and Product B the same? Yes\n\nProduct A is title: montage ( mac ). manufacturer: mariner software. price: 149.95. Product B is title: adobe illustrator cs3 ( mac ). manufacturer: . price: 599.0. Are Product A and Product B the same? No\n\n", "warner-new-media": "Product A is title: hospital tycoon for pc. manufacturer: warner-new-media. price: 29.99. Product B is title: roller coaster tycoon 3 for pc. manufacturer: . price: 29.99. Are Product A and Product B the same? No\n\nProduct A is title: hospital tycoon for pc. manufacturer: warner-new-media. price: 29.99. Product B is title: warner home video games hospital tycoon. manufacturer: warner-new-media. price: 26.33. Are Product A and Product B the same? Yes\n\n", "extensis corporation": "", "school zone": "Product A is title: time money & fractions grades 1-2. manufacturer: school zone. price: 9.99. Product B is title: time money & fractions grades 1-2 workbook & cd-rom. manufacturer: . price: 12.99. Are Product A and Product B the same? No\n\nProduct A is title: school zone flash action addition/subtraction ( windows/macintosh ). manufacturer: school zone. price: 12.99. Product B is title: school zone interactive flash action phonics made easy. manufacturer: . price: 8.8. Are Product A and Product B the same? No\n\nProduct A is title: school zone flash action addition/subtraction ( windows/macintosh ). manufacturer: school zone. price: 12.99. Product B is title: school zone flash action alphabet/numbers 1-100 ( pc & mac ) ( 08410 ). manufacturer: . price: 9.59. Are Product A and Product B the same? No\n\nProduct A is title: vocabulary puzzles grade 1. 
manufacturer: school zone. price: 14.95. Product B is title: school zone interactive vocabulary puzzles 1 on track software. manufacturer: . price: 9.45. Are Product A and Product B the same? Yes\n\n", "sony-pictures-home-entertainment": "Product A is title: vegas 6 for pc. manufacturer: sony-pictures-home-entertainment. price: 599.99. Product B is title: dramatica pro 4 for pc. manufacturer: . price: 249.99. Are Product A and Product B the same? No\n\nProduct A is title: vegas 6 for pc. manufacturer: sony-pictures-home-entertainment. price: 599.99. Product B is title: vegas + dvd sony. manufacturer: sony-pictures-digital-entertainment. price: 899.99. Are Product A and Product B the same? No\n\n", "quark": "Product A is title: quarkxpress 7 windows & macintosh ). manufacturer: quark. price: 899.0. Product B is title: quark 119700 quarkxpress 6.5 mac. manufacturer: quark. price: 736.99. Are Product A and Product B the same? No\n\nProduct A is title: quarkxpress 7 windows & macintosh ). manufacturer: quark. price: 899.0. Product B is title: quark xpress 7 ( no returns ). manufacturer: . price: 714.99. Are Product A and Product B the same? No\n\nProduct A is title: quarkxpress 7 windows & macintosh ). manufacturer: quark. price: 899.0. Product B is title: quark quarkxpress 7 upgrade for mac & windows. manufacturer: . price: 244.99. Are Product A and Product B the same? No\n\nProduct A is title: quarkxpress 7 for mac/win .. manufacturer: quark. price: 1289.42. Product B is title: quark xpress 7 ( no returns ). manufacturer: . price: 714.99. Are Product A and Product B the same? No\n\nProduct A is title: quarkxpress 7 windows & macintosh ). manufacturer: quark. price: 899.0. Product B is title: quark xpress 7 upgrade ( no returns ). manufacturer: . price: 239.99. Are Product A and Product B the same? No\n\n", "symantec corporation .": "", "strategy first": "Product A is title: great invasions. manufacturer: strategy first. price: 39.99. Product B is title: strategy first inc. great invasions. manufacturer: . price: 34.07. Are Product A and Product B the same? Yes\n\nProduct A is title: gods : lands of infinity special edition. manufacturer: strategy first. price: 29.99. Product B is title: reader rabbit kindergarten special 2-cd edition. manufacturer: . price: 12.95. Are Product A and Product B the same? No\n\nProduct A is title: alliance future combat. manufacturer: strategy first. price: 19.99. Product B is title: strategy first inc. alliance future combat. manufacturer: . price: 16.37. Are Product A and Product B the same? Yes\n\n", "power-on-software": "Product A is title: power on now up to date & contact windows. manufacturer: power-on-software. price: 129.99. Product B is title: powerhouse migo for ipod for windows. manufacturer: . price: 71.99. Are Product A and Product B the same? No\n\nProduct A is title: power on now up to date & contact windows. manufacturer: power-on-software. price: 129.99. Product B is title: now software nuw45e1k now up-to-date & contact for windows. manufacturer: power-on-software. price: 114.99. Are Product A and Product B the same? Yes\n\n", "vivendi": "Product A is title: world of warcraft pc. manufacturer: vivendi. price: 35.7. Product B is title: world of warcraft burning crusade. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: world of warcraft pc. manufacturer: vivendi. price: 35.7. Product B is title: vivendi-universal games inc 72305 world of warcraft 60 day prepaid. manufacturer: . price: 27.99. 
Are Product A and Product B the same? No\n\n", "bling": "Product A is title: podmaxx '07. manufacturer: bling. price: 29.99. Product B is title: podmaxx '07 eb carlson. manufacturer: . price: 29.99. Are Product A and Product B the same? Yes\n\nProduct A is title: safekeeper plus. manufacturer: bling. price: 39.99. Product B is title: bling software limited safekeeper plus. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: safekeeper plus. manufacturer: bling. price: 39.99. Product B is title: bling software limited safekeeper plus. manufacturer: . price: 34.75. Are Product A and Product B the same? Yes\n\n", "sos-aggregation-company": "Product A is title: web graphics creator. manufacturer: sos-aggregation-company. price: 39.95. Product B is title: enteractive inc fps creator. manufacturer: . price: 44.58. Are Product A and Product B the same? No\n\nProduct A is title: listen for ipod magnetic time. manufacturer: sos-aggregation-company. price: 39.99. Product B is title: iaudioize magnetic time for pc and mac. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: web graphics creator. manufacturer: sos-aggregation-company. price: 39.95. Product B is title: graphics software. manufacturer: . price: 99.92. Are Product A and Product B the same? No\n\nProduct A is title: listen for ipod magnetic time. manufacturer: sos-aggregation-company. price: 39.99. Product B is title: sos aggregation company iaudioize magnetic time. manufacturer: . price: 31.53. Are Product A and Product B the same? No\n\nProduct A is title: calculation skills v1 add & sub basic core learning. manufacturer: sos-aggregation-company. price: . Product B is title: sos aggregation company brain builder core learning. manufacturer: . price: 15.91. Are Product A and Product B the same? No\n\nProduct A is title: listen for ipod magnetic time. manufacturer: sos-aggregation-company. price: 39.99. Product B is title: listen for ipod magnetic time ( win 2000 xp ). manufacturer: sos-aggregation-company. price: 39.99. Are Product A and Product B the same? Yes\n\nProduct A is title: calculation skills v1 add & sub basic core learning. manufacturer: sos-aggregation-company. price: . Product B is title: sos aggregation company calculation skills v1 add & sub basic core learning. manufacturer: sos-aggregation-company. price: 15.91. Are Product A and Product B the same? Yes\n\n", "avanquest publishing usa inc.": "", "iris": "Product A is title: readiris pro 11 corporate edition ( mac ). manufacturer: iris. price: 499.95. Product B is title: iris inc readiris pro 11 corporate edition. manufacturer: . price: 430.58. Are Product A and Product B the same? Yes\n\nProduct A is title: irispen express translator. manufacturer: iris. price: 149.99. Product B is title: iris inc irispen express translator. manufacturer: . price: 142.29. Are Product A and Product B the same? Yes\n\nProduct A is title: readiris pro 11 corporate edition ( mac ). manufacturer: iris. price: 499.95. Product B is title: smart-sound sonicfire pro 4 corporate edition mac os x audio software for video. manufacturer: . price: 448.95. Are Product A and Product B the same? No\n\nProduct A is title: iriscard pro business card scanner. manufacturer: iris. price: 199.99. Product B is title: iris card pro. manufacturer: navarre distribution services. price: 198.87. Are Product A and Product B the same? Yes\n\nProduct A is title: readiris pro 11 corporate edition ( mac ). manufacturer: iris. 
price: 499.95. Product B is title: readiris pro 11 corporate edition for pc. manufacturer: . price: 349.99. Are Product A and Product B the same? No\n\nProduct A is title: iriscard pro business card scanner. manufacturer: iris. price: 199.99. Product B is title: encore inc printshop business card maker. manufacturer: riverdeep-learning-company. price: 8.01. Are Product A and Product B the same? No\n\nProduct A is title: iriscard pro business card scanner. manufacturer: iris. price: 199.99. Product B is title: belight business card composer. manufacturer: . price: 39.99. Are Product A and Product B the same? No\n\nProduct A is title: readiris pro 11 corporate edition ( mac ). manufacturer: iris. price: 499.95. Product B is title: iris readiris pro 11 mac ocr. manufacturer: . price: 109.99. Are Product A and Product B the same? No\n\nProduct A is title: iriscard pro business card scanner. manufacturer: iris. price: 199.99. Product B is title: iriscard pro. manufacturer: . price: 174.99. Are Product A and Product B the same? Yes\n\nProduct A is title: irispen express translator. manufacturer: iris. price: 149.99. Product B is title: iris inc irispen express translator. manufacturer: . price: 142.29. Are Product A and Product B the same? Yes\n\n", }, f"{DATASET_PATH}/data_imputation/Buy": "name: Transcend 8GB Compact Flash Card (133x) - TS8GCF133. description: Transcend 8GB CompactFlash Card (133x). Who is the manufacturer? TRANSCEND INFORMATION\n\nname: LG 42LG30 42' LCD TV. description: LG 42LG30 42' LCD HDTV - 12,000:1 Dynamic Contrast Ratio - Invisible Speaker. Who is the manufacturer? LG Electronics\n\nname: Speck Products SeeThru Case for Apple MacBook Air - MBA-PNK-SEE. description: Plastic - Pink. Who is the manufacturer? Speck Products\n\nname: Peerless Universal Tilt Wall Mount. description: Peerless Smart Mount ST660P Universal Tilt Wall Mount for 37' to 60' Screens (Black) up to 200lbs. Who is the manufacturer? Peerless\n\nname: Apple Time Capsule Network Hard Drive - MB277LL/A. description: 1TB - Type A USB. Who is the manufacturer? Apple\n\nname: Sirius SUPH1 Sirius Universal Home Kit. description: Sirius Satellite Radio Receiver. Who is the manufacturer? Sirius\n\nname: OmniMount TV Top Shelf Mount. description: OmniMount CCH1B Set-Top Center-Channel Shelf. Who is the manufacturer? OMNIMOUNT SYSTEMS, INC\n\nname: Monster Cable iFreePlay Cordless Headphone - AI SH HPHONE. description: Connectivity: Wireless - Stereo - Behind-the-neck. Who is the manufacturer? Monster\n\nname: Pure Digital Flip Mino Digital Camcorder - F360B. description: Flip Video Mino 60 min Black. Who is the manufacturer? Pure Digital Technol\n\nname: Elgato EyeTV Hybrid Analog/Digital TV Tuner Stick - 10020630. description: Elgato EyeTV Hybrid TV Tuner Stick for Analog and HDTV Reception - USB. manufacturer? ELGATO SYSTEMS\n", f"{DATASET_PATH}/data_imputation/Restaurant": "name: oceana. addr: 55 e. 54th st.. phone: 212/759-5941. type: seafood. What is the city? new york\n\nname: oceana. addr: 55 e. 54th st.. phone: 212-759-5941. type: seafood. What is the city? new york city\n", f"{DATASET_PATH}/error_detection/Hospital": { "EmergencyService": "Is there a x spelling error in EmergencyService: nan? No\n\nIs there a x spelling error in EmergencyService: yes? No\n\nIs there a x spelling error in EmergencyService: yes? No\n\nIs there a x spelling error in EmergencyService: yex? Yes\n", "City": "Is there a x spelling error in City: nan? No\n\nIs there a x spelling error in City: birmingham? 
No\n\nIs there a x spelling error in City: sheffield? No\n\nIs there a x spelling error in City: birminghxm? Yes\n", "Score": "Is there a x spelling error in Score: nan? No\n\nIs there a x spelling error in Score: 78%? No\n\nIs there a x spelling error in Score: 1xx%? Yes\n", "HospitalType": "Is there a x spelling error in HospitalType: nan? No\n\nIs there a x spelling error in HospitalType: acute care hospitals? No\n\nIs there a x spelling error in HospitalType: acute care hospitals? No\n\nIs there a x spelling error in HospitalType: acutexcarexhospitals? Yes\n", "CountyName": "Is there a x spelling error in CountyName: nan? No\n\nIs there a x spelling error in CountyName: calhoun? No\n\nIs there a x spelling error in CountyName: marion? No\n\nIs there a x spelling error in CountyName: xhambers? Yes\n", "MeasureName": "Is there a x spelling error in MeasureName: nan? No\n\nIs there a x spelling error in MeasureName: pneumonia patients given the most appropriate initial antibiotic(s)? No\n\nIs there a x spelling error in MeasureName: heart attack patients given aspirin at discharge? No\n\nIs there a x spelling error in MeasureName: allxheartxsurgeryxpatientsxwhosexbloodxsugarx(bloodxglucose)xisxkeptxunderxgoodxcontrolxinxthexdaysxrightxafterxsurgery? Yes\n", "HospitalName": "Is there a x spelling error in HospitalName: nan? No\n\nIs there a x spelling error in HospitalName: fayette medical center? No\n\nIs there a x spelling error in HospitalName: huntsville hospital? No\n\nIs there a x spelling error in HospitalName: medixal xenter enterprise? Yes\n", "Condition": "Is there a x spelling error in Condition: nan? No\n\nIs there a x spelling error in Condition: heart attack? No\n\nIs there a x spelling error in Condition: surgical infection prevention? No\n\nIs there a x spelling error in Condition: hearx axxack? Yes\n", "Stateavg": "Is there a x spelling error in Stateavg: nan? No\n\nIs there a x spelling error in Stateavg: al_pn-6? No\n\nIs there a x spelling error in Stateavg: al_scip-inf-3? No\n\nIs there a x spelling error in Stateavg: al_scipxinfx2? Yes\n\nIs there a x spelling error in Stateavg: al_ami-7a? No\n\nIs there a x spelling error in Stateavg: alxami-7a? Yes\n", "MeasureCode": "Is there a x spelling error in MeasureCode: nan? No\n\nIs there a x spelling error in MeasureCode: ami-7a? No\n\nIs there a x spelling error in MeasureCode: scip-inf-2? No\n\nIs there a x spelling error in MeasureCode: xax-1? Yes\n", "ZipCode": "Is there a x spelling error in ZipCode: nan? No\n\nIs there a x spelling error in ZipCode: 35640? No\n\nIs there a x spelling error in ZipCode: 36302? No\n\nIs there a x spelling error in ZipCode: x6x05? Yes\n", "Sample": "Is there a x spelling error in Sample: nan? No\n\nIs there a x spelling error in Sample: 13 patients? No\n\nIs there a x spelling error in Sample: 97 patients? No\n\nIs there a x spelling error in Sample: 4 xatients? Yes\n", "HospitalOwner": "Is there a x spelling error in HospitalOwner: nan? No\n\nIs there a x spelling error in HospitalOwner: proprietary? No\n\nIs there a x spelling error in HospitalOwner: government - hospital district or authority? No\n\nIs there a x spelling error in HospitalOwner: goverxmext - hospital district or authority? Yes\n", "Address1": "Is there a x spelling error in Address1: nan? No\n\nIs there a x spelling error in Address1: 126 hospital ave? No\n\nIs there a x spelling error in Address1: 2000 pepperell parkway? No\n\nIs there a x spelling error in Address1: 400xnxedwardsxstreet? 
Yes\n", "Address2": "Is there a x spelling error in Address2: nan? No\n\nIs there a x spelling error in Address2: ami-7a? No\n\nIs there a x spelling error in Address2: scip-inf-2? No\n\nIs there a x spelling error in Address2: xax-1? Yes\n", "Address3": "Is there a x spelling error in Address3: nan? No\n\nIs there a x spelling error in Address3: ami-7a? No\n\nIs there a x spelling error in Address3: scip-inf-2? No\n\nIs there a x spelling error in Address3: xax-1? Yes\n", "ProviderNumber": "Is there a x spelling error in ProviderNumber: nan? No\n\nIs there a x spelling error in ProviderNumber: 10005? No\n\nIs there a x spelling error in ProviderNumber: 10055? No\n\nIs there a x spelling error in ProviderNumber: 1xx32? Yes\n", "State": "Is there a x spelling error in State: nan? No\n\nIs there a x spelling error in State: al? No\n\nIs there a x spelling error in State: al? No\n\nIs there a x spelling error in State: ax? Yes\n", "PhoneNumber": "Is there a x spelling error in PhoneNumber: nan? No\n\nIs there a x spelling error in PhoneNumber: 3343762205? No\n\nIs there a x spelling error in PhoneNumber: 2562651000? No\n\nIs there a x spelling error in PhoneNumber: 2568453x50? Yes\n", }, }
fm_data_tasks-main
fm_data_tasks/utils/constants.py
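# Illustrative sketch (all names below are hypothetical; the head of the
# dictionary whose tail appears above is not shown in this excerpt of
# constants.py). The dictionary maps dataset paths to curated few-shot prefix
# strings; for entity matching the value is a nested dict keyed by
# manufacturer. At query time a prefix is simply concatenated with one
# serialized test example.
DATASET_PATH = "data/datasets"  # normally taken from the DATASET_PATH env var
PREFIXES = {  # stand-in for the full dictionary whose tail appears above
    f"{DATASET_PATH}/data_imputation/Restaurant": (
        "name: oceana. addr: 55 e. 54th st.. phone: 212/759-5941. "
        "type: seafood. What is the city? new york\n\n"
    ),
}

def build_prompt(prefix_key: str, serialized_query: str) -> str:
    """Prepend the stored few-shot prefix to one serialized test example."""
    return PREFIXES[prefix_key] + serialized_query

# The query record here is made up for illustration.
prompt = build_prompt(
    f"{DATASET_PATH}/data_imputation/Restaurant",
    "name: example bistro. addr: 123 main st.. phone: 555-0100. "
    "type: cafe. What is the city?",
)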
"""Init."""
fm_data_tasks-main
fm_data_tasks/utils/__init__.py
"""Data utils.""" import logging from functools import partial from pathlib import Path from typing import Dict, List import pandas as pd from fm_data_tasks.utils import constants logger = logging.getLogger(__name__) def sample_train_data(train: pd.DataFrame, n_rows: int): """ Sample train data. Used when random sampling points for prompt. """ res = train.sample(n_rows) return res def serialize_row( row: pd.core.series.Series, column_map: Dict[str, str], sep_tok: str, nan_tok: str, ) -> str: """Turn structured row into string.""" res = [] for c_og, c_map in column_map.items(): if str(row[c_og]) == "nan": row[c_og] = nan_tok else: row[c_og] = f"{row[c_og].strip()}" res.append(f"{c_map}: {row[c_og]}".lstrip()) if len(sep_tok) > 0 and sep_tok != ".": sep_tok = f" {sep_tok}" return f"{sep_tok} ".join(res) def serialize_match_pair( row: pd.core.series.Series, column_mapA: Dict[str, str], column_mapB: Dict[str, str], add_instruction: bool, instruction: str, suffix: str, prod_name: str, sep_tok: str, nan_tok: str, ) -> str: """Turn structured pair of entities into string for matching.""" res = ( f"{prod_name} A is {serialize_row(row, column_mapA, sep_tok, nan_tok)}." f" {prod_name} B is {serialize_row(row, column_mapB, sep_tok, nan_tok)}." f"{suffix} " ) if add_instruction: res = f"{instruction} {res}" return res def serialize_imputation( row: pd.core.series.Series, column_map: Dict[str, str], impute_col: str, add_instruction: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> str: """Turn single entity into string for imputation.""" assert impute_col not in column_map, f"{impute_col} cannot be in column map" # Rename to avoid passing white spaced sep token to serialize_row sep_tok_ws = sep_tok if len(sep_tok) > 0 and sep_tok != ".": sep_tok_ws = f" {sep_tok}" res = f"{serialize_row(row, column_map, sep_tok, nan_tok)}{sep_tok_ws}{suffix} " if add_instruction: res = f"{instruction} {res}" return res def serialize_error_detection_spelling( row: pd.core.series.Series, add_instruction: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> str: """Turn single cell into string for error detection.""" column_map = {row["col_name"]: row["col_name"]} res = f"Is there a x spelling error in {serialize_row(row, column_map, sep_tok, nan_tok)}{suffix} " if add_instruction: res = f"{instruction} {res}" return res def serialize_error_detection( row: pd.core.series.Series, add_prefix: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> str: """Turn single cell into string for error detection.""" column_map = { c: c for c in row.index if str(c) not in ["Unnamed: 0", "text", "col_name", "label_str", "is_clean"] } entire_row = serialize_row(row, column_map, sep_tok, nan_tok) column_map = {row["col_name"]: row["col_name"]} res = f"{entire_row}\n\nIs there an error in {serialize_row(row, column_map, sep_tok, nan_tok)}{suffix} " if add_prefix: res = f"{instruction} {res}" return res def serialize_schema_match( row: pd.core.series.Series, add_prefix: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> str: """Turn single cell into string for schema matching.""" res = f"A is {row['left']}. B is {row['right']}. 
{suffix} " if add_prefix: res = f"{instruction}\n\n{res}" return res def read_blocked_pairs( split_path: str, tableA: pd.DataFrame, tableB: pd.DataFrame, cols_to_drop: List[str], col_renaming: Dict[str, str], add_instruction: bool, instruction: str, suffix: str, prod_name: str, sep_tok: str, nan_tok: str, ) -> pd.DataFrame: """Read in pre-blocked pairs with T/F match labels.""" for c in cols_to_drop: tableA = tableA.drop(c, axis=1, inplace=False) tableB = tableB.drop(c, axis=1, inplace=False) if len(col_renaming) > 0: tableA = tableA.rename(columns=col_renaming, inplace=False) tableB = tableB.rename(columns=col_renaming, inplace=False) column_mapA = {f"{c}_A": c for c in tableA.columns if c != "id"} column_mapB = {f"{c}_B": c for c in tableB.columns if c != "id"} labels = pd.read_csv(split_path) mergedA = pd.merge(labels, tableA, right_on="id", left_on="ltable_id") merged = pd.merge( mergedA, tableB, right_on="id", left_on="rtable_id", suffixes=("_A", "_B"), ) merged["text"] = merged.apply( lambda row: serialize_match_pair( row, column_mapA, column_mapB, add_instruction, instruction, suffix, prod_name, sep_tok, nan_tok, ), axis=1, ) merged["label_str"] = merged.apply( lambda row: "Yes\n" if row["label"] == 1 else "No\n", axis=1 ) return merged def read_imputation_single( split_path: str, impute_col: str, cols_to_drop: List[str], col_renaming: Dict[str, str], add_instruction: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> pd.DataFrame: """Read in table and create label impute col.""" table = pd.read_csv(split_path) for c in cols_to_drop: table = table.drop(c, axis=1, inplace=False) if len(col_renaming) > 0: table = table.rename(columns=col_renaming, inplace=False) column_map = {c: c for c in table.columns if c != "id" and c != impute_col} table["text"] = table.apply( lambda row: serialize_imputation( row, column_map, impute_col, add_instruction, instruction, suffix, sep_tok, nan_tok, ), axis=1, ) table["label_str"] = table[impute_col].apply(lambda x: f"{x}\n") return table def read_error_detection_single( split_path: str, table: pd.DataFrame, cols_to_drop: List[str], col_renaming: Dict[str, str], add_instruction: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, spelling: bool, ) -> pd.DataFrame: """Read in table and create label impute col.""" for c in cols_to_drop: table = table.drop(c, axis=1, inplace=False) if len(col_renaming) > 0: table = table.rename(columns=col_renaming, inplace=False) # row_id, col_name, is_clean labels = pd.read_csv(split_path) if spelling: merged = pd.merge(labels, table, left_on="row_id", right_index=True) merged["text"] = merged.apply( lambda row: serialize_error_detection_spelling( row, add_instruction, instruction, suffix, sep_tok, nan_tok, ), axis=1, ) else: merged = table merged["text"] = merged.apply( lambda row: serialize_error_detection( row, add_instruction, instruction, suffix, sep_tok, nan_tok, ), axis=1, ) merged["label_str"] = merged.apply( lambda row: "No\n" if row["is_clean"] == 1 else "Yes\n", axis=1 ) return merged def read_schema_match_single( split_path: str, table: pd.DataFrame, cols_to_drop: List[str], col_renaming: Dict[str, str], add_instruction: bool, instruction: str, suffix: str, sep_tok: str, nan_tok: str, ) -> pd.DataFrame: """Read in table and create label impute col.""" file = pd.read_csv(split_path) for c in cols_to_drop: file = file.drop(c, axis=1, inplace=False) if len(col_renaming) > 0: file = file.rename(columns=col_renaming, inplace=False) # row_id, col_name, is_clean # labels = 
pd.read_csv(split_path) # merged = pd.merge(labels, table, left_on="Unnamed: 0", right_index=True) file["text"] = file.apply( lambda row: serialize_schema_match( row, add_instruction, instruction, suffix, sep_tok, nan_tok, ), axis=1, ) file["label_str"] = file.apply( lambda row: "No\n" if row["label"] == 0 else "Yes\n", axis=1 ) return file def read_raw_data( data_dir: str, add_instruction: bool = False, task_instruction_idx: int = 0, sep_tok: str = ".", nan_tok: str = "nan", ): """Read in data where each directory is unique for a task.""" data_files_sep = {"test": {}, "train": {}, "validation": {}} logger.info(f"Processing {data_dir}") if data_dir not in constants.DATA2TASK: raise ValueError( f"{data_dir} not one of {constants.DATA2TASK.keys()}. Make sure to set DATASET_PATH." ) task = constants.DATA2TASK[data_dir] instruction = constants.DATA2INSTRUCT[data_dir] suffix = constants.DATA2SUFFIX[data_dir] cols_to_drop = constants.DATA2DROPCOLS[data_dir] col_renaming = constants.DATA2COLREMAP[data_dir] data_dir_p = Path(data_dir) if task == "entity_matching": train_file = data_dir_p / "train.csv" valid_file = data_dir_p / "valid.csv" test_file = data_dir_p / "test.csv" tableA_file = data_dir_p / "tableA.csv" tableB_file = data_dir_p / "tableB.csv" tableA = pd.read_csv(tableA_file) tableB = pd.read_csv(tableB_file) label_col = "label" read_data_func = partial( read_blocked_pairs, tableA=tableA, tableB=tableB, cols_to_drop=cols_to_drop, col_renaming=col_renaming, add_instruction=add_instruction, instruction=instruction, suffix=suffix, prod_name=constants.MATCH_PROD_NAME[data_dir], sep_tok=sep_tok, nan_tok=nan_tok, ) elif task == "data_imputation": train_file = data_dir_p / "train.csv" valid_file = data_dir_p / "valid.csv" test_file = data_dir_p / "test.csv" label_col = constants.IMPUTE_COLS[data_dir] read_data_func = partial( read_imputation_single, impute_col=label_col, cols_to_drop=cols_to_drop, col_renaming=col_renaming, add_instruction=add_instruction, instruction=instruction, suffix=suffix, sep_tok=sep_tok, nan_tok=nan_tok, ) elif task == "error_detection_spelling": train_file = data_dir_p / "train.csv" valid_file = data_dir_p / "valid.csv" test_file = data_dir_p / "test.csv" table_file = data_dir_p / "table.csv" table = pd.read_csv(table_file) label_col = "is_clean" read_data_func = partial( read_error_detection_single, table=table, cols_to_drop=cols_to_drop, col_renaming=col_renaming, add_instruction=add_instruction, instruction=instruction, suffix=suffix, sep_tok=sep_tok, nan_tok=nan_tok, spelling=True, ) elif task == "error_detection": train_file = data_dir_p / "train.csv" valid_file = data_dir_p / "valid.csv" test_file = data_dir_p / "test.csv" table_file = data_dir_p / "table.csv" table = pd.read_csv(table_file) label_col = "is_clean" read_data_func = partial( read_error_detection_single, table=table, cols_to_drop=cols_to_drop, col_renaming=col_renaming, add_instruction=add_instruction, instruction=instruction, suffix=suffix, sep_tok=sep_tok, nan_tok=nan_tok, spelling=False, ) elif task == "schema_matching": train_file = data_dir_p / "train.csv" valid_file = data_dir_p / "valid.csv" test_file = data_dir_p / "test.csv" table_file = data_dir_p / "table.csv" label_col = "label" table = pd.read_csv(table_file) read_data_func = partial( read_schema_match_single, table=table, cols_to_drop=cols_to_drop, col_renaming=col_renaming, add_instruction=add_instruction, instruction=instruction, suffix=suffix, sep_tok=sep_tok, nan_tok=nan_tok, ) else: raise ValueError(f"Task {task} not recognized.") 
data_files_sep["train"] = read_data_func(train_file) # Read validation if valid_file.exists(): data_files_sep["validation"] = read_data_func(valid_file) # Read test if test_file.exists(): data_files_sep["test"] = read_data_func(test_file) return data_files_sep, label_col def read_data( data_dir: str, class_balanced: bool = False, add_instruction: bool = False, task_instruction_idx: int = 0, max_train_samples: int = -1, max_train_percent: float = -1, sep_tok: str = ".", nan_tok: str = "nan", ): """Read in data where each directory is unique for a task.""" data_files_sep, label_col = read_raw_data( data_dir=data_dir, add_instruction=add_instruction, task_instruction_idx=task_instruction_idx, sep_tok=sep_tok, nan_tok=nan_tok, ) task = constants.DATA2TASK[data_dir] # Don't class balance on open ended classificiation tasks if class_balanced and task != "data_imputation": # Class balance sample the train data label_cnts = data_files_sep["train"].groupby(label_col).count() sample_per_class = label_cnts.min()["text"] logger.info(f"Class balanced: train sample per class: {sample_per_class}") data_files_sep["train"] = ( data_files_sep["train"] .groupby(label_col, group_keys=False) .apply(lambda x: x.sample(sample_per_class, random_state=42)) ) # Shuffle train data data_files_sep["train"] = ( data_files_sep["train"].sample(frac=1, random_state=42).reset_index(drop=True) ) if max_train_samples > 0: orig_train_len = len(data_files_sep["train"]) if max_train_samples > 1.0: raise ValueError("max_train_samples must be between 0 and 1") max_examples = int(max_train_samples * orig_train_len) data_files_sep["train"] = data_files_sep["train"].iloc[:max_examples] logger.info( f"Length of {data_dir} train is " f"{data_files_sep['train'].shape[0]} from {orig_train_len}" ) return data_files_sep
fm_data_tasks-main
fm_data_tasks/utils/data_utils.py