repo_name
stringlengths 6
130
| hexsha
list | file_path
list | code
list | apis
list |
---|---|---|---|---|
Myyyr/segmentation
|
[
"6b9423e327cff1eb23599404031b7fb8e9ecf75d"
] |
[
"models/layers/loss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss\nfrom torch.autograd import Function, Variable\nimport numpy as np\ndef cross_entropy_2D(input, target, weight=None, size_average=True):\n n, c, h, w = input.size()\n log_p = F.log_softmax(input, dim=1)\n log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n target = target.view(target.numel())\n loss = F.nll_loss(log_p, target, weight=weight, size_average=False)\n if size_average:\n loss /= float(target.numel())\n return loss\n\n\ndef cross_entropy_3D(input, target, weight=None, size_average=True):\n n, c, h, w, s = input.size()\n log_p = F.log_softmax(input, dim=1)\n log_p = log_p.transpose(1, 2).transpose(2, 3).transpose(3, 4).contiguous().view(-1, c)\n target = target.view(target.numel())\n loss = F.nll_loss(log_p, target, weight=weight, size_average=False)\n if size_average:\n loss /= float(target.numel())\n return loss\n\n\nclass SoftDiceLoss(nn.Module):\n def __init__(self, n_classes):\n super(SoftDiceLoss, self).__init__()\n self.one_hot_encoder = One_Hot(n_classes).forward\n self.n_classes = n_classes\n\n def forward(self, input, target):\n smooth = 0.01\n batch_size = input.size(0)\n\n input = F.softmax(input, dim=1)\n # print(\"In Loss Sum 0 :\",np.sum(input.cpu().detach().numpy()[:,0,...]))\n # print(\"In Loss Sum 1 :\",np.sum(input.cpu().detach().numpy()[:,1,...]))\n input = input.view(batch_size, self.n_classes, -1)\n target = self.one_hot_encoder(target).contiguous().view(batch_size, self.n_classes, -1)\n\n inter = torch.sum(input * target, 2) + smooth\n union = torch.sum(input, 2) + torch.sum(target, 2) + smooth\n\n score = torch.sum(2.0 * inter / union)\n score = 1.0 - score / (float(batch_size) * float(self.n_classes))\n\n return score\n\n\nclass CustomSoftDiceLoss(nn.Module):\n def __init__(self, n_classes, class_ids):\n super(CustomSoftDiceLoss, self).__init__()\n self.one_hot_encoder = One_Hot(n_classes).forward\n 
self.n_classes = n_classes\n self.class_ids = class_ids\n\n def forward(self, input, target):\n smooth = 0.01\n batch_size = input.size(0)\n\n input = F.softmax(input[:,self.class_ids], dim=1).view(batch_size, len(self.class_ids), -1)\n target = self.one_hot_encoder(target).contiguous().view(batch_size, self.n_classes, -1)\n target = target[:, self.class_ids, :]\n\n inter = torch.sum(input * target, 2) + smooth\n union = torch.sum(input, 2) + torch.sum(target, 2) + smooth\n\n score = torch.sum(2.0 * inter / union)\n score = 1.0 - score / (float(batch_size) * float(self.n_classes))\n\n return score\n\n\nclass One_Hot(nn.Module):\n def __init__(self, depth):\n super(One_Hot, self).__init__()\n self.depth = depth\n self.ones = torch.sparse.torch.eye(depth).cuda()\n\n def forward(self, X_in):\n n_dim = X_in.dim()\n output_size = X_in.size() + torch.Size([self.depth])\n num_element = X_in.numel()\n X_in = X_in.data.long().view(num_element)\n out = Variable(self.ones.index_select(0, X_in)).view(output_size)\n return out.permute(0, -1, *range(1, n_dim)).squeeze(dim=2).float()\n\n def __repr__(self):\n return self.__class__.__name__ + \"({})\".format(self.depth)\n\n\nif __name__ == '__main__':\n from torch.autograd import Variable\n depth=3\n batch_size=2\n encoder = One_Hot(depth=depth).forward\n y = Variable(torch.LongTensor(batch_size, 1, 1, 2 ,2).random_() % depth).cuda() # 4 classes,1x3x3 img\n y_onehot = encoder(y)\n x = Variable(torch.randn(y_onehot.size()).float())#.cuda()\n dicemetric = SoftDiceLoss(n_classes=depth)\n dicemetric(x,y)"
] |
[
[
"torch.Size",
"torch.nn.functional.log_softmax",
"torch.LongTensor",
"torch.sparse.torch.eye",
"torch.nn.functional.softmax",
"torch.nn.functional.nll_loss",
"torch.sum"
]
] |
cmuartfab/handtrack-evaluation
|
[
"e98d2c553b50ded19235e76f7624949ac18ab3ee"
] |
[
"label_tool/labeler.py"
] |
[
"# Labeling tool for creating the hand tracking dataset for Dranimate\n#\n#\n# Program reads images from a given directory and lets the user draw points on\n# the image with their mouse. These selected (x,y) coordinates are then saved\n# into a text file in a user specified output directory. The program stores all\n# the text files in a directory called labels.\n#\n# To run in command line: \n# python labeler.py --input <InputDir> --output <OutputDir>\n# Ex. python labeler.py --input <path/to/images/> --output <path/>\n#\n# Press 'd' to move on to the next image\n# Press 'esc' to quit the program\n#\n# The data is stored in textfile as the (x,y) coordinates of the fingers in this order\n# \t(x1,y1) => pinky\n# \t(x2,y2) => ring\n# \t(x3,y3) => middle\n# \t(x4,y4) => index\n# \t(x5,y5) => thumb\n\nimport cv2\nimport numpy as np\nimport glob\nimport argparse\nimport os\n\n### Mouse event to save x,y pixel coordinates ###\ndef savePixelCoordinate(event, x, y, flags, param):\n\tglobal points\n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\tcv2.circle(img,(x,y),5,(255,0,0),-1)\n\t\tpoints.append((x,y))\n\t\tprint(points)\n\n### Display the image ###\ndef displayImage(img):\n\tglobal points\n\tquit = False\n\n\twhile(1):\n\t\t# Show the image in a new window\n\t\tcv2.imshow('image', img)\n\n\t\t# Wait for a key press\n\t\tk = cv2.waitKey(1) & 0xFF\n\t\tif k == ord('d'): # Press d for done\n\t\t\tbreak\n\t\telif k == 27:\n\t\t\tquit = True\n\t\t\tbreak\n\n\t# Destroy the window\n\tcv2.destroyAllWindows()\n\treturn quit\n\n#### MAIN ####\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', help='image input directory')\nparser.add_argument('--output', help='textfile output directory')\nargs = parser.parse_args()\n\n# Create output directory\noutDirectory = args.output + \"labels/\"\nif not os.path.exists(outDirectory):\n os.makedirs(outDirectory)\n\npoints = [];\n\nfor imgPath in glob.glob(args.input + \"*.jpg\"):\n\t# Read the image using the path\n\timg = 
cv2.imread(imgPath)\n\tcv2.namedWindow('image')\n\n\t# Intialize mouse callback\n\tcv2.setMouseCallback('image', savePixelCoordinate)\n\n\t# Show image in a new window\n\tdone = displayImage(img)\n\n\t# Check if we can quit the program\n\tif done:\n\t\tbreak\n\n\t# Save points to text file\n\tfileName = os.path.basename(imgPath)\n\tfileName = os.path.splitext(fileName)[0]\n\tnp.savetxt(outDirectory + fileName + '.txt', points, fmt='%i')\n\n\t# Reset points for next image\n\tpoints = [];\n\nprint('bye bye!')\n"
] |
[
[
"numpy.savetxt"
]
] |
haofengsiji/synthetic-to-real-semantic-segmentation
|
[
"048b18871b07052d67f5356e6b31218c2f85b29a",
"048b18871b07052d67f5356e6b31218c2f85b29a"
] |
[
"modeling/backbone/mobilenet.py",
"val.py"
] |
[
"import os\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport math\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nimport torch.utils.model_zoo as model_zoo\n\ndef conv_bn(inp, oup, stride, BatchNorm):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n BatchNorm(oup),\n nn.ReLU6(inplace=True)\n )\n\n\ndef fixed_padding(inputs, kernel_size, dilation):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))\n return padded_inputs\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n self.kernel_size = 3\n self.dilation = dilation\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),\n BatchNorm(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, 1, bias=False),\n BatchNorm(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, 1, bias=False),\n BatchNorm(hidden_dim),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),\n BatchNorm(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, bias=False),\n BatchNorm(oup),\n )\n\n def forward(self, x):\n x_pad = fixed_padding(x, self.kernel_size, dilation=self.dilation)\n if self.use_res_connect:\n x = x + self.conv(x_pad)\n else:\n x = self.conv(x_pad)\n return x\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, 
output_stride=8, BatchNorm=None, width_mult=1., pretrained=True):\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n current_stride = 1\n rate = 1\n interverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # building first layer\n input_channel = int(input_channel * width_mult)\n self.features = [conv_bn(3, input_channel, 2, BatchNorm)]\n current_stride *= 2\n # building inverted residual blocks\n for t, c, n, s in interverted_residual_setting:\n if current_stride == output_stride:\n stride = 1\n dilation = rate\n rate *= s\n else:\n stride = s\n dilation = 1\n current_stride *= s\n output_channel = int(c * width_mult)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm))\n else:\n self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm))\n input_channel = output_channel\n self.features = nn.Sequential(*self.features)\n self._initialize_weights()\n\n if pretrained:\n self._load_pretrained_model()\n\n self.low_level_features = self.features[0:4]\n self.high_level_features = self.features[4:]\n\n def forward(self, x):\n low_level_feat = self.low_level_features(x)\n x = self.high_level_features(low_level_feat)\n return x, low_level_feat\n\n def _load_pretrained_model(self):\n pretrain_dict = torch.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),'./mobilenet_VOC.pth'))\n model_dict = {}\n state_dict = self.state_dict()\n for k, v in pretrain_dict.items():\n if k in state_dict:\n model_dict[k] = v\n state_dict.update(model_dict)\n self.load_state_dict(state_dict)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nif __name__ == \"__main__\":\n input = torch.rand(1, 3, 512, 512)\n model = MobileNetV2(output_stride=16, BatchNorm=nn.BatchNorm2d)\n output, low_level_feat = model(input)\n print(output.size())\n print(low_level_feat.size())",
"import os\nfrom tqdm import tqdm\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom modeling.backbone.mobilenet import MobileNetV2\nfrom modeling.assp import ASPP\nfrom modeling.domian import DomainClassifer\nfrom modeling.decoder import Decoder\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom modeling.sync_batchnorm.replicate import patch_replication_callback\nfrom utils.metrics import Evaluator\nfrom utils.loss import SegmentationLosses,DomainLosses\nfrom utils.lr_scheduler import LR_Scheduler\nfrom utils.calculate_weights import calculate_weigths_labels\n\nfrom PIL import Image\nfrom dataloders import make_data_loader\n\nclass Trainer(object):\n def __init__(self, args):\n self.args = args\n\n\n # Define Dataloader\n kwargs = {'num_workers': args.workers, 'pin_memory': True}\n self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)\n\n # Define network\n if args.sync_bn == True:\n BN = SynchronizedBatchNorm2d\n else:\n BN = nn.BatchNorm2d\n ### deeplabV3 start ###\n self.backbone_model = MobileNetV2(output_stride = args.out_stride,\n BatchNorm = BN)\n self.assp_model = ASPP(backbone = args.backbone,\n output_stride = args.out_stride,\n BatchNorm = BN)\n self.y_model = Decoder(num_classes = self.nclass,\n backbone = args.backbone,\n BatchNorm = BN)\n ### deeplabV3 end ###\n self.d_model = DomainClassifer(backbone = args.backbone,\n BatchNorm = BN)\n f_params = list(self.backbone_model.parameters()) + list(self.assp_model.parameters())\n y_params = list(self.y_model.parameters())\n d_params = list(self.d_model.parameters())\n\n # Define Optimizer\n if args.optimizer == 'SGD':\n self.task_optimizer = torch.optim.SGD(f_params+y_params, lr= args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=args.nesterov)\n self.d_optimizer = torch.optim.SGD(d_params, lr= args.lr,\n momentum=args.momentum,\n 
weight_decay=args.weight_decay, nesterov=args.nesterov)\n self.d_inv_optimizer = torch.optim.SGD(f_params, lr= args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=args.nesterov)\n self.c_optimizer = torch.optim.SGD(f_params+y_params, lr= args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=args.nesterov)\n elif args.optimizer == 'Adam':\n self.task_optimizer = torch.optim.Adam(f_params + y_params, lr=args.lr)\n self.d_optimizer = torch.optim.Adam(d_params, lr=args.lr)\n self.d_inv_optimizer = torch.optim.Adam(f_params, lr=args.lr)\n self.c_optimizer = torch.optim.Adam(f_params+y_params, lr=args.lr)\n else:\n raise NotImplementedError\n\n # Define Criterion\n # whether to use class balanced weights\n if args.use_balanced_weights:\n classes_weights_path = 'dataloders\\\\datasets\\\\'+args.dataset + '_classes_weights.npy'\n if os.path.isfile(classes_weights_path):\n weight = np.load(classes_weights_path)\n else:\n weight = calculate_weigths_labels(self.train_loader, self.nclass, classes_weights_path, self.args.dataset)\n weight = torch.from_numpy(weight.astype(np.float32))\n else:\n weight = None\n self.task_loss = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)\n self.domain_loss = DomainLosses(cuda=args.cuda).build_loss()\n self.ca_loss = ''\n\n # Define Evaluator\n self.evaluator = Evaluator(self.nclass)\n\n # Define lr scheduler\n self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,\n args.epochs, len(self.train_loader))\n\n # Using cuda\n if args.cuda:\n self.backbone_model = torch.nn.DataParallel(self.backbone_model, device_ids=self.args.gpu_ids)\n self.assp_model = torch.nn.DataParallel(self.assp_model, device_ids=self.args.gpu_ids)\n self.y_model = torch.nn.DataParallel(self.y_model, device_ids=self.args.gpu_ids)\n self.d_model = torch.nn.DataParallel(self.d_model, device_ids=self.args.gpu_ids)\n patch_replication_callback(self.backbone_model)\n 
patch_replication_callback(self.assp_model)\n patch_replication_callback(self.y_model)\n patch_replication_callback(self.d_model)\n self.backbone_model = self.backbone_model.cuda()\n self.assp_model = self.assp_model.cuda()\n self.y_model = self.y_model.cuda()\n self.d_model = self.d_model.cuda()\n\n # Resuming checkpoint\n self.best_pred = 0.0\n if args.resume is not None:\n if not os.path.isfile(args.resume):\n raise RuntimeError(\"=> no checkpoint found at '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n if args.cuda:\n self.backbone_model.module.load_state_dict(checkpoint['backbone_model_state_dict'])\n self.assp_model.module.load_state_dict(checkpoint['assp_model_state_dict'])\n self.y_model.module.load_state_dict(checkpoint['y_model_state_dict'])\n self.d_model.module.load_state_dict(checkpoint['d_model_state_dict'])\n else:\n self.backbone_model.load_state_dict(checkpoint['backbone_model_state_dict'])\n self.assp_model.load_state_dict(checkpoint['assp_model_state_dict'])\n self.y_model.load_state_dict(checkpoint['y_model_state_dict'])\n self.d_model.load_state_dict(checkpoint['d_model_state_dict'])\n if not args.ft:\n self.task_optimizer.load_state_dict(checkpoint['task_optimizer'])\n self.d_optimizer.load_state_dict(checkpoint['d_optimizer'])\n self.d_inv_optimizer.load_state_dict(checkpoint['d_inv_optimizer'])\n self.c_optimizer.load_state_dict(checkpoint['c_optimizer'])\n if self.args.dataset == 'gtav':\n self.best_pred = checkpoint['best_pred']\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n\n # Clear start epoch if fine-tuning\n if args.ft:\n args.start_epoch = 0\n\n\n\n def validation(self, epoch):\n self.backbone_model.eval()\n self.assp_model.eval()\n self.y_model.eval()\n self.d_model.eval()\n self.evaluator.reset()\n tbar = tqdm(self.val_loader, desc='\\r')\n test_loss = 0.0\n for i, sample in enumerate(tbar):\n image, target = 
sample['image'], sample['label']\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n with torch.no_grad():\n high_feature, low_feature = self.backbone_model(image)\n high_feature = self.assp_model(high_feature)\n output = F.interpolate(self.y_model(high_feature, low_feature), image.size()[2:], \\\n mode='bilinear', align_corners=True)\n task_loss = self.task_loss(output, target)\n test_loss += task_loss.item()\n tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))\n pred = output.data.cpu().numpy()\n target = target.cpu().numpy()\n pred = np.argmax(pred, axis=1)\n # Add batch sample into evaluator\n self.evaluator.add_batch(target, pred)\n\n # Fast test during the training\n Acc = self.evaluator.Pixel_Accuracy()\n Acc_class = self.evaluator.Pixel_Accuracy_Class()\n mIoU,IoU = self.evaluator.Mean_Intersection_over_Union()\n FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()\n ClassName = [\"road\",\n \"sidewalk\",\n \"building\",\n \"wall\",\n \"fence\",\n \"pole\",\n \"light\",\n \"sign\",\n \"vegetation\",\n \"terrain\",\n \"sky\",\n \"person\",\n \"rider\",\n \"car\",\n \"truck\",\n \"bus\",\n \"train\",\n \"motocycle\",\n \"bicycle\"]\n with open('val_info.txt','a') as f1:\n f1.write('Validation:'+'\\n')\n f1.write('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]) + '\\n')\n f1.write(\"Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, FWIoU) + '\\n')\n f1.write('Loss: %.3f' % test_loss + '\\n' + '\\n')\n f1.write('Class IOU: ' + '\\n')\n for idx in range(19):\n f1.write('\\t' + ClassName[idx] + (': \\t' if len(ClassName[idx])>5 else ': \\t\\t') + str(IoU[idx]) + '\\n')\n\n print('Validation:')\n print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))\n print(\"Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, FWIoU))\n print('Loss: %.3f' % test_loss)\n print(IoU)\n\n new_pred = mIoU\n \n\n def 
imgsaver(self, img, imgname, miou):\n im1 = np.uint8(img.transpose(1,2,0)).squeeze()\n #filename_list = sorted(os.listdir(self.args.test_img_root))\n\n valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]\n class_map = dict(zip(range(19), valid_classes))\n im1_np = np.uint8(np.zeros([513,513]))\n for _validc in range(19):\n im1_np[im1 == _validc] = class_map[_validc]\n saveim1 = Image.fromarray(im1_np, mode='L')\n saveim1 = saveim1.resize((1280,640), Image.NEAREST)\n # saveim1.save('result_val/'+imgname)\n\n palette = [[128,64,128],\n [244,35,232],\n [70,70,70],\n [102,102,156],\n [190,153,153],\n [153,153,153],\n [250,170,30],\n [220,220,0],\n [107,142,35],\n [152,251,152],\n [70,130,180],\n [220,20,60],\n [255,0,0],\n [0,0,142],\n [0,0,70],\n [0,60,100],\n [0,80,100],\n [0,0,230],\n [119,11,32]]\n #[0,0,0]]\n class_color_map = dict(zip(range(19), palette))\n im2_np = np.uint8(np.zeros([513,513,3]))\n for _validc in range(19):\n im2_np[im1 == _validc] = class_color_map[_validc]\n saveim2 = Image.fromarray(im2_np)\n saveim2 = saveim2.resize((1280,640), Image.NEAREST)\n saveim2.save('result_val/'+imgname[:-4]+'_color_'+str(miou)+'_.png')\n # print('saving: '+filename_list[idx])\n\n\n def validationSep(self, epoch):\n self.backbone_model.eval()\n self.assp_model.eval()\n self.y_model.eval()\n self.d_model.eval()\n self.evaluator.reset()\n tbar = tqdm(self.val_loader, desc='\\r')\n test_loss = 0.0\n for i, sample in enumerate(tbar):\n image, target = sample['image'], sample['label']\n self.evaluator.reset()\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n with torch.no_grad():\n high_feature, low_feature = self.backbone_model(image)\n high_feature = self.assp_model(high_feature)\n output = F.interpolate(self.y_model(high_feature, low_feature), image.size()[2:], \\\n mode='bilinear', align_corners=True)\n task_loss = self.task_loss(output, target)\n test_loss += task_loss.item()\n tbar.set_description('Test 
loss: %.3f' % (test_loss / (i + 1)))\n pred = output.data.cpu().numpy()\n target = target.cpu().numpy()\n pred = np.argmax(pred, axis=1)\n # Add batch sample into evaluator\n self.evaluator.add_batch(target, pred)\n mIoU,IoU = self.evaluator.Mean_Intersection_over_Union()\n self.imgsaver(pred, sample['name'][0], mIoU)\n \n\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Deeplab_Wild Training\")\n parser.add_argument('--backbone', type=str, default='mobilenet',\n choices=['mobilenet'],\n help='backbone name (default: mobilenet)')\n parser.add_argument('--out-stride', type=int, default=16,\n help='network output stride (default: 16)')\n parser.add_argument('--dataset', type=str, default='gtav2cityscapes',\n choices=['gtav2cityscapes','gtav'],\n help='dataset name (default: gtav2cityscapes)')\n # path to the training dataset\n parser.add_argument('--src_img_root', type=str, default='/home/yaojy/DeepLearningProject/data/GTA_V/train_img',\n help='path to the source training images')\n parser.add_argument('--src_label_root', type=str, default='/home/yaojy/DeepLearningProject/data/GTA_V/train_label',\n help='path to the source training labels')\n parser.add_argument('--tgt_img_root', type=str, default='/home/yaojy/DeepLearningProject/data/CItyscapes/train_img',\n help='path to the target training images')\n # path to the validation dataset\n parser.add_argument('--val_img_root', type=str, default='/home/yaojy/DeepLearningProject/data/CItyscapes/train_img',\n help='path to the validation training images')\n parser.add_argument('--val_label_root', type=str, default='/home/yaojy/DeepLearningProject/data/CItyscapes/val_label',\n help='path to the validation training labels')\n # path to the test dataset\n parser.add_argument('--test_img_root', type=str, default='/home/yaojy/DeepLearningProject/data/CItyscapes/test_img',\n help='path to the test training images')\n parser.add_argument('--test_label_root', type=str, default='',\n help='path to the 
test training labels')\n parser.add_argument('--workers', type=int, default=4,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=513,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=513,\n help='crop image size')\n parser.add_argument('--sync-bn', type=bool, default=None,\n help='whether to use sync bn (default: auto)')\n parser.add_argument('--freeze-bn', type=bool, default=False,\n help='whether to freeze bn parameters (default: False)')\n parser.add_argument('--loss-type', type=str, default='ce',\n choices=['ce', 'focal'],\n help='loss func type (default: ce)')\n parser.add_argument('--no_d_loss', type=bool, default=False,\n help='whether to use domain transfer loss(default: False)')\n # training hyper params\n parser.add_argument('--epochs', type=int, default=200, metavar='N',\n help='number of epochs to train (default: auto)')\n parser.add_argument('--optimizer', type=str, default='Adam',\n choices = ['SGD','Adam'],\n help='the method of optimizer (default: SGD)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=4,\n metavar='N', help='input batch size for \\\n training (default: auto)')\n parser.add_argument('--test-batch-size', type=int, default=1,\n metavar='N', help='input batch size for \\\n testing (default: auto)')\n # optimizer params\n parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',\n help='learning rate (default: auto)')\n parser.add_argument('--lr-scheduler', type=str, default='poly',\n choices=['poly', 'step', 'cos'],\n help='lr scheduler mode: (default: poly)')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=5e-4,\n metavar='M', help='w-decay (default: 5e-4)')\n parser.add_argument('--nesterov', action='store_true', 
default=False,\n help='whether use nesterov (default: False)')\n parser.add_argument('--use_balanced_weights', action='store_true', default=False,\n help='whether use balanced weights (default: True)')\n # cuda, seed and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--gpu-ids', type=str, default='0',\n help='use which gpu to train, must be a \\\n comma-separated list of integers only (default=0)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n # checking point\n parser.add_argument('--resume', type=str, default='/home/zhengfang/proj/synthetic-to-real-semantic-segmentation/run/gtav/deeplab-mobilenet/experiment_0/checkpoint.pth.tar',\n help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default=None,\n help='set the checkpoint name')\n # finetuning pre-trained models\n parser.add_argument('--ft', action='store_true', default=True,\n help='finetuning on a different dataset')\n # evaluation option\n parser.add_argument('--eval-interval', type=int, default=1,\n help='evaluuation interval (default: 1)')\n parser.add_argument('--no-val', action='store_true', default=False,\n help='skip validation during training')\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n if args.cuda:\n try:\n args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]\n except ValueError:\n raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')\n\n if args.sync_bn is None:\n if args.cuda and len(args.gpu_ids) > 1:\n args.sync_bn = True\n else:\n args.sync_bn = False\n\n # default settings for epochs, batch_size and lr\n if args.epochs is None:\n epoches = {\n 'gtav2cityscapes': 200,\n 'gtav':200\n }\n args.epochs = epoches[args.dataset.lower()]\n\n if args.batch_size is None:\n args.batch_size = 4 * len(args.gpu_ids)\n\n if 
args.test_batch_size is None:\n args.test_batch_size = args.batch_size\n\n if args.lr is None:\n lrs = {\n 'gtav2cityscapes': 0.001,\n 'gtav':0.001\n }\n args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size\n\n if args.checkname is None:\n args.checkname = 'deeplab-' + str(args.backbone)\n print(args)\n torch.manual_seed(args.seed)\n trainer = Trainer(args)\n trainer.validationSep(0)\n trainer.validation(0)\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.rand",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.functional.pad"
],
[
"numpy.zeros",
"torch.no_grad",
"torch.optim.SGD",
"torch.optim.Adam",
"numpy.load",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.argmax",
"torch.load",
"torch.nn.DataParallel"
]
] |
veb-101/Deeplearning-Specialization-Coursera
|
[
"19efc9ef30133960b96a65c7babaaeeee19a34f6"
] |
[
"Course 2 - Improving Deep Neural Networks/week 1/Assignment 1/init_utils.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport sklearn\nimport sklearn.datasets\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef relu(x):\n \"\"\"\n Compute the relu of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- relu(x)\n \"\"\"\n s = np.maximum(0,x)\n \n return s\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Implements the forward propagation (and computes the loss) presented in Figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape ()\n b1 -- bias vector of shape ()\n W2 -- weight matrix of shape ()\n b2 -- bias vector of shape ()\n W3 -- weight matrix of shape ()\n b3 -- bias vector of shape ()\n \n Returns:\n loss -- the loss function (vanilla logistic loss)\n \"\"\"\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n z1 = np.dot(W1, X) + b1\n a1 = relu(z1)\n z2 = np.dot(W2, a1) + b2\n a2 = relu(z2)\n z3 = np.dot(W3, a2) + b3\n a3 = sigmoid(z3)\n \n cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)\n \n return a3, cache\n\ndef backward_propagation(X, Y, cache):\n \"\"\"\n Implement the backward propagation presented in figure 2.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat)\n cache -- cache output from forward_propagation()\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, 
activation and pre-activation variables\n \"\"\"\n m = X.shape[1]\n (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache\n \n dz3 = 1./m * (a3 - Y)\n dW3 = np.dot(dz3, a2.T)\n db3 = np.sum(dz3, axis=1, keepdims = True)\n \n da2 = np.dot(W3.T, dz3)\n dz2 = np.multiply(da2, np.int64(a2 > 0))\n dW2 = np.dot(dz2, a1.T)\n db2 = np.sum(dz2, axis=1, keepdims = True)\n \n da1 = np.dot(W2.T, dz2)\n dz1 = np.multiply(da1, np.int64(a1 > 0))\n dW1 = np.dot(dz1, X.T)\n db1 = np.sum(dz1, axis=1, keepdims = True)\n \n gradients = {\"dz3\": dz3, \"dW3\": dW3, \"db3\": db3,\n \"da2\": da2, \"dz2\": dz2, \"dW2\": dW2, \"db2\": db2,\n \"da1\": da1, \"dz1\": dz1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of n_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters['W' + str(i)] = ... 
\n parameters['b' + str(i)] = ...\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for k in range(L):\n parameters[\"W\" + str(k+1)] = parameters[\"W\" + str(k+1)] - learning_rate * grads[\"dW\" + str(k+1)]\n parameters[\"b\" + str(k+1)] = parameters[\"b\" + str(k+1)] - learning_rate * grads[\"db\" + str(k+1)]\n \n return parameters\n\ndef compute_loss(a3, Y):\n \n \"\"\"\n Implement the loss function\n \n Arguments:\n a3 -- post-activation, output of forward propagation\n Y -- \"true\" labels vector, same shape as a3\n \n Returns:\n loss - value of the loss function\n \"\"\"\n \n m = Y.shape[1]\n logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)\n loss = 1./m * np.nansum(logprobs)\n \n return loss\n\ndef load_cat_dataset():\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\n test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n \n train_set_x = train_set_x_orig/255\n test_set_x = test_set_x_orig/255\n\n return train_set_x, train_set_y, test_set_x, test_set_y, classes\n\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a n-layer neural network.\n \n Arguments:\n X -- data set 
of examples you would like to label\n parameters -- parameters of the trained model\n \n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n \n m = X.shape[1]\n p = np.zeros((1,m), dtype = np.int)\n \n # Forward propagation\n a3, caches = forward_propagation(X, parameters)\n \n # convert probas to 0/1 predictions\n for i in range(0, a3.shape[1]):\n if a3[0,i] > 0.5:\n p[0,i] = 1\n else:\n p[0,i] = 0\n\n # print results\n print(\"Accuracy: \" + str(np.mean((p[0,:] == y[0,:]))))\n \n return p\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=np.reshape(y, -1), cmap=plt.cm.Spectral)\n plt.show()\n \ndef predict_dec(parameters, X):\n \"\"\"\n Used for plotting decision boundary.\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (m, K)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Predict using forward propagation and a classification threshold of 0.5\n a3, cache = forward_propagation(X, parameters)\n predictions = (a3>0.5)\n return predictions\n\ndef load_dataset():\n np.random.seed(1)\n train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)\n np.random.seed(2)\n test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)\n # Visualize the data\n plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);\n train_X = 
train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n test_X = test_X.T\n test_Y = test_Y.reshape((1, test_Y.shape[0]))\n return train_X, train_Y, test_X, test_Y"
] |
[
[
"numpy.dot",
"numpy.exp",
"numpy.mean",
"numpy.log",
"numpy.arange",
"sklearn.datasets.make_circles",
"matplotlib.pyplot.scatter",
"numpy.array",
"matplotlib.pyplot.contourf",
"numpy.zeros",
"numpy.reshape",
"numpy.nansum",
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"numpy.int64",
"numpy.maximum"
]
] |
lbcsept/keras-CenterNet
|
[
"18dbe80686644fc4c56cf12f8455c9214a9aae28"
] |
[
"generators/common.py"
] |
[
"import cv2\nimport keras\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport warnings\n\nfrom generators.utils import get_affine_transform, affine_transform\nfrom generators.utils import gaussian_radius, draw_gaussian, gaussian_radius_2, draw_gaussian_2\n\n\nclass Generator(keras.utils.Sequence):\n \"\"\"\n Abstract generator class.\n \"\"\"\n\n def __init__(\n self,\n multi_scale=False,\n multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608),\n misc_effect=None,\n visual_effect=None,\n batch_size=1,\n group_method='ratio', # one of 'none', 'random', 'ratio'\n shuffle_groups=True,\n input_size=512,\n max_objects=100\n ):\n \"\"\"\n Initialize Generator object.\n\n Args:\n batch_size: The size of the batches to generate.\n group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).\n shuffle_groups: If True, shuffles the groups each epoch.\n input_size:\n max_objects:\n \"\"\"\n self.misc_effect = misc_effect\n self.visual_effect = visual_effect\n self.batch_size = int(batch_size)\n self.group_method = group_method\n self.shuffle_groups = shuffle_groups\n self.input_size = input_size\n self.output_size = self.input_size // 4\n self.max_objects = max_objects\n self.groups = None\n self.multi_scale = multi_scale\n self.multi_image_sizes = multi_image_sizes\n self.current_index = 0\n\n # Define groups\n self.group_images()\n\n # Shuffle when initializing\n if self.shuffle_groups:\n random.shuffle(self.groups)\n\n def on_epoch_end(self):\n if self.shuffle_groups:\n random.shuffle(self.groups)\n self.current_index = 0\n\n def size(self):\n \"\"\"\n Size of the dataset.\n \"\"\"\n raise NotImplementedError('size method not implemented')\n\n def num_classes(self):\n \"\"\"\n Number of classes in the dataset.\n \"\"\"\n raise NotImplementedError('num_classes method not implemented')\n\n def has_label(self, label):\n \"\"\"\n Returns True if label is a known label.\n 
\"\"\"\n raise NotImplementedError('has_label method not implemented')\n\n def has_name(self, name):\n \"\"\"\n Returns True if name is a known class.\n \"\"\"\n raise NotImplementedError('has_name method not implemented')\n\n def name_to_label(self, name):\n \"\"\"\n Map name to label.\n \"\"\"\n raise NotImplementedError('name_to_label method not implemented')\n\n def label_to_name(self, label):\n \"\"\"\n Map label to name.\n \"\"\"\n raise NotImplementedError('label_to_name method not implemented')\n\n def image_aspect_ratio(self, image_index):\n \"\"\"\n Compute the aspect ratio for an image with image_index.\n \"\"\"\n raise NotImplementedError('image_aspect_ratio method not implemented')\n\n def load_image(self, image_index):\n \"\"\"\n Load an image at the image_index.\n \"\"\"\n raise NotImplementedError('load_image method not implemented')\n\n def load_annotations(self, image_index):\n \"\"\"\n Load annotations for an image_index.\n \"\"\"\n raise NotImplementedError('load_annotations method not implemented')\n\n def load_annotations_group(self, group):\n \"\"\"\n Load annotations for all images in group.\n \"\"\"\n # load_annotations {'labels': np.array, 'annotations': np.array}\n annotations_group = [self.load_annotations(image_index) for image_index in group]\n for annotations in annotations_group:\n assert (isinstance(annotations,\n dict)), '\\'load_annotations\\' should return a list of dictionaries, received: {}'.format(\n type(annotations))\n assert (\n 'labels' in annotations), '\\'load_annotations\\' should return a list of dictionaries that contain \\'labels\\' and \\'bboxes\\'.'\n assert (\n 'bboxes' in annotations), '\\'load_annotations\\' should return a list of dictionaries that contain \\'labels\\' and \\'bboxes\\'.'\n\n return annotations_group\n\n def filter_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # 
test all annotations\n for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n invalid_indices = np.where(\n (annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |\n (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |\n (annotations['bboxes'][:, 0] < 0) |\n (annotations['bboxes'][:, 1] < 0) |\n (annotations['bboxes'][:, 2] <= 0) |\n (annotations['bboxes'][:, 3] <= 0) |\n (annotations['bboxes'][:, 2] > image.shape[1]) |\n (annotations['bboxes'][:, 3] > image.shape[0])\n )[0]\n\n # delete invalid indices\n if len(invalid_indices):\n warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(\n group[index],\n image.shape,\n annotations['bboxes'][invalid_indices, :]\n ))\n for k in annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)\n if annotations['bboxes'].shape[0] == 0:\n warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(\n group[index],\n image.shape,\n ))\n return image_group, annotations_group\n\n def clip_transformed_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # test all annotations\n filtered_image_group = []\n filtered_annotations_group = []\n for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n image_height = image.shape[0]\n image_width = image.shape[1]\n # x1\n annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)\n # y1\n annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)\n # x2\n annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)\n # y2\n annotations['bboxes'][:, 3] = 
np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n small_indices = np.where(\n (annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 10) |\n (annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 10)\n )[0]\n\n # delete invalid indices\n if len(small_indices):\n for k in annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)\n # import cv2\n # for invalid_index in small_indices:\n # x1, y1, x2, y2 = annotations['bboxes'][invalid_index]\n # label = annotations['labels'][invalid_index]\n # class_name = self.labels[label]\n # print('width: {}'.format(x2 - x1))\n # print('height: {}'.format(y2 - y1))\n # cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)\n # cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)\n # cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n # cv2.imshow('image', image)\n # cv2.waitKey(0)\n if annotations_group[index]['bboxes'].shape[0] != 0:\n filtered_image_group.append(image)\n filtered_annotations_group.append(annotations_group[index])\n else:\n warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(\n group[index],\n image.shape,\n ))\n\n return filtered_image_group, filtered_annotations_group\n\n def load_image_group(self, group):\n \"\"\"\n Load images for all images in a group.\n \"\"\"\n return [self.load_image(image_index) for image_index in group]\n\n def random_visual_effect_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # apply visual effect\n image = self.visual_effect(image)\n return image, annotations\n\n def random_visual_effect_group(self, image_group, annotations_group):\n \"\"\"\n Randomly apply visual effect on each image.\n \"\"\"\n assert 
(len(image_group) == len(annotations_group))\n\n if self.visual_effect is None:\n # do nothing\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # apply effect on a single group entry\n image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(\n image_group[index], annotations_group[index]\n )\n\n return image_group, annotations_group\n\n def random_transform_group_entry(self, image, annotations, transform=None):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # randomly transform both image and annotations\n if transform is not None or self.transform_generator:\n if transform is None:\n transform = adjust_transform_for_image(next(self.transform_generator), image,\n self.transform_parameters.relative_translation)\n\n # apply transformation to image\n image = apply_transform(transform, image, self.transform_parameters)\n\n # Transform the bounding boxes in the annotations.\n annotations['bboxes'] = annotations['bboxes'].copy()\n for index in range(annotations['bboxes'].shape[0]):\n annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])\n\n return image, annotations\n\n def random_transform_group(self, image_group, annotations_group):\n \"\"\"\n Randomly transforms each image and its annotations.\n \"\"\"\n\n assert (len(image_group) == len(annotations_group))\n\n for index in range(len(image_group)):\n # transform a single group entry\n image_group[index], annotations_group[index] = self.random_transform_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def random_misc_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n assert annotations['bboxes'].shape[0] != 0\n\n # randomly transform both image and annotations\n image, boxes = self.misc_effect(image, annotations['bboxes'])\n # Transform the bounding boxes in the annotations.\n 
annotations['bboxes'] = boxes\n return image, annotations\n\n def random_misc_group(self, image_group, annotations_group):\n \"\"\"\n Randomly transforms each image and its annotations.\n \"\"\"\n\n assert (len(image_group) == len(annotations_group))\n\n if self.misc_effect is None:\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # transform a single group entry\n image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def preprocess_group_entry(self, image, annotations):\n \"\"\"\n Preprocess image and its annotations.\n \"\"\"\n\n # preprocess the image\n image, scale, offset_h, offset_w = self.preprocess_image(image)\n\n # apply resizing to annotations too\n annotations['bboxes'] *= scale\n annotations['bboxes'][:, [0, 2]] += offset_w\n annotations['bboxes'][:, [1, 3]] += offset_h\n # print(annotations['bboxes'][:, [2, 3]] - annotations['bboxes'][:, [0, 1]])\n return image, annotations\n\n def preprocess_group(self, image_group, annotations_group):\n \"\"\"\n Preprocess each image and its annotations in its group.\n \"\"\"\n assert (len(image_group) == len(annotations_group))\n\n for index in range(len(image_group)):\n # preprocess a single group entry\n image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def group_images(self):\n \"\"\"\n Order the images according to self.order and makes groups of self.batch_size.\n \"\"\"\n # determine the order of the images\n\n order = list(range(self.size()))\n if self.group_method == 'random':\n random.shuffle(order)\n elif self.group_method == 'ratio':\n order.sort(key=lambda x: self.image_aspect_ratio(x))\n\n # divide into groups, one group = one batch\n self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in\n range(0, 
len(order), self.batch_size)]\n\n def compute_inputs(self, image_group, annotations_group):\n \"\"\"\n Compute inputs for the network using an image_group.\n \"\"\"\n # construct an image batch object\n batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)\n\n batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),\n dtype=np.float32)\n batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),\n dtype=np.float32)\n batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)\n batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)\n batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)\n batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)\n\n # copy all images to the upper left part of the image batch object\n for b, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)\n s = max(image.shape[0], image.shape[1]) * 1.0\n trans_input = get_affine_transform(c, s, self.input_size)\n\n # inputs\n image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)\n batch_images[b] = image\n\n # outputs\n bboxes = annotations['bboxes']\n assert bboxes.shape[0] != 0\n class_ids = annotations['labels']\n assert class_ids.shape[0] != 0\n\n trans_output = get_affine_transform(c, s, self.output_size)\n for i in range(bboxes.shape[0]):\n bbox = bboxes[i].copy()\n cls_id = class_ids[i]\n # (x1, y1)\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n # (x2, y2)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h > 0 and w > 0:\n radius_h, radius_w = 
gaussian_radius((math.ceil(h), math.ceil(w)))\n radius_h = max(0, int(radius_h))\n radius_w = max(0, int(radius_w))\n\n radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)\n draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)\n batch_whs[b, i] = 1. * w, 1. * h\n batch_indices[b, i] = ct_int[1] * self.output_size + ct_int[0]\n batch_regs[b, i] = ct - ct_int\n batch_reg_masks[b, i] = 1\n\n # hm = batch_hms[b, :, :, cls_id]\n # hm = np.round(hm * 255).astype(np.uint8)\n # hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)\n # hm_2 = batch_hms_2[b, :, :, cls_id]\n # hm_2 = np.round(hm_2 * 255).astype(np.uint8)\n # hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)\n # cv2.rectangle(hm, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)\n # cv2.rectangle(hm_2, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)\n # cv2.namedWindow('hm', cv2.WINDOW_NORMAL)\n # cv2.imshow('hm', np.hstack([hm, hm_2]))\n # cv2.waitKey()\n # print(np.sum(batch_reg_masks[b]))\n # for i in range(self.num_classes()):\n # plt.subplot(4, 5, i + 1)\n # hm = batch_hms[b, :, :, i]\n # plt.imshow(hm, cmap='gray')\n # plt.axis('off')\n # plt.show()\n # hm = np.sum(batch_hms[0], axis=-1)\n # hm = np.round(hm * 255).astype(np.uint8)\n # hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)\n # hm_2 = np.sum(batch_hms_2[0], axis=-1)\n # hm_2 = np.round(hm_2 * 255).astype(np.uint8)\n # hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)\n # for i in range(bboxes.shape[0]):\n # x1, y1 = np.round(affine_transform(bboxes[i, :2], trans_input)).astype(np.int32)\n # x2, y2 = np.round(affine_transform(bboxes[i, 2:], trans_input)).astype(np.int32)\n # x1_, y1_ = np.round(affine_transform(bboxes[i, :2], trans_output)).astype(np.int32)\n # x2_, y2_ = 
np.round(affine_transform(bboxes[i, 2:], trans_output)).astype(np.int32)\n # class_id = class_ids[i]\n # cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)\n # cv2.putText(image, str(class_id), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 2.0getAffineTransform, (0, 0, 0), 3)\n # cv2.rectangle(hm, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)\n # cv2.rectangle(hm_2, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)\n # cv2.namedWindow('hm', cv2.WINDOW_NORMAL)\n # cv2.imshow('hm', np.hstack([hm, hm_2]))\n # cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n # cv2.imshow('image', image)\n # cv2.waitKey()\n return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices]\n\n def compute_targets(self, image_group, annotations_group):\n \"\"\"\n Compute target outputs for the network using images and their annotations.\n \"\"\"\n return np.zeros((len(image_group),))\n\n def compute_inputs_targets(self, group):\n \"\"\"\n Compute inputs and target outputs for the network.\n \"\"\"\n\n # load images and annotations\n # list\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly apply visual effect\n image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)\n #\n # # randomly transform data\n # image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n\n # randomly apply misc effect\n image_group, annotations_group = self.random_misc_group(image_group, annotations_group)\n #\n # # perform preprocessing steps\n # image_group, annotations_group = self.preprocess_group(image_group, annotations_group)\n #\n # # check validity of annotations\n # image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)\n\n if len(image_group) == 0:\n return None, None\n\n # compute 
network inputs\n inputs = self.compute_inputs(image_group, annotations_group)\n\n # compute network targets\n targets = self.compute_targets(image_group, annotations_group)\n\n return inputs, targets\n\n def __len__(self):\n \"\"\"\n Number of batches for generator.\n \"\"\"\n\n return len(self.groups)\n\n def __getitem__(self, index):\n \"\"\"\n Keras sequence method for generating batches.\n \"\"\"\n group = self.groups[self.current_index]\n if self.multi_scale:\n if self.current_index % 10 == 0:\n random_size_index = np.random.randint(0, len(self.multi_image_sizes))\n self.image_size = self.multi_image_sizes[random_size_index]\n inputs, targets = self.compute_inputs_targets(group)\n while inputs is None:\n current_index = self.current_index + 1\n if current_index >= len(self.groups):\n current_index = current_index % (len(self.groups))\n self.current_index = current_index\n group = self.groups[self.current_index]\n inputs, targets = self.compute_inputs_targets(group)\n current_index = self.current_index + 1\n if current_index >= len(self.groups):\n current_index = current_index % (len(self.groups))\n self.current_index = current_index\n return inputs, targets\n\n def preprocess_image(self, image, c, s, tgt_w, tgt_h):\n trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))\n image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)\n image = image.astype(np.float32)\n\n image[..., 0] -= 103.939\n image[..., 1] -= 116.779\n image[..., 2] -= 123.68\n\n return image\n\n def get_transformed_group(self, group):\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly transform data\n image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n return image_group, annotations_group\n\n def 
get_cropped_and_rotated_group(self, group):\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly transform data\n image_group, annotations_group = self.random_crop_group(image_group, annotations_group)\n image_group, annotations_group = self.random_rotate_group(image_group, annotations_group)\n return image_group, annotations_group\n"
] |
[
[
"numpy.where",
"numpy.array",
"numpy.delete",
"numpy.clip"
]
] |
OlegJakushkin/s3prl
|
[
"c0e41f07fa56f0f79b5bf3839b4d0a4cf7c421bf"
] |
[
"downstream/libri_phone/model.py"
] |
[
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ model.py ]\n# Synopsis [ the 1-hidden model ]\n# Author [ S3PRL ]\n# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvBank(nn.Module):\n def __init__(self, input_dim, output_class_num, kernels, cnn_size, hidden_size, dropout, **kwargs):\n super(ConvBank, self).__init__()\n self.drop_p = dropout\n \n self.in_linear = nn.Linear(input_dim, hidden_size)\n latest_size = hidden_size\n\n # conv bank\n self.cnns = nn.ModuleList()\n assert len(kernels) > 0\n for kernel in kernels:\n self.cnns.append(nn.Conv1d(latest_size, cnn_size, kernel, padding=kernel//2))\n latest_size = cnn_size * len(kernels)\n\n self.out_linear = nn.Linear(latest_size, output_class_num)\n\n def forward(self, features):\n hidden = F.dropout(F.relu(self.in_linear(features)), p=self.drop_p)\n\n conv_feats = []\n hidden = hidden.transpose(1, 2).contiguous()\n for cnn in self.cnns: \n conv_feats.append(cnn(hidden))\n hidden = torch.cat(conv_feats, dim=1).transpose(1, 2).contiguous()\n hidden = F.dropout(F.relu(hidden), p=self.drop_p)\n\n predicted = self.out_linear(hidden)\n return predicted\n\n\nclass Framelevel1Hidden(nn.Module):\n def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):\n super(Framelevel1Hidden, self).__init__()\n \n # init attributes\n self.in_linear = nn.Linear(input_dim, hidden_size) \n self.out_linear = nn.Linear(hidden_size, output_class_num)\n self.drop = nn.Dropout(dropout) \n self.act_fn = nn.functional.relu \n\n\n def forward(self, features):\n hidden = self.in_linear(features)\n hidden = self.drop(hidden)\n hidden = self.act_fn(hidden)\n predicted = self.out_linear(hidden)\n return predicted\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.functional.relu"
]
] |
Juggernaut93/SSH-pytorch
|
[
"9a6159c6692bac5c2f090a91f691e798c52da2b5"
] |
[
"model/utils/config.py"
] |
[
"import os\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Training options\n#\n\n__C.TRAIN = edict()\n\n# Online hard negative mining\n__C.TRAIN.HARD_POSITIVE_MINING = True\n__C.TRAIN.HARD_NEGATIVE_MINING = True\n__C.TRAIN.BG_THRESH_LOW = 0.0\n\n__C.TRAIN.ORIG_SIZE = False\n\n# Initial learning rate\n__C.TRAIN.LEARNING_RATE = 0.001\n\n# Momentum\n__C.TRAIN.MOMENTUM = 0.9\n\n# Weight decay, for regularization\n__C.TRAIN.WEIGHT_DECAY = 0.0005\n\n# Factor for reducing the learning rate\n__C.TRAIN.GAMMA = 0.1\n\n# Step size for reducing the learning rate, currently only support one step\n__C.TRAIN.STEPSIZE = [30000]\n\n# Iteration intervals for showing the loss during training, on command line interface\n__C.TRAIN.DISPLAY = 50\n# Iteration intervals for save check point\n__C.TRAIN.CHECKPOINT = 500\n# Whether to double the learning rate for bias\n__C.TRAIN.DOUBLE_BIAS = True\n\n# Whether to initialize the weights with truncated normal distribution\n__C.TRAIN.TRUNCATED = False\n\n# Whether to have weight decay on bias as well\n__C.TRAIN.BIAS_DECAY = False\n\n# Whether to add ground truth boxes to the pool when sampling regions\n__C.TRAIN.USE_GT = False\n\n# Whether to use aspect-ratio grouping of training images, introduced merely for saving\n# GPU memory\n__C.TRAIN.ASPECT_GROUPING = False\n\n# The number of snapshots kept, older ones are deleted to save space\n__C.TRAIN.SNAPSHOT_KEPT = 3\n\n# The time interval for saving tensorflow summaries\n__C.TRAIN.SUMMARY_INTERVAL = 180\n\n# Scale to use during training (can list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,800)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1200\n\n# Trim size for input images to create minibatch\n__C.TRAIN.TRIM_HEIGHT = 
600\n__C.TRAIN.TRIM_WIDTH = 600\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 1\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 256\n\n# Fraction of minibatch that is labeled foreground (i.e. class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.0\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 5000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'\n# __C.TRAIN.SNAPSHOT_INFIX = ''\n\n# Use a prefetch thread in roi_data_layer.layer\n# So far I haven't found this useful; likely more engineering work is required\n# __C.TRAIN.USE_PREFETCH = False\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n# Deprecated (inside weights)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'gt'\n\n# Make minibatches from images that have similar aspect ratios (i.e. 
both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = True\n# IOU >= thresh: positive example\n__C.TRAIN.ANCHOR_POSITIVE_OVERLAP = 0.5\n# IOU < thresh: negative example\n__C.TRAIN.ANCHOR_NEGATIVE_OVERLAP = 0.3\n# If an anchor statisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.25\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 384\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TRAIN.RPN_MIN_SIZE = 4\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n# Whether to use all ground truth bounding boxes for training,\n# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''\n__C.TRAIN.USE_ALL_GT = True\n\n# Whether to tune the batch normalization parameters during training\n__C.TRAIN.BN_TRAIN = False\n\n#\n# Testing options\n#\n__C.TEST = edict()\n\n# Scale to use during testing (can NOT list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (1200,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1600\n\n__C.TEST.ORIG_SIZE = False\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# 
Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = False\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'gt'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.3\n## Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n\n## Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 300\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n__C.TEST.RPN_MIN_SIZE = 16\n\n# Testing mode, default to be 'nms', 'top' is slower but better\n# See report for details\n__C.TEST.MODE = 'nms'\n\n# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select\n__C.TEST.RPN_TOP_N = 5000\n\n#\n# ResNet options\n#\n\n__C.RESNET = edict()\n\n# Option to set if max-pooling is appended after crop_and_resize.\n# if true, the region will be resized to a square of 2xPOOLING_SIZE,\n# then 2x2 max-pooling is applied; otherwise the region will be directly\n# resized to a square of POOLING_SIZE\n__C.RESNET.MAX_POOL = False\n\n# Number of fixed blocks during training, by default the first of all 4 blocks is fixed\n# Range: 0 (none) to 3 (all)\n__C.RESNET.FIXED_BLOCKS = 1\n\n#\n# MobileNet options\n#\n\n__C.MOBILENET = edict()\n\n# Whether to regularize the depth-wise filters during training\n__C.MOBILENET.REGU_DEPTH = False\n\n# Number of fixed layers during training, by default the first of all 14 layers is fixed\n# Range: 0 (none) to 12 (all)\n__C.MOBILENET.FIXED_LAYERS = 5\n\n# Weight decay for the mobilenet weights\n__C.MOBILENET.WEIGHT_DECAY = 0.00004\n\n# Depth multiplier\n__C.MOBILENET.DEPTH_MULTIPLIER = 1.\n\n#\n# MISC\n#\n\n# The mapping from image coordinates to feature map coordinates might 
cause\n# some boxes that are distinct in image space to become identical in feature\n# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor\n# for identifying duplicate boxes.\n# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16\n__C.DEDUP_BOXES = 1. / 16.\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# A small number that's used many times\n__C.EPS = 1e-14\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Default GPU device id\n__C.GPU_ID = 0\n\n__C.POOLING_MODE = 'crop'\n\n# Size of the pooled region after RoI pooling\n__C.POOLING_SIZE = 7\n\n# Maximal number of gt rois in an image during Training\n__C.MAX_NUM_GT_BOXES = 20\n\n# Anchor scales for RPN\n__C.ANCHOR_SCALES = [8, 16, 32]\n\n# Anchor ratios for RPN\n__C.ANCHOR_RATIOS = [0.5, 1, 2]\n\n# Feature stride for RPN\n__C.FEAT_STRIDE = [16, ]\n\n__C.CUDA = False\n\n__C.CROP_RESIZE_WITH_MAX_POOL = True\n\nimport pdb\n\n\ndef get_output_dir(imdb_name, net_name=None,output_dir='output'):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n\n outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))\n if net_name is not None:\n outdir = osp.join(outdir, net_name)\n\n if not os.path.exists(outdir):\n 
os.makedirs(outdir)\n return outdir\n\n\ndef get_output_tb_dir(imdb, weights_filename):\n \"\"\"Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. 
{}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(('Error under config key: {}'.format(k)))\n raise\n else:\n b[k] = v\n\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert subkey in d\n d = d[subkey]\n subkey = key_list[-1]\n assert subkey in d\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n"
] |
[
[
"numpy.array"
]
] |
lvrcek/consensus-net
|
[
"560957f315751822e1ddf8c097eb7b712ceadff3"
] |
[
"experiments/karla/diplomski-rad/blade/pb/datasets/n3-all-indels/finished-experiments/model-n3-indel-5.py"
] |
[
"from comet_ml import Experiment\n\nexperiment = Experiment(api_key=\"oda8KKpxlDgWmJG5KsYrrhmIV\", project_name=\"consensusnet\")\n\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input\nfrom keras.layers import Conv1D, MaxPooling1D, Conv2D\n\nimport sys\nmodule_path = '/home/diplomski-rad/consensus-net/src/python/dataset/'\nif module_path not in sys.path:\n print('Adding dataset module.')\n sys.path.append(module_path)\n\nimport dataset\n\nX_train = np.load('../dataset-n3-X-reshaped-train.npy')\nX_validate = np.load('../dataset-n3-X-reshaped-validate.npy')\ny_train = np.load('../dataset-n3-y-reshaped-train.npy')\ny_validate = np.load('../dataset-n3-y-reshaped-validate.npy')\n\nexample_shape = X_train.shape[1:]\ninput_layer = Input(shape=example_shape)\nconv_1 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(input_layer)\nbn_1 = BatchNormalization()(conv_1)\nconv_2 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(bn_1)\ndrop_1 = Dropout(0.25)(conv_2)\n\nflatten = Flatten()(drop_1)\npredictions = Dense(4, activation='softmax')(flatten)\n\nmodel = Model(input_layer, predictions)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nprint(model.summary())\n\nbatch_size = 10000\nepochs = 50\n\nmodel.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_validate, y_validate))\n"
] |
[
[
"numpy.load"
]
] |
eumiro/OpenOA
|
[
"c0d3d4125e971dc5e48b41a4ab75767ace277457"
] |
[
"operational_analysis/toolkits/met_data_processing.py"
] |
[
"\"\"\"\nThis module provides methods for processing meteorological data.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport scipy.constants as const\n\n\ndef compute_wind_direction(u, v):\n \"\"\"Compute wind direction given u and v wind vector components\n\n Args:\n u(:obj:`pandas.Series`): the zonal component of the wind; units of m/s\n v(:obj:`pandas.Series`): the meridional component of the wind; units of m/s\n\n Returns:\n :obj:`pandas.Series`: wind direction; units of degrees\n \"\"\"\n wd = 180 + np.arctan2(u, v) * 180 / np.pi # Calculate wind direction in degrees\n wd[wd == 360] = 0 # Make calculations of 360 equal to 0\n\n return wd\n\n\ndef compute_u_v_components(wind_speed, wind_dir):\n \"\"\"Compute vector components of the horizontal wind given wind speed and direction\n\n Args:\n wind_speed(pandas.Series): horizontal wind speed; units of m/s\n wind_dir(pandas.Series): wind direction; units of degrees\n\n Returns:\n (tuple):\n u(pandas.Series): the zonal component of the wind; units of m/s.\n v(pandas.Series): the meridional component of the wind; units of m/s\n \"\"\"\n # Send exception if any negative data found\n if (wind_speed[wind_speed < 0].size > 0) | (wind_dir[wind_dir < 0].size > 0):\n raise Exception('Some of your wind speed or direction data is negative. Check your data')\n\n u = np.round(-wind_speed * np.sin(wind_dir * np.pi / 180), 10) # round to 10 digits\n v = np.round(-wind_speed * np.cos(wind_dir * np.pi / 180), 10)\n\n return u, v\n\n\ndef compute_air_density(temp_col, pres_col, humi_col = None):\n \"\"\"\n Calculate air density from the ideal gas law based on the definition provided by IEC 61400-12\n given pressure, temperature and relative humidity.\n \n This function assumes temperature and pressure are reported in standard units of measurement\n (i.e. Kelvin for temperature, Pascal for pressure, humidity has no dimension).\n \n Humidity values are optional. 
According to the IEC a humiditiy of 50% (0.5) is set as default value. \n\n Args:\n temp_col(:obj:`array-like`): array with temperature values; units of Kelvin\n pres_col(:obj:`array-like`): array with pressure values; units of Pascals\n humi_col(:obj:`array-like`): optional array with relative humidity values; dimensionless (range 0 to 1) \n\n Returns:\n :obj:`pandas.Series`: Rho, calcualted air density; units of kg/m3\n \"\"\"\n # Check if humidity column is provided and create default humidity array with values of 0.5 if necessary\n if humi_col != None:\n rel_humidity = humi_col\n else:\n rel_humidity = np.repeat(.5, temp_col.shape[0])\n # Send exception if any negative data found\n if np.any(temp_col < 0) | np.any(pres_col < 0) | np.any(rel_humidity < 0):\n raise Exception('Some of your temperature, pressure or humidity data is negative. Check your data.')\n\n #protect against python 2 integer division rules\n temp_col = temp_col.astype(float)\n pres_col = pres_col.astype(float)\n\n R_const = 287.05 # Gas constant for dry air, units of J/kg/K\n Rw_const = 461.5 # Gas constant of water vapour, unit J/kg/K\n rho = ((1/temp_col)*(pres_col/R_const-rel_humidity*(0.0000205*np.exp(0.0631846*temp_col))*\n (1/R_const-1/Rw_const)))\n\n return rho\n\n\ndef pressure_vertical_extrapolation(p0, temp_avg, z0, z1):\n \"\"\"\n Extrapolate pressure from height z0 to height z1 given the average temperature in the layer.\n The hydostatic equation is used to peform the extrapolation.\n\n Args:\n p0(:obj:`pandas.Series`): pressure at height z0; units of Pascals\n temp_avg(:obj:`pandas.Series`): mean temperature between z0 and z1; units of Kelvin\n z0(:obj:`pandas.Series`): height above surface; units of meters\n z1(:obj:`pandas.Series`): extrapolation height; units of meters\n\n Returns:\n :obj:`pandas.Series`: p1, extrapolated pressure at z1; units of Pascals\n \"\"\"\n # Send exception if any negative data found\n if (p0[p0 < 0].size > 0) | (temp_avg[temp_avg < 0].size > 0):\n 
raise Exception('Some of your temperature of pressure data is negative. Check your data')\n\n R_const = 287.058 # Gas constant for dry air, units of J/kg/K\n p1 = p0 * np.exp(-const.g * (z1 - z0) / R_const / temp_avg) # Pressure at z1\n\n return p1\n\n\ndef air_density_adjusted_wind_speed(wind_col, density_col):\n \"\"\"\n Apply air density correction to wind speed measurements following IEC-61400-12-1 standard\n\n Args:\n wind_col(:obj:`str`): array containing the wind speed data; units of m/s\n density_col(:obj:`str`): array containing the air density data; units of kg/m3\n\n Returns:\n :obj:`pandas.Series`: density-adjusted wind speeds; units of m/s\n \"\"\"\n rho_mean = density_col.mean() # Mean air density across sample\n dens_adjusted_ws = wind_col * np.power(density_col / rho_mean, 1. / 3) # Density adjusted wind speeds\n\n return dens_adjusted_ws\n\n\ndef compute_turbulence_intensity(mean_col, std_col):\n \"\"\"\n Compute turbulence intensity\n\n Args:\n mean_col(:obj:`array`): array containing the wind speed mean data; units of m/s\n std_col(:obj:`array`): array containing the wind speed standard deviation data; units of m/s\n\n Returns:\n :obj:`array`: turbulence intensity, (unitless ratio)\n \"\"\"\n return std_col / mean_col\n\n\ndef compute_shear(df, windspeed_heights, ref_col='empty'):\n \"\"\"\n Compute shear coefficient between wind speed measurements\n\n Args:\n df(:obj:`pandas.DataFrame`): dataframe with wind speed columns\n windspeed_heights(:obj:`dict`): keys are strings of columns in <df> containing wind speed data, values are\n associated sensor heights (m)\n ref_col(:obj:`str`): data column name for the data to use as the normalization value; only pertinent if\n optimizing over multiple measurements\n\n Returns:\n :obj:`pandas.Series`: shear coefficient (unitless)\n \"\"\"\n\n # Convert wind speed heights to float\n windspeed_heights = \\\n dict(list(zip(list(windspeed_heights.keys()), [float(value) for value in 
list(windspeed_heights.values())])))\n\n keys = list(windspeed_heights.keys())\n if len(keys) <= 1:\n raise Exception('More than one wind speed measurement required to compute shear.')\n elif len(keys) == 2:\n # If there are only two measurements, no optimization possible\n wind_a = keys[0]\n wind_b = keys[1]\n height_a = windspeed_heights[wind_a]\n height_b = windspeed_heights[wind_b]\n return (np.log(df[wind_b]) - np.log(df[wind_a])) / (np.log(height_b) - np.log(height_a))\n else:\n from scipy.optimize import curve_fit\n\n def power_law(x, alpha):\n return (x) ** alpha\n\n # Normalize wind speeds and heights to reference values\n df_norm = df[keys].div(df[ref_col], axis=0)\n h0 = windspeed_heights[ref_col]\n windspeed_heights = {k: v / h0 for k, v in windspeed_heights.items()}\n\n # Rename columns to be windspeed measurement heights\n df_norm = df_norm.rename(columns=windspeed_heights)\n\n alpha = pd.DataFrame(np.ones((len(df_norm), 1)) * np.nan, index=df_norm.index, columns=['alpha'])\n\n # For each row\n for time in df_norm.index:\n\n t = (df_norm.loc[time] # Take the row as a series, the index will be the column names,\n .reset_index() # Resetting the index yields the heights as a column\n .to_numpy()) # Numpy array: each row a sensor, column 0 the heights, column 1 the measurment\n t = t[~np.isnan(t).any(axis=1)] # Drop rows (sensors) for which the measurement was nan\n h = t[:, 0] # The measurement heights\n u = t[:, 1] # The measurements\n if np.shape(u)[0] <= 1: # If less than two measurements were available, leave value as nan\n continue\n else:\n alpha.loc[time, 'alpha'] = curve_fit(power_law, h, u)[0][0] # perform least square optimization\n\n return alpha['alpha']\n\n\ndef compute_veer(wind_a, height_a, wind_b, height_b):\n \"\"\"\n Compute veer between wind direction measurements\n\n Args:\n wind_a, wind_b(:obj:`array`): arrays containing the wind direction mean data; units of deg\n height_a, height_b(:obj:`array`): sensor heights (m)\n\n Returns:\n 
veer(:obj:`array`): veer (deg/m)\n \"\"\"\n\n # Calculate wind direction change\n delta_dir = wind_b - wind_a\n\n # Convert absolute values greater than 180 to normal range\n delta_dir[delta_dir > 180] = delta_dir[delta_dir > 180] - 360.\n delta_dir[delta_dir <= (-180)] = delta_dir[delta_dir <= (-180)] + 360.\n\n return delta_dir / (height_b - height_a)\n"
] |
[
[
"numpy.sin",
"numpy.isnan",
"scipy.optimize.curve_fit",
"numpy.log",
"numpy.exp",
"numpy.shape",
"numpy.any",
"numpy.power",
"numpy.cos",
"numpy.arctan2",
"numpy.repeat"
]
] |
lifedespicable/Python_Learning
|
[
"b3ec851c80a8137d64d02d15a3566af49b9a7ba7"
] |
[
"Python_Learning/plt_test.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n# 绘制简单的曲线\n# plt.plot([1, 3, 5], [4, 8, 10])\n# plt.show() # 这条命令没有作用,只是单纯让曲线在Pycharm当中显示\n#\n# x = np.linspace(-np.pi, np.pi, 100) # x的定义域为-3.14 ~ 3.14,中间间隔100个元素\n# plt.plot(x, np.sin(x))\n# # 显示所画的图\n# plt.show()\n\n# x = np.linspace(-np.pi * 2, np.pi * 2, 100) # x的定义域为-2pi ~ 2pi,中间间隔100个元素\n# plt.figure(1, dpi= 50) # 创建图表1\n# for i in range(1, 5): # 画四条线\n# plt.plot(x, np.sin(x / i))\n# plt.show()\n\n# plt.figure(1, dpi= 50) # 创建图表1,dpi代表图片精细度,dpi越大图片文件越大,杂志要300以上\n# data = [1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 4]\n# plt.hist(data) # 只要传入数据,直方图就会统计数据出现的次数,hist是表示的是直方图\n# plt.show()\n\n# x = np.arange(1, 10)\n# y = x\n# fig = plt.figure()\n# plt.scatter(x, y, c= 'r', marker= 'o') # c='r'表示散点的颜色为红色,marker表示指定散点的形状为圆形\n# plt.show()\n\n# 使用Pandas与Matplotlib进行结合\niris = pd.read_csv('iris_training.csv')\nprint(iris.head())\nprint(iris.shape)\nprint(type(iris))\nprint(iris.dtypes)\n\n# 绘制散点图\n# iris.plot(kind = 'scatter', x = '-0.07415843', y = '-0.115833877')\n\n# 没啥用,只是让Pandas的plot()方法在Pycharm上显示\n# plt.show()\n\n# 设置样式\nsns.set(style= 'white', color_codes= True)\n# 设置绘制格式为散点图\nsns.jointplot(x = '120', y = '4', data= iris, height = 5)\n# distplot绘制曲线\nsns.distplot(iris['120'])\n# 没啥用,只是让Pandas的plot()方法在Pycharm上显示\nplt.show()\n\n# FacetGrid 一般绘图函数\n# hue 彩色显示分类 0/1/2\n# plt.scatter 绘制散点图\n# add_legend() 显示分类的描述信息\n# sns.FacetGrid(iris, hue= 'virginica', height= 5).map(plt.scatter, '-0.07415843', '-0.115833877').add_legend()\n# sns.FacetGrid(iris, hue= 'virginica', height= 5).map(plt.scatter, '0.005250311', '-0.062491073').add_legend()\n# 没啥用,只是让Pandas的plot()方法在Pycharm上显示\n# plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
kmaehashi/numba
|
[
"6ecb815772900705f2bc64821a6c1fd632b2b666"
] |
[
"numba/cuda/tests/cudapy/test_ipc.py"
] |
[
"import multiprocessing as mp\nimport itertools\nimport traceback\nimport pickle\n\nimport numpy as np\n\nfrom numba import cuda\nfrom numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck,\n ContextResettingTestCase, ForeignArray)\nimport unittest\n\n\ndef core_ipc_handle_test(the_work, result_queue):\n try:\n arr = the_work()\n # Catch anything going wrong in the worker function\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\ndef base_ipc_handle_test(handle, size, result_queue):\n def the_work():\n dtype = np.dtype(np.intp)\n with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,\n dtype=dtype) as darr:\n # copy the data to host\n return darr.copy_to_host()\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef serialize_ipc_handle_test(handle, result_queue):\n def the_work():\n dtype = np.dtype(np.intp)\n darr = handle.open_array(cuda.current_context(),\n shape=handle.size // dtype.itemsize,\n dtype=dtype)\n # copy the data to host\n arr = darr.copy_to_host()\n handle.close()\n return arr\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef ipc_array_test(ipcarr, result_queue):\n try:\n with ipcarr as darr:\n arr = darr.copy_to_host()\n try:\n # should fail to reopen\n with ipcarr:\n pass\n except ValueError as e:\n if str(e) != 'IpcHandle is already opened':\n raise AssertionError('invalid exception message')\n else:\n raise AssertionError('did not raise on reopen')\n # Catch any exception so we can propagate it\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. 
send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\n@skip_under_cuda_memcheck('Hangs cuda-memcheck')\n@skip_on_cudasim('Ipc not available in CUDASIM')\nclass TestIpcMemory(ContextResettingTestCase):\n def test_ipc_handle(self):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n\n # manually prepare for serialization as bytes\n handle_bytes = bytes(ipch.handle)\n size = ipch.size\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (handle_bytes, size, result_queue)\n proc = ctx.Process(target=base_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n proc.join(3)\n\n def variants(self):\n # Test with no slicing and various different slices\n indices = (None, slice(3, None), slice(3, 8), slice(None, 8))\n # Test with a Numba DeviceNDArray, or an array from elsewhere through\n # the CUDA Array Interface\n foreigns = (False, True)\n return itertools.product(indices, foreigns)\n\n def check_ipc_handle_serialization(self, index_arg=None, foreign=False):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n if index_arg is not None:\n devarr = devarr[index_arg]\n if foreign:\n devarr = cuda.as_cuda_array(ForeignArray(devarr))\n expect = devarr.copy_to_host()\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n\n # pickle\n buf = pickle.dumps(ipch)\n ipch_recon = pickle.loads(buf)\n self.assertIs(ipch_recon.base, None)\n self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))\n self.assertEqual(ipch_recon.size, ipch.size)\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, result_queue)\n proc 
= ctx.Process(target=serialize_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(expect, out)\n proc.join(3)\n\n def test_ipc_handle_serialization(self):\n for index, foreign, in self.variants():\n with self.subTest(index=index, foreign=foreign):\n self.check_ipc_handle_serialization(index, foreign)\n\n def check_ipc_array(self, index_arg=None, foreign=False):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n # Slice\n if index_arg is not None:\n devarr = devarr[index_arg]\n if foreign:\n devarr = cuda.as_cuda_array(ForeignArray(devarr))\n expect = devarr.copy_to_host()\n ipch = devarr.get_ipc_handle()\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, result_queue)\n proc = ctx.Process(target=ipc_array_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(expect, out)\n proc.join(3)\n\n def test_ipc_array(self):\n for index, foreign, in self.variants():\n with self.subTest(index=index, foreign=foreign):\n self.check_ipc_array(index, foreign)\n\n\ndef staged_ipc_handle_test(handle, device_num, result_queue):\n def the_work():\n with cuda.gpus[device_num]:\n this_ctx = cuda.devices.get_context()\n deviceptr = handle.open_staged(this_ctx)\n arrsize = handle.size // np.dtype(np.intp).itemsize\n hostarray = np.zeros(arrsize, dtype=np.intp)\n cuda.driver.device_to_host(\n hostarray, deviceptr, size=handle.size,\n )\n handle.close()\n return hostarray\n\n core_ipc_handle_test(the_work, result_queue)\n\n\ndef staged_ipc_array_test(ipcarr, device_num, result_queue):\n try:\n with cuda.gpus[device_num]:\n with ipcarr as darr:\n arr = darr.copy_to_host()\n try:\n # should fail to reopen\n with ipcarr:\n pass\n except ValueError as e:\n if str(e) != 'IpcHandle is already opened':\n raise 
AssertionError('invalid exception message')\n else:\n raise AssertionError('did not raise on reopen')\n # Catch any exception so we can propagate it\n except: # noqa: E722\n # FAILED. propagate the exception as a string\n succ = False\n out = traceback.format_exc()\n else:\n # OK. send the ndarray back\n succ = True\n out = arr\n result_queue.put((succ, out))\n\n\n@skip_under_cuda_memcheck('Hangs cuda-memcheck')\n@skip_on_cudasim('Ipc not available in CUDASIM')\nclass TestIpcStaged(ContextResettingTestCase):\n def test_staged(self):\n # prepare data for IPC\n arr = np.arange(10, dtype=np.intp)\n devarr = cuda.to_device(arr)\n\n # spawn new process for testing\n mpctx = mp.get_context('spawn')\n result_queue = mpctx.Queue()\n\n # create IPC handle\n ctx = cuda.current_context()\n ipch = ctx.get_ipc_handle(devarr.gpu_data)\n # pickle\n buf = pickle.dumps(ipch)\n ipch_recon = pickle.loads(buf)\n self.assertIs(ipch_recon.base, None)\n self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))\n self.assertEqual(ipch_recon.size, ipch.size)\n\n # Test on every CUDA devices\n for device_num in range(len(cuda.gpus)):\n args = (ipch, device_num, result_queue)\n proc = mpctx.Process(target=staged_ipc_handle_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n proc.join(3)\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n\n def test_ipc_array(self):\n for device_num in range(len(cuda.gpus)):\n # prepare data for IPC\n arr = np.random.random(10)\n devarr = cuda.to_device(arr)\n ipch = devarr.get_ipc_handle()\n\n # spawn new process for testing\n ctx = mp.get_context('spawn')\n result_queue = ctx.Queue()\n args = (ipch, device_num, result_queue)\n proc = ctx.Process(target=staged_ipc_array_test, args=args)\n proc.start()\n succ, out = result_queue.get()\n proc.join(3)\n if not succ:\n self.fail(out)\n else:\n np.testing.assert_equal(arr, out)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.random.random",
"numpy.dtype"
]
] |
emschimmel/BrainPi
|
[
"96c0ccb727507bcce4d48292f77bb80a77164a0c"
] |
[
"1IntegrationTests/py-impl/PythonEyePiClient.py"
] |
[
"#!/usr/bin/env python\n\n# not used in this project.\n\nimport sys\nsys.path.append('../gen-py')\n\nfrom EyePi.ttypes import EyePiInput\nfrom EyePi.ttypes import ConfirmInput\nfrom GenericStruct.ttypes import ActionEnum\nfrom WeatherPi.ttypes import WeatherInput\n\nfrom ConnectionHelpers.DeviceRegistrator import DeviceRegistrator\nfrom ConnectionHelpers.ConnectEyePi import ConnectEyePi\n\nfrom thrift import Thrift\nimport cv2\nimport os.path\nimport random # test\nimport numpy as np\nimport pickle\n\nsys.path.append('../../')\nimport config\n\n### test\ndef read_image():\n root, dirs, files=next(os.walk(config.file_path))\n imageCollection=list(filter(lambda filename:filename.endswith('.jpg'), files))\n imageCollection+=list(filter(lambda filename:filename.endswith('.png'), files))\n return random.choice(imageCollection)\n### end test\n\n\ntry:\n ## mock! ###\n # normally a device would properly register itself and keep the token.\n # But in development case, the cahce is resetted every time. 
This mock registers the device.\n device_token = DeviceRegistrator().register_device()\n ### end mock ###\n\n input = EyePiInput()\n filename = config.file_path +read_image()\n print('image == '+filename)\n file = open(filename, 'rb')\n readfile = file.read()\n\n nparr = np.fromstring(readfile, np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n input.image = pickle.dumps(obj=image, protocol=None, fix_imports=False)\n\n actions = dict()\n weather_input = WeatherInput()\n weather_input.location = 'Amsterdam,nl'\n actionParameter = pickle.dumps(obj=weather_input, protocol=None, fix_imports=False)\n actions[ActionEnum.WEATHER] = actionParameter\n input.action = actions\n #parameter = GenericObject()\n #parameter.stringValue = \"%s\" % 'Amsterdam,nl'\n\n input.deviceToken = device_token\n #input.action = ActionEnum.WEATHER\n #input.actionParameters = parameter\n\n output = ConnectEyePi().handleRequest(input)\n print(output)\n if output.ok:\n for face in output.personCollection:\n confirm_input = ConfirmInput()\n confirm_input.image = face.image\n confirm_input.person = face.person\n ConnectEyePi().confimFace(confirm_input)\nexcept Thrift.TException as tx:\n print(\"%s\" % (tx.message))\n"
] |
[
[
"numpy.fromstring"
]
] |
PratyushTripathy/unet
|
[
"bb2e552d4a239df62ad3840a9f1176437790df90"
] |
[
"data.py"
] |
[
"from __future__ import print_function\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport numpy as np \nimport os\nimport glob\nimport skimage.io as io\nimport skimage.transform as trans\n\nSky = [128,128,128]\nBuilding = [128,0,0]\nPole = [192,192,128]\nRoad = [128,64,128]\nPavement = [60,40,222]\nTree = [128,128,0]\nSignSymbol = [192,128,128]\nFence = [64,64,128]\nCar = [64,0,128]\nPedestrian = [64,64,0]\nBicyclist = [0,128,192]\nUnlabelled = [0,0,0]\n\nCOLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,\n Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n\n\ndef adjustData(img,mask,flag_multi_class,num_class):\n if(flag_multi_class):\n img = img / 255\n mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]\n new_mask = np.zeros(mask.shape + (num_class,))\n for i in range(num_class):\n #for one pixel in the image, find the class in mask and convert it into one-hot vector\n #index = np.where(mask == i)\n #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)\n #new_mask[index_mask] = 1\n new_mask[mask == i,i] = 1\n new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))\n mask = new_mask\n elif(np.max(img) > 1):\n img = img / 255\n mask = mask /255\n mask[mask > 0.5] = 1\n mask[mask <= 0.5] = 0\n return (img,mask)\n\ndef trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = \"grayscale\",\n mask_color_mode = \"grayscale\",image_save_prefix = \"image\",mask_save_prefix = \"mask\",\n flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256),seed = 1):\n image_datagen = ImageDataGenerator(**aug_dict)\n mask_datagen = ImageDataGenerator(**aug_dict)\n \n image_generator = 
image_datagen.flow_from_directory(\n train_path,\n classes = [image_folder],\n class_mode = None,\n color_mode = image_color_mode,\n target_size = target_size,\n batch_size = batch_size,\n save_to_dir = save_to_dir,\n save_prefix = image_save_prefix,\n seed = seed)\n \n mask_generator = mask_datagen.flow_from_directory(\n train_path,\n classes = [mask_folder],\n class_mode = None,\n color_mode = mask_color_mode,\n target_size = target_size,\n batch_size = batch_size,\n save_to_dir = save_to_dir,\n save_prefix = mask_save_prefix,\n seed = seed)\n \n train_generator = zip(image_generator, mask_generator)\n \n for (img, mask) in train_generator:\n img,mask = adjustData(img,mask,flag_multi_class,num_class)\n yield (img, mask) \n \n\ndef testGenerator(test_path,num_image = 30,target_size = (256,256),flag_multi_class = False,as_gray = True):\n for i in range(num_image):\n img = io.imread(os.path.join(test_path,\"%d.png\"%i),as_gray = as_gray)\n img = img / 255\n img = trans.resize(img,target_size)\n img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img\n img = np.reshape(img,(1,)+img.shape)\n yield img\n\ndef geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = \"image\",mask_prefix = \"mask\",image_as_gray = True,mask_as_gray = True):\n image_name_arr = glob.glob(os.path.join(image_path,\"%s*.png\"%image_prefix))\n image_arr = []\n mask_arr = []\n for index,item in enumerate(image_name_arr):\n img = io.imread(item,as_gray = image_as_gray)\n img = np.reshape(img,img.shape + (1,)) if image_as_gray else img\n mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)\n mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask\n img,mask = adjustData(img,mask,flag_multi_class,num_class)\n image_arr.append(img)\n mask_arr.append(mask)\n image_arr = np.array(image_arr)\n mask_arr = np.array(mask_arr)\n return image_arr,mask_arr\n\n\ndef 
labelVisualize(num_class,color_dict,img):\n img = img[:,:,0] if len(img.shape) == 3 else img\n img_out = np.zeros(img.shape + (3,))\n for i in range(num_class):\n img_out[img == i,:] = color_dict[i]\n return img_out / 255\n\n\n\ndef saveResult(save_path,npyfile,flag_multi_class = False,num_class = 2):\n for i,item in enumerate(npyfile):\n img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]\n io.imsave(os.path.join(save_path,\"%d_predict.png\"%i),img)\n"
] |
[
[
"numpy.max",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"numpy.reshape",
"numpy.zeros"
]
] |
navjotts/flax
|
[
"5ffd0006701e4b162ae906c4f089553600d3114c"
] |
[
"examples/pixelcnn/train.py"
] |
[
"# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PixelCNN++ example.\"\"\"\n\n# See issue #620.\n# pytype: disable=wrong-keyword-args\n\nimport functools\nimport datetime\n\nfrom absl import logging\nfrom flax import jax_utils\nfrom flax import optim\nimport input_pipeline\nimport pixelcnn\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport ml_collections\nimport tensorflow as tf\n\n\ndef get_summary_writers(workdir):\n current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n log_dir = workdir + '/log/' + current_time\n train_log_dir = log_dir + '/train'\n eval_log_dir = log_dir + '/eval'\n train_summary_writer = tensorboard.SummaryWriter(train_log_dir)\n eval_summary_writer = 
tensorboard.SummaryWriter(eval_log_dir)\n return train_summary_writer, eval_summary_writer\n\n\ndef model(config: ml_collections.ConfigDict, **kwargs):\n return pixelcnn.PixelCNNPP(\n depth=config.n_resnet,\n features=config.n_feature,\n logistic_components=config.n_logistic_mix,\n **kwargs)\n\n\ndef neg_log_likelihood_loss(nn_out, images):\n # The log-likelihood in bits per pixel-channel\n means, inv_scales, logit_weights = (\n pixelcnn.conditional_params_from_outputs(nn_out, images))\n log_likelihoods = pixelcnn.logprob_from_conditional_params(\n images, means, inv_scales, logit_weights)\n return -jnp.mean(log_likelihoods) / (jnp.log(2) * np.prod(images.shape[-3:]))\n\n\ndef train_step(config: ml_collections.ConfigDict, learning_rate_fn, optimizer,\n ema, batch, dropout_rng):\n \"\"\"Perform a single training step.\"\"\"\n\n def loss_fn(params):\n \"\"\"loss function used for training.\"\"\"\n pcnn_out = model(\n config,\n dropout_p=config.dropout_rate).apply({'params': params},\n batch['image'],\n rngs={'dropout': dropout_rng},\n train=True)\n return neg_log_likelihood_loss(pcnn_out, batch['image'])\n\n lr = learning_rate_fn(optimizer.state.step)\n grad_fn = jax.value_and_grad(loss_fn)\n loss, grad = grad_fn(optimizer.target)\n grad = jax.lax.pmean(grad, 'batch')\n optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n\n # Compute exponential moving average (aka Polyak decay)\n ema_decay = config.polyak_decay\n ema = jax.tree_multimap(lambda ema, p: ema * ema_decay + (1 - ema_decay) * p,\n ema, optimizer.target)\n\n metrics = {'loss': jax.lax.pmean(loss, 'batch'), 'learning_rate': lr}\n return optimizer, ema, metrics\n\n\ndef eval_step(config, params, batch):\n images = batch['image']\n pcnn_out = model(config).apply({'params': params}, images, train=False)\n return {\n 'loss': jax.lax.pmean(neg_log_likelihood_loss(pcnn_out, images), 'batch')\n }\n\n\ndef load_and_shard_tf_batch(xs):\n local_device_count = jax.local_device_count()\n\n def _prepare(x):\n 
# Use _numpy() for zero-copy conversion between TF and NumPy.\n x = x._numpy() # pylint: disable=protected-access\n return x.reshape((local_device_count, -1) + x.shape[1:])\n\n return jax.tree_map(_prepare, xs)\n\n\ndef restore_checkpoint(workdir: str, optimizer, ema):\n return checkpoints.restore_checkpoint(workdir, (optimizer, ema))\n\n\ndef save_checkpoint(workdir: str, optimizer, ema, step):\n optimizer, ema = jax_utils.unreplicate((optimizer, ema))\n checkpoints.save_checkpoint(workdir, (optimizer, ema), step, keep=3)\n\n\ndef train_and_evaluate(config: ml_collections.ConfigDict, workdir: str):\n \"\"\"Runs a training and evaluation loop.\n\n Args:\n config: Configuration to use.\n workdir: Working directory for checkpoints and TF summaries. If this\n contains checkpoint training will be resumed from the latest checkpoint.\n \"\"\"\n tf.io.gfile.makedirs(workdir)\n\n batch_size = config.batch_size\n n_devices = jax.device_count()\n if jax.process_count() > 1:\n raise ValueError('PixelCNN++ example should not be run on more than 1 host'\n ' (for now)')\n if batch_size % n_devices > 0:\n raise ValueError('Batch size must be divisible by the number of devices')\n\n train_summary_writer, eval_summary_writer = get_summary_writers(workdir)\n # Load dataset\n data_source = input_pipeline.DataSource(config)\n train_ds = data_source.train_ds\n eval_ds = data_source.eval_ds\n steps_per_epoch = data_source.ds_info.splits[\n 'train'].num_examples // config.batch_size\n # Create dataset batch iterators\n train_iter = iter(train_ds)\n num_train_steps = train_ds.cardinality().numpy()\n steps_per_checkpoint = 1000\n\n # Create the model using data-dependent initialization. 
Don't shard the init\n # batch.\n assert config.init_batch_size <= batch_size\n init_batch = next(train_iter)['image']._numpy()[:config.init_batch_size]\n\n rng = jax.random.PRNGKey(config.seed)\n rng, init_rng, dropout_rng = jax.random.split(rng, 3)\n\n initial_variables = model(config).init(\n {\n 'params': init_rng,\n 'dropout': dropout_rng\n }, init_batch, train=False)['params']\n optimizer_def = optim.Adam(beta1=0.95, beta2=0.9995)\n optimizer = optimizer_def.create(initial_variables)\n\n optimizer, ema = restore_checkpoint(workdir, optimizer, initial_variables)\n ema = initial_variables\n step_offset = int(optimizer.state.step)\n\n optimizer, ema = jax_utils.replicate((optimizer, ema))\n\n # Learning rate schedule\n learning_rate_fn = lambda step: config.learning_rate * config.lr_decay**step\n\n # pmap the train and eval functions\n p_train_step = jax.pmap(\n functools.partial(train_step, config, learning_rate_fn),\n axis_name='batch')\n p_eval_step = jax.pmap(\n functools.partial(eval_step, config=config), axis_name='batch')\n\n # Gather metrics\n train_metrics = []\n\n for step, batch in zip(range(step_offset, num_train_steps), train_iter):\n # Load and shard the TF batch\n batch = load_and_shard_tf_batch(batch)\n\n # Generate a PRNG key that will be rolled into the batch.\n rng, step_rng = jax.random.split(rng)\n sharded_rngs = common_utils.shard_prng_key(step_rng)\n\n # Train step\n optimizer, ema, metrics = p_train_step(optimizer, ema, batch, sharded_rngs)\n train_metrics.append(metrics)\n\n # Quick indication that training is happening.\n logging.log_first_n(logging.INFO, 'Finished training step %d.', 5, step)\n\n if (step + 1) % steps_per_epoch == 0:\n epoch = step // steps_per_epoch\n # We've finished an epoch\n train_metrics = common_utils.get_metrics(train_metrics)\n # Get training epoch summary for logging\n train_summary = jax.tree_map(lambda x: x.mean(), train_metrics)\n # Send stats to Tensorboard\n for key, vals in train_metrics.items():\n for 
i, val in enumerate(vals):\n train_summary_writer.scalar(key, val, step - len(vals) + i + 1)\n # Reset train metrics\n train_metrics = []\n\n # Evaluation\n eval_metrics = []\n for eval_batch in eval_ds:\n # Load and shard the TF batch\n eval_batch = load_and_shard_tf_batch(eval_batch)\n # Step\n metrics = p_eval_step(ema, eval_batch)\n eval_metrics.append(metrics)\n eval_metrics = common_utils.get_metrics(eval_metrics)\n # Get eval epoch summary for logging\n eval_summary = jax.tree_map(lambda x: x.mean(), eval_metrics)\n\n # Log epoch summary\n logging.info('Epoch %d: TRAIN loss=%.6f, EVAL loss=%.6f', epoch,\n train_summary['loss'], eval_summary['loss'])\n\n eval_summary_writer.scalar('loss', eval_summary['loss'], step)\n train_summary_writer.flush()\n eval_summary_writer.flush()\n\n if (step + 1) % steps_per_checkpoint == 0 or step + 1 == num_train_steps:\n save_checkpoint(workdir, optimizer, ema, step)\n"
] |
[
[
"numpy.prod",
"tensorflow.io.gfile.makedirs"
]
] |
cdeil/pandas
|
[
"1accb6e7fdf8742e0aa304abbf1531a3b9633ce8"
] |
[
"pandas/core/tools/timedeltas.py"
] |
[
"\"\"\"\ntimedelta support tools\n\"\"\"\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import NaT\nfrom pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\n\nfrom pandas.core.arrays.timedeltas import sequence_to_td64ns\n\n\ndef to_timedelta(arg, unit=None, errors=\"raise\"):\n \"\"\"\n Convert argument to timedelta.\n\n Timedeltas are absolute differences in times, expressed in difference\n units (e.g. days, hours, minutes, seconds). This method converts\n an argument from a recognized timedelta format / value into\n a Timedelta type.\n\n Parameters\n ----------\n arg : str, timedelta, list-like or Series\n The data to be converted to timedelta. The character M by itself,\n e.g. '1M', is treated as minute, not month. The characters Y and y\n are treated as the mean length of the Gregorian calendar year -\n 365.2425 days or 365 days 5 hours 49 minutes 12 seconds.\n unit : str, optional\n Denotes the unit of the arg for numeric `arg`. Defaults to ``\"ns\"``.\n\n Possible values:\n\n * 'W'\n * 'D' / 'days' / 'day'\n * 'hours' / 'hour' / 'hr' / 'h'\n * 'm' / 'minute' / 'min' / 'minutes' / 'T'\n * 'S' / 'seconds' / 'sec' / 'second'\n * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'\n * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'\n * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'\n\n .. 
versionchanged:: 1.1.0\n\n Must not be specified when `arg` context strings and\n ``errors=\"raise\"``.\n\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaT.\n - If 'ignore', then invalid parsing will return the input.\n\n Returns\n -------\n timedelta64 or numpy.array of timedelta64\n Output type returned if parsing succeeded.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n convert_dtypes : Convert dtypes.\n\n Examples\n --------\n Parsing a single string to a Timedelta:\n\n >>> pd.to_timedelta('1 days 06:05:01.00003')\n Timedelta('1 days 06:05:01.000030')\n >>> pd.to_timedelta('15.5us')\n Timedelta('0 days 00:00:00.000015500')\n\n Parsing a list or array of strings:\n\n >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])\n TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],\n dtype='timedelta64[ns]', freq=None)\n\n Converting numbers by specifying the `unit` keyword argument:\n\n >>> pd.to_timedelta(np.arange(5), unit='s')\n TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',\n '0 days 00:00:03', '0 days 00:00:04'],\n dtype='timedelta64[ns]', freq=None)\n >>> pd.to_timedelta(np.arange(5), unit='d')\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n \"\"\"\n if unit is not None:\n unit = parse_timedelta_unit(unit)\n\n if errors not in (\"ignore\", \"raise\", \"coerce\"):\n raise ValueError(\"errors must be one of 'ignore', 'raise', or 'coerce'}\")\n\n if unit in {\"Y\", \"y\", \"M\"}:\n raise ValueError(\n \"Units 'M', 'Y', and 'y' are no longer supported, as they do not \"\n \"represent unambiguous timedelta values durations.\"\n )\n\n if arg is None:\n return arg\n elif isinstance(arg, ABCSeries):\n values = _convert_listlike(arg._values, 
unit=unit, errors=errors)\n return arg._constructor(values, index=arg.index, name=arg.name)\n elif isinstance(arg, ABCIndexClass):\n return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)\n elif isinstance(arg, np.ndarray) and arg.ndim == 0:\n # extract array scalar and process below\n arg = arg.item()\n elif is_list_like(arg) and getattr(arg, \"ndim\", 1) == 1:\n return _convert_listlike(arg, unit=unit, errors=errors)\n elif getattr(arg, \"ndim\", 1) > 1:\n raise TypeError(\n \"arg must be a string, timedelta, list, tuple, 1-d array, or Series\"\n )\n\n if isinstance(arg, str) and unit is not None:\n raise ValueError(\"unit must not be specified if the input is/contains a str\")\n\n # ...so it must be a scalar value. Return scalar.\n return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)\n\n\ndef _coerce_scalar_to_timedelta_type(r, unit=\"ns\", errors=\"raise\"):\n \"\"\"Convert string 'r' to a timedelta object.\"\"\"\n try:\n result = Timedelta(r, unit)\n except ValueError:\n if errors == \"raise\":\n raise\n elif errors == \"ignore\":\n return r\n\n # coerce\n result = NaT\n\n return result\n\n\ndef _convert_listlike(arg, unit=None, errors=\"raise\", name=None):\n \"\"\"Convert a list of objects to a timedelta index object.\"\"\"\n if isinstance(arg, (list, tuple)) or not hasattr(arg, \"dtype\"):\n # This is needed only to ensure that in the case where we end up\n # returning arg (errors == \"ignore\"), and where the input is a\n # generator, we return a useful list-like instead of a\n # used-up generator\n arg = np.array(list(arg), dtype=object)\n\n try:\n value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]\n except ValueError:\n if errors == \"ignore\":\n return arg\n else:\n # This else-block accounts for the cases when errors='raise'\n # and errors='coerce'. If errors == 'raise', these errors\n # should be raised. 
If errors == 'coerce', we shouldn't\n # expect any errors to be raised, since all parsing errors\n # cause coercion to pd.NaT. However, if an error / bug is\n # introduced that causes an Exception to be raised, we would\n # like to surface it.\n raise\n\n from pandas import TimedeltaIndex\n\n value = TimedeltaIndex(value, unit=\"ns\", name=name)\n return value\n"
] |
[
[
"pandas._libs.tslibs.timedeltas.parse_timedelta_unit",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.arrays.timedeltas.sequence_to_td64ns",
"pandas._libs.tslibs.timedeltas.Timedelta",
"pandas.TimedeltaIndex"
]
] |
novabiosignals/novainstrumentation
|
[
"02d29983f970077613143db66d8df2bce59f8714"
] |
[
"novainstrumentation/tools.py"
] |
[
"import pylab as pl\r\nimport numpy as np\r\nfrom os import path\r\nfrom numpy import abs, linspace, sin, pi, int16\r\nimport pandas\r\n\r\n\r\ndef plotfft(s, fmax, doplot=False):\r\n \"\"\" This functions computes the fft of a signal, returning the frequency\r\n and their magnitude values.\r\n\r\n Parameters\r\n ----------\r\n s: array-like\r\n the input signal.\r\n fmax: int\r\n the sampling frequency.\r\n doplot: boolean\r\n a variable to indicate whether the plot is done or not.\r\n\r\n Returns\r\n -------\r\n f: array-like\r\n the frequency values (xx axis)\r\n fs: array-like\r\n the amplitude of the frequency values (yy axis)\r\n \"\"\"\r\n\r\n fs = abs(np.fft.fft(s))\r\n f = linspace(0, fmax / 2, len(s) / 2)\r\n if doplot:\r\n pl.plot(f[1:len(s) / 2], fs[1:len(s) / 2])\r\n return (f[1:len(s) / 2].copy(), fs[1:len(s) / 2].copy())\r\n\r\n\r\ndef synthbeats2(duration, meanhr=60, stdhr=1, samplingfreq=250):\r\n #Minimaly based on the parameters from:\r\n #http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m\r\n #Inputs: duration in seconds\r\n #Returns: signal, peaks\r\n\r\n ibi = 60 / float(meanhr) * samplingfreq\r\n\r\n sibi = ibi - 60 / (float(meanhr) - stdhr) * samplingfreq\r\n\r\n peaks = np.arange(0, duration * samplingfreq, ibi)\r\n\r\n peaks[1:] = peaks[1:] + np.random.randn(len(peaks) - 1) * sibi\r\n\r\n if peaks[-1] >= duration * samplingfreq:\r\n peaks = peaks[:-1]\r\n peaks = peaks.astype('int')\r\n signal = np.zeros(duration * samplingfreq)\r\n signal[peaks] = 1.0\r\n\r\n return signal, peaks\r\n\r\n\r\ndef synthbeats(duration, meanhr=60, stdhr=1, samplingfreq=250, sinfreq=None):\r\n #Minimaly based on the parameters from:\r\n #http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m\r\n #If freq exist it will be used to generate a sin instead of using rand\r\n #Inputs: duration in seconds\r\n #Returns: signal, peaks\r\n\r\n t = np.arange(duration * samplingfreq) / float(samplingfreq)\r\n signal = np.zeros(len(t))\r\n\r\n 
print(len(t))\r\n print(len(signal))\r\n\r\n if sinfreq == None:\r\n\r\n npeaks = 1.2 * (duration * meanhr / 60)\r\n # add 20% more beats for some cummulative error\r\n hr = pl.randn(npeaks) * stdhr + meanhr\r\n peaks = pl.cumsum(60. / hr) * samplingfreq\r\n peaks = peaks.astype('int')\r\n peaks = peaks[peaks < t[-1] * samplingfreq]\r\n\r\n else:\r\n hr = meanhr + sin(2 * pi * t * sinfreq) * float(stdhr)\r\n index = int(60. / hr[0] * samplingfreq)\r\n peaks = []\r\n while index < len(t):\r\n peaks += [index]\r\n index += int(60. / hr[index] * samplingfreq)\r\n\r\n signal[peaks] = 1.0\r\n\r\n return t, signal, peaks\r\n\r\n\r\ndef load_with_cache(file_, recache=False, sampling=1,\r\n columns=None, temp_dir='.', data_type='int16'):\r\n \"\"\"@brief This function loads a file from the current directory and saves\r\n the cached file to later executions. It's also possible to make a recache\r\n or a subsampling of the signal and choose only a few columns of the signal,\r\n to accelerate the opening process.\r\n\r\n @param file String: the name of the file to open.\r\n @param recache Boolean: indication whether it's done recache or not\r\n (default = false).\r\n @param sampling Integer: the sampling step. if 1, the signal isn't\r\n sampled (default = 1).\r\n @param columns Array-Like: the columns to read from the file. 
if None,\r\n all columns are considered (default = None).\r\n\r\n @return data Array-Like: the data from the file.\r\n TODO: Should save cache in a different directory\r\n TODO: Create test function and check size of generated files\r\n TODO: receive a file handle\r\n \"\"\"\r\n\r\n \r\n \r\n cfile = '%s.npy' % file_\r\n\r\n if (not path.exists(cfile)) or recache:\r\n if columns == None:\r\n data = np.loadtxt(file_)[::sampling, :]\r\n else:\r\n data = np.loadtxt(file_)[::sampling, columns]\r\n\r\n np.save(cfile, data.astype(data_type))\r\n else:\r\n data = np.load(cfile)\r\n return data\r\n\r\n\r\ndef load_data(filename):\r\n \"\"\"\r\n :rtype : numpy matrix\r\n \"\"\"\r\n data = pandas.read_csv(filename, header=None, delimiter='\\t', skiprows=9)\r\n return data.as_matrix()\r\n \r\n\r\n"
] |
[
[
"numpy.sin",
"numpy.zeros",
"numpy.load",
"numpy.fft.fft",
"numpy.loadtxt",
"numpy.arange",
"pandas.read_csv"
]
] |
VoIlAlex/pytorchresearch
|
[
"c4f08cd0ec6b78788e682005c099aef4582640cb"
] |
[
"docs/example_1/back.py"
] |
[
"import pytorchresearch as ptr\n\n\nimport torch\nimport torchvision\n\nif __name__ == \"__main__\":\n # transform for data\n transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)\n )\n ])\n\n # dataloaders\n trainset = torchvision.datasets.CIFAR100(root='./data/datasets', train=True,\n download=True, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,\n shuffle=True, num_workers=2)\n testset = torchvision.datasets.CIFAR100(root='./data/datasets', train=False,\n download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32,\n shuffle=False, num_workers=2)\n\n # model specific stuff\n model = torchvision.models.MobileNetV2(num_classes=100)\n if torch.cuda.is_available():\n model = model.cuda()\n optimizer = torch.optim.SGD(\n params=model.parameters(),\n lr=0.001,\n momentum=0.9\n )\n criterion = torch.nn.CrossEntropyLoss()\n\n # MAGIC GOES HERE\n\n research = ptr.ModelResearch(\n research_path='.temp',\n research_scheme=[\n ptr.ModelConfigurationItem(),\n ptr.CurrentIterationItem(print_end=' ', iteration_modulo=10),\n ptr.LossPrintItem(iteration_modulo=10),\n ptr.LossVisualizationItem(iteration_modulo=10)\n ],\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n watch_test=False\n )\n\n research.start_research_session(\n trainloader, testloader, epochs=1, iteration_modulo=20)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
]
] |
litchiar/ArknightsAutoHelper
|
[
"55cc4c751ba899aaa3cabe4222687a6ef930f267"
] |
[
"addons/start_sp_stage/__init__.py"
] |
[
"import logging\nimport os\nimport random\nimport time\nfrom functools import lru_cache\n\nimport cv2\nimport numpy as np\n\nimport imgreco.main\nfrom Arknights.helper import logger\nfrom addons.activity import ActivityAddOn, get_stage_map\nfrom addons.base import BaseAddOn, pil2cv, crop_cv_by_rect, show_img\nfrom addons.common_cache import load_game_data\nfrom imgreco.ocr.cnocr import ocr_and_correct\n\nicon1 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon1.png'), cv2.IMREAD_GRAYSCALE)\nicon2 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon2.png'), cv2.IMREAD_GRAYSCALE)\n\n\n@lru_cache(maxsize=1)\ndef get_activity_infos():\n return load_game_data('activity_table')['basicInfo']\n\n\n@lru_cache()\ndef get_available_activity(display_type=None):\n activity_infos = get_activity_infos()\n name_set = set()\n for aid, info in activity_infos.items():\n if info.get('displayType') in {'SIDESTORY', 'BRANCHLINE'}:\n if info['displayType'] == 'BRANCHLINE' or info.get('isReplicate'):\n raw_name = info['name'][:-3] if info.get('isReplicate') else info['name']\n if display_type is None or display_type == info['displayType']:\n name_set.add(raw_name)\n return name_set\n\n\ndef get_activity_name(activity):\n name = activity['name']\n if activity['isReplicate']:\n return name[:-3]\n return name\n\n\ndef crop_image_only_outside(gray_img, raw_img, threshold=128, padding=3):\n mask = gray_img > threshold\n m, n = gray_img.shape\n mask0, mask1 = mask.any(0), mask.any(1)\n col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax()\n row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax()\n return raw_img[row_start - padding:row_end + padding, col_start - padding:col_end + padding]\n\n\nclass StartSpStageAddon(BaseAddOn):\n def __init__(self, helper=None):\n super(StartSpStageAddon, self).__init__(helper)\n self.scale = self.helper.viewport[1] / 720\n if self.helper.viewport != (1280, 720):\n logger.warning('It may produce 
some weird effects when the resolution is not 1280x720.')\n\n def apply_scale(self, value):\n if self.scale == 1:\n return value\n return int(value * self.scale)\n\n def run(self, stage_code: str, repeat_times: int = 1000, try_current_activity=True):\n stage_code = stage_code.upper()\n if try_current_activity:\n try:\n return ActivityAddOn(self.helper).run(stage_code, repeat_times)\n except:\n pass\n stage_code_map, zone_linear_map = get_stage_map()\n if stage_code not in stage_code_map:\n raise RuntimeError(f'无效的关卡: {stage_code}')\n stage = stage_code_map[stage_code]\n activity_id = stage['zoneId'].split('_')[0]\n activity_infos = get_activity_infos()\n activity = activity_infos[activity_id]\n logger.debug(f'stage: {stage}, activity: {activity}')\n self.enter_activity(activity)\n stage_linear = zone_linear_map[stage['zoneId']]\n self.helper.find_and_tap_stage_by_ocr(None, stage_code, stage_linear)\n return self.helper.module_battle_slim(None, repeat_times)\n\n def enter_activity(self, activity):\n vh = self.vh\n act_name = get_activity_name(activity)\n if act_name not in get_available_activity():\n raise RuntimeError(f'无效的活动: {act_name}')\n self.open_terminal()\n if activity['displayType'] == 'BRANCHLINE':\n self.tap_branch_line()\n else:\n self.tap_side_story()\n crop_flag = activity['displayType'] == 'SIDESTORY'\n act_pos_map = self.get_all_act_pos(crop_flag)\n if act_name not in act_pos_map:\n if activity['displayType'] == 'BRANCHLINE':\n raise RuntimeError(f'找不到相应活动: {act_name}')\n last_acts = act_pos_map.keys()\n while True:\n origin_x = random.randint(int(5.833 * vh), int(24.861 * vh))\n origin_y = random.randint(int(57.222 * vh), int(77.917 * vh))\n move = -random.randint(int(vh // 5), int(vh // 4))\n self.helper.adb.touch_swipe2((origin_x, origin_y),\n (random.randint(-20, 20), move), random.randint(900, 1200))\n act_pos_map = self.get_all_act_pos(crop_flag)\n if act_name in act_pos_map:\n break\n if last_acts == act_pos_map.keys():\n raise 
RuntimeError(f'找不到相应活动: {act_name}')\n last_acts = act_pos_map.keys()\n logger.info(f'switch to {act_name}')\n self.click(act_pos_map[act_name], 1)\n self.tap_enter_activity()\n\n def tap_back(self):\n vw, vh = self.vw, self.vh\n self.helper.tap_rect((2.222 * vh, 1.944 * vh, 22.361 * vh, 8.333 * vh))\n time.sleep(0.5)\n\n def get_all_act_pos(self, crop=False):\n act_map = {}\n screen = self.screenshot()\n cv_screen = pil2cv(screen)\n for icon in [icon1, icon2]:\n act_map.update(self.get_act_pos_by_icon(cv_screen, icon, crop))\n logger.info(act_map)\n return act_map\n\n def get_act_pos_by_icon(self, cv_screen, icon, crop=False):\n vh, vw = self.vh, self.vw\n raw_screen = cv_screen.copy()\n if self.scale != 1:\n cv_screen = cv2.resize(cv_screen, (int(self.helper.viewport[0] / self.scale), 720))\n roi = crop_cv_by_rect(cv_screen, (0, 0, 10.000 * vh, 100.000 * vh))\n roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n result = cv2.matchTemplate(roi, icon, cv2.TM_CCOEFF_NORMED)\n loc = np.where(result >= 0.8)\n tag_set = set()\n tag_set2 = set()\n res = {}\n dbg_screen = raw_screen.copy()\n available_activity = get_available_activity()\n for pt in zip(*loc[::-1]):\n pos_key = (pt[0] // 100, pt[1] // 100)\n pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))\n if pos_key in tag_set or pos_key2 in tag_set2:\n continue\n tag_set.add(pos_key)\n tag_set2.add(pos_key2)\n if icon1 is icon:\n x, y = (int(pt[0]) + 35, int(pt[1]) - 6)\n tw, th = map(self.apply_scale, (180, 40))\n else:\n x, y = (int(pt[0]) + 35, int(pt[1]) - 3)\n tw, th = map(self.apply_scale, (150, 30))\n l, t = map(self.apply_scale, (x, y))\n tag_img = raw_screen[t:t + th, l:l + tw]\n if crop:\n gray_tag = cv2.cvtColor(tag_img, cv2.COLOR_RGB2GRAY)\n tag_img = crop_image_only_outside(gray_tag, tag_img, 160)\n factor = 2.5 - self.scale\n if factor > 1:\n # print(factor)\n tag_img = cv2.resize(tag_img, (0, 0), fx=factor, fy=factor, interpolation=cv2.INTER_LINEAR)\n # show_img(tag_img)\n # conv-lite-fc has better 
accuracy, but it is slower than densenet-lite-fc.\n name = ocr_and_correct(tag_img, available_activity, model_name='densenet-lite-fc', log_level=logging.INFO)\n if name:\n res[name] = (int(l + 85 * self.scale), int(t + 20 * self.scale))\n cv2.rectangle(dbg_screen, (l, t), (l + tw, t + th), (255, 255, 0), 2)\n # show_img(dbg_screen)\n return res\n\n def tap_side_story(self):\n vh, vw = self.vh, self.vw\n logger.info('open side story view')\n self.helper.tap_rect((44.297 * vw, 88.611 * vh, 56.406 * vw, 98.750 * vh))\n time.sleep(1)\n\n def tap_branch_line(self):\n logger.info('open branch line view')\n vh, vw = self.vh, self.vw\n self.helper.tap_rect((29.375 * vw, 88.611 * vh, 41.719 * vw, 98.750 * vh))\n time.sleep(1)\n\n def tap_enter_activity(self):\n logger.info('enter activity')\n vh, vw = self.vh, self.vw\n self.helper.tap_rect((100 * vw - 24.583 * vh, 69.167 * vh, 100 * vw - 8.750 * vh, 75.556 * vh))\n time.sleep(1)\n\n def open_terminal(self):\n self.helper.back_to_main()\n logger.info('open terminal')\n self.helper.tap_quadrilateral(imgreco.main.get_ballte_corners(self.screenshot()))\n time.sleep(1)\n\n\nif __name__ == '__main__':\n StartSpStageAddon().run('CB-10', 0, False)\n # StartSpStageAddon().get_all_act_pos()\n"
] |
[
[
"numpy.where"
]
] |
Nnemr/PRNet
|
[
"9176a943377f925d56ce361a47eb0957afa13edc"
] |
[
"get_300WLP_maps.py"
] |
[
"''' \nGenerate uv position map of 300W_LP.\n'''\nimport os, sys\nimport numpy as np\nimport scipy.io as sio\nimport random as ran\nfrom skimage.transform import SimilarityTransform\nfrom skimage import io, util\nimport skimage.transform\nfrom time import time\nimport cv2\nimport matplotlib.pyplot as plt\nsys.path.append('..')\nimport face3d\nfrom face3d import mesh\nfrom face3d.morphable_model import MorphabelModel\n\ndef process_uv(uv_coords, uv_h = 256, uv_w = 256):\n uv_coords[:,0] = uv_coords[:,0]*(uv_w - 1)\n uv_coords[:,1] = uv_coords[:,1]*(uv_h - 1)\n uv_coords[:,1] = uv_h - uv_coords[:,1] - 1\n uv_coords = np.hstack((uv_coords, np.zeros((uv_coords.shape[0], 1)))) # add z\n return uv_coords\n\ndef run_posmap_300W_LP(bfm, image_path, mat_path, save_folder, uv_h = 256, uv_w = 256, image_h = 256, image_w = 256):\n # 1. load image and fitted parameters\n image_name = image_path.strip().split('/')[-1]\n image = io.imread(image_path)/255;\n [h, w, c] = image.shape;\n \n info = sio.loadmat(mat_path);\n pose_para = info['Pose_Para'].T.astype(np.float32);\n shape_para = info['Shape_Para'].astype(np.float32);\n exp_para = info['Exp_Para'].astype(np.float32);\n # 2. generate mesh;\n # generate shape\n vertices = bfm.generate_vertices(shape_para, exp_para);\n # transform mesh\n s = pose_para[-1, 0];\n angles = pose_para[:3, 0];\n t = pose_para[3:6, 0];\n transformed_vertices = bfm.transform_3ddfa(vertices, s, angles, t)\n projected_vertices = transformed_vertices.copy() # using stantard camera & orth projection as in 3DDFA\n image_vertices = projected_vertices.copy()\n image_vertices[:,1] = h - image_vertices[:,1] - 1\n\n # 3. 
crop image with key points\n kpt = image_vertices[bfm.kpt_ind, :].astype(np.int32)\n left = np.min(kpt[:, 0])\n right = np.max(kpt[:, 0])\n top = np.min(kpt[:, 1])\n bottom = np.max(kpt[:, 1])\n center = np.array([right - (right - left) / 2.0, \n bottom - (bottom - top) / 2.0])\n old_size = (right - left + bottom - top)/2\n size = int(old_size*1.5)\n # random pertube. you can change the numbers\n marg = old_size*0.1\n t_x = np.random.rand()*marg*2 - marg\n t_y = np.random.rand()*marg*2 - marg\n center[0] = center[0]+t_x;\n center[1] = center[1]+t_y\n size = size*(np.random.rand()*0.2 + 0.9)\n\n # crop and record the transform parameters\n src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]]);\n DST_PTS = np.array([[0, 0], [0, image_h - 1], [image_w - 1, 0]]);\n tform = skimage.transform.estimate_transform('similarity', src_pts, DST_PTS);\n # transform face position(image vertices) along with 2d facial image \n angle = np.random.rand() * 90 - 45;\n rows, cols = image.shape[0], image.shape[1];\n # rotation around center\n center = np.array((cols, rows)) / 2. - 0.5;\n tform1 = SimilarityTransform(translation=center);\n tform2 = SimilarityTransform(rotation=np.deg2rad(angle));\n tform3 = SimilarityTransform(translation=-center);\n rotate_transform = tform3 + tform2 + tform1;\n tform = rotate_transform + tform;\n opt = ran.randint(1,2);\n cropped_image = skimage.transform.warp(image, tform.inverse, output_shape=(image_h, image_w));\n position = image_vertices.copy()\n position[:, 2] = 1\n position = np.dot(position, tform.params.T)\n position[:, 2] = image_vertices[:, 2]*tform.params[0, 0] # scale z\n position[:, 2] = position[:, 2] - np.min(position[:, 2]) # translate z\n # 4. 
uv position map: render position in uv space\n uv_position_map = mesh.render.render_colors(uv_coords, bfm.full_triangles, position, uv_h, uv_w, c = 3)\n #cv2.imshow('image', cropped_image);\n #cv2.waitKey(0);\n #cv2.destroyAllWindows();\n # 5. save files\n io.imsave('{}\\{}'.format(save_folder, image_name), np.squeeze(cropped_image));\n np.save('{}\\{}'.format(save_folder, image_name.replace('jpg', 'npy')), uv_position_map);\n io.imsave('{}\\{}'.format('results/uv_maps/', image_name.replace('.jpg', '_posmap.jpg')), (uv_position_map)/abs(uv_position_map.max())); # only for show\n #cv2.imwrite(image_name[:-4]+'_posmap.jpg',uv_position_map);\n # --verify\n #import cv2\n uv_texture_map_rec = cv2.remap(cropped_image, uv_position_map[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR,borderMode= cv2.BORDER_CONSTANT,borderValue=(0));\n #io.imsave('{}\\{}'.format(save_folder, image_name.replace('.jpg', '_tex.jpg')), np.squeeze(uv_texture_map_rec)); #Save fitted face on position map (texture).\n\n\nif __name__ == '__main__':\n save_folder = 'results/'\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n # set para\n uv_h = uv_w = 256\n image_h = image_w = 256;\n # load uv coords\n global uv_coords\n uv_coords = face3d.morphable_model.load.load_uv_coords('Data/BFM/Out/BFM_UV.mat') \n uv_coords = process_uv(uv_coords, uv_h, uv_w)\n '''\n Save LFPW Testing as well, only the first 8.\n '''\n # load bfm \n bfm = MorphabelModel('Data/BFM/Out/BFM.mat') \n # run\n content = [];\n print('Running');\n s =0;\n f=0;\n types = ['AFW', 'HELEN', 'LFPW', 'IBUG', 'LFPW_Test'];\n for i in types:\n print(i);\n with open(i+'_Data.txt', 'r') as fileRead:\n content = [file.rstrip('\\n') for file in fileRead];\n s=0;\n print(len(content));\n for filename in content:\n #print(filename)\n #if(s==8 and i is 'LFPW_Test'):\n # break\n if(s%500 ==0):\n print(str(s) +'/' +str(len(content)))\n image_path = 'Data/BFM/300W_LP/'+ i+'/'+filename+'.jpg';\n mat_path = 
'Data/BFM/300W_LP/'+i+'/'+filename+'.mat';\n if(i is 'LFPW_Test'):\n image_path = 'Data/BFM/300W_LP/'+ 'LFPW'+'/'+filename+'.jpg';\n mat_path = 'Data/BFM/300W_LP/'+'LFPW'+'/'+filename+'.mat';\n run_posmap_300W_LP(bfm, image_path, mat_path, save_folder)\n s+=1; \n print(s+f)\n print(f)\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.random.rand",
"numpy.zeros",
"scipy.io.loadmat",
"numpy.min",
"numpy.deg2rad",
"numpy.squeeze"
]
] |
lannguyen0910/theseus
|
[
"5c08fb2f4a9c7ffa395788e6a0ade43780e8bd7d"
] |
[
"theseus/classification/trainer/trainer.py"
] |
[
"import torch\nfrom torchvision.transforms import functional as TFF\nimport matplotlib.pyplot as plt\nfrom theseus.base.trainer.supervised_trainer import SupervisedTrainer\nfrom theseus.utilities.loading import load_state_dict\nfrom theseus.classification.utilities.gradcam import CAMWrapper, show_cam_on_image\nfrom theseus.utilities.visualization.visualizer import Visualizer\nfrom theseus.utilities.analysis.analyzer import ClassificationAnalyzer\n\nfrom theseus.utilities.loggers.observer import LoggerObserver\nLOGGER = LoggerObserver.getLogger(\"main\")\n\nclass ClassificationTrainer(SupervisedTrainer):\n \"\"\"Trainer for classification tasks\n \n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def check_best(self, metric_dict):\n \"\"\"\n Hook function, called after metrics are calculated\n \"\"\"\n if metric_dict['bl_acc'] > self.best_value:\n if self.iters > 0: # Have been training, else in evaluation-only mode or just sanity check\n LOGGER.text(\n f\"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}\",\n level=LoggerObserver.INFO)\n self.best_value = metric_dict['bl_acc']\n self.save_checkpoint('best')\n \n else:\n if self.visualize_when_val:\n self.visualize_pred()\n\n def save_checkpoint(self, outname='last'):\n \"\"\"\n Save all information of the current iteration\n \"\"\"\n weights = {\n 'model': self.model.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'iters': self.iters,\n 'best_value': self.best_value,\n }\n\n if self.scaler is not None:\n weights[self.scaler.state_dict_key] = self.scaler.state_dict()\n \n self.checkpoint.save(weights, outname)\n\n def load_checkpoint(self, path:str):\n \"\"\"\n Load all information the current iteration from checkpoint \n \"\"\"\n LOGGER.text(\"Loading checkpoints...\", level=LoggerObserver.INFO)\n state_dict = torch.load(path, map_location='cpu')\n self.iters = load_state_dict(self.iters, state_dict, 'iters')\n self.best_value = 
load_state_dict(self.best_value, state_dict, 'best_value') \n self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key)\n\n \n def visualize_gt(self):\n \"\"\"\n Visualize dataloader for sanity check \n \"\"\"\n\n LOGGER.text(\"Visualizing dataset...\", level=LoggerObserver.DEBUG)\n visualizer = Visualizer()\n\n # Train batch\n batch = next(iter(self.trainloader))\n images = batch[\"inputs\"]\n\n batch = []\n for idx, inputs in enumerate(images):\n img_show = visualizer.denormalize(inputs)\n img_cam = TFF.to_tensor(img_show)\n batch.append(img_cam)\n grid_img = visualizer.make_grid(batch)\n\n fig = plt.figure(figsize=(8,8))\n plt.axis('off')\n plt.imshow(grid_img)\n plt.tight_layout(pad=0)\n LOGGER.log([{\n 'tag': \"Sanitycheck/batch/train\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n # Validation batch\n batch = next(iter(self.valloader))\n images = batch[\"inputs\"]\n\n batch = []\n for idx, inputs in enumerate(images):\n img_show = visualizer.denormalize(inputs)\n img_cam = TFF.to_tensor(img_show)\n batch.append(img_cam)\n grid_img = visualizer.make_grid(batch)\n\n fig = plt.figure(figsize=(8,8))\n plt.axis('off')\n plt.imshow(grid_img)\n plt.tight_layout(pad=0)\n\n LOGGER.log([{\n 'tag': \"Sanitycheck/batch/val\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n\n @torch.enable_grad() #enable grad for CAM\n def visualize_pred(self):\n r\"\"\"Visualize model prediction and CAM\n \n \"\"\"\n # Vizualize Grad Class Activation Mapping and model predictions\n LOGGER.text(\"Visualizing model predictions...\", level=LoggerObserver.DEBUG)\n\n visualizer = Visualizer()\n\n batch = next(iter(self.valloader))\n images = batch[\"inputs\"]\n targets = batch[\"targets\"]\n\n self.model.eval()\n\n model_name = self.model.model.name\n grad_cam = CAMWrapper.get_method(\n name='gradcam', \n model=self.model.model.get_model(), \n model_name=model_name, 
use_cuda=next(self.model.parameters()).is_cuda)\n\n grayscale_cams, label_indices, scores = grad_cam(images, return_probs=True)\n \n gradcam_batch = []\n pred_batch = []\n for idx in range(len(grayscale_cams)):\n image = images[idx]\n target = targets[idx].item()\n label = label_indices[idx]\n grayscale_cam = grayscale_cams[idx, :]\n score = scores[idx]\n\n img_show = visualizer.denormalize(image)\n visualizer.set_image(img_show)\n if self.valloader.dataset.classnames is not None:\n label = self.valloader.dataset.classnames[label]\n target = self.valloader.dataset.classnames[target]\n\n if label == target:\n color = [0,1,0]\n else:\n color = [1,0,0]\n\n visualizer.draw_label(\n f\"GT: {target}\\nP: {label}\\nC: {score:.4f}\", \n fontColor=color, \n fontScale=0.8,\n thickness=2,\n outline=None,\n offset=100\n )\n \n img_cam =show_cam_on_image(img_show, grayscale_cam, use_rgb=True)\n\n img_cam = TFF.to_tensor(img_cam)\n gradcam_batch.append(img_cam)\n\n pred_img = visualizer.get_image()\n pred_img = TFF.to_tensor(pred_img)\n pred_batch.append(pred_img)\n\n if idx == 63: # limit number of images\n break\n \n # GradCAM images\n gradcam_grid_img = visualizer.make_grid(gradcam_batch)\n fig = plt.figure(figsize=(8,8))\n plt.imshow(gradcam_grid_img)\n plt.axis(\"off\")\n plt.tight_layout(pad=0)\n LOGGER.log([{\n 'tag': \"Validation/gradcam\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n # Prediction images\n pred_grid_img = visualizer.make_grid(pred_batch)\n fig = plt.figure(figsize=(10,10))\n plt.imshow(pred_grid_img)\n plt.axis(\"off\")\n plt.tight_layout(pad=0)\n LOGGER.log([{\n 'tag': \"Validation/prediction\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n # Zeroing gradients in optimizer for safety\n self.optimizer.zero_grad()\n\n @torch.no_grad()\n def visualize_model(self):\n # Vizualize Model Graph\n LOGGER.text(\"Visualizing architecture...\", 
level=LoggerObserver.DEBUG)\n\n batch = next(iter(self.valloader))\n images = batch[\"inputs\"].to(self.model.device)\n LOGGER.log([{\n 'tag': \"Sanitycheck/analysis/architecture\",\n 'value': self.model.model.get_model(),\n 'type': LoggerObserver.TORCH_MODULE,\n 'kwargs': {\n 'inputs': images\n }\n }])\n\n def analyze_gt(self):\n \"\"\"\n Perform simple data analysis\n \"\"\"\n LOGGER.text(\"Analyzing datasets...\", level=LoggerObserver.DEBUG)\n analyzer = ClassificationAnalyzer()\n analyzer.add_dataset(self.trainloader.dataset)\n fig = analyzer.analyze(figsize=(10,5))\n LOGGER.log([{\n 'tag': \"Sanitycheck/analysis/train\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n analyzer = ClassificationAnalyzer()\n analyzer.add_dataset(self.valloader.dataset)\n fig = analyzer.analyze(figsize=(10,5))\n LOGGER.log([{\n 'tag': \"Sanitycheck/analysis/val\",\n 'value': fig,\n 'type': LoggerObserver.FIGURE,\n 'kwargs': {\n 'step': self.iters\n }\n }])\n\n def on_evaluate_end(self):\n if self.visualize_when_val:\n self.visualize_pred()\n self.save_checkpoint()\n \n def on_start(self):\n if self.resume is not None:\n self.load_checkpoint(self.resume)\n\n def sanitycheck(self):\n \"\"\"Sanity check before training\n \"\"\"\n self.visualize_gt()\n self.analyze_gt()\n self.visualize_model()\n self.evaluate_epoch()\n"
] |
[
[
"torch.enable_grad",
"torch.no_grad",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"torch.load",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
microsoft/DistributedBERT
|
[
"e6245fee4d7123466a3e3b53f8afacffd6baa75f"
] |
[
"run_pretraining.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run masked LM/next sentence masked_lm pre-training for BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport modeling\nimport optimization\nimport tensorflow as tf\nimport horovod.tensorflow as hvd\nfrom tensorflow.python import debug as tf_debug\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. 
\"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\n \"input_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"validation_input_file\", None,\n \"Input validation TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"input_dir\", None,\n \"Input TF example dir.\")\n\nflags.DEFINE_string(\n \"validation_input_dir\", None,\n \"Input validation TF example dir.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded. Must match data generation.\")\n\nflags.DEFINE_integer(\n \"max_predictions_per_seq\", 20,\n \"Maximum number of masked LM predictions per sequence. 
\"\n \"Must match data generation.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\"do_train_eval\", False, \"Whether to run train with eval.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"num_train_steps\", 100000, \"Number of training steps.\")\n\nflags.DEFINE_integer(\"num_warmup_steps\", 10000, \"Number of warmup steps.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\"max_eval_steps\", None, \"Maximum number of eval steps.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. 
Total number of TPU cores to use.\")\n\n\nflags.DEFINE_integer(\"hooking_frequence\", 100, \"Hooking frequence.\")\n\nflags.DEFINE_bool(\"reduce_log\", False, \"Reduce log.\")\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 1, \"Keep checkpoint max.\")\n\nflags.DEFINE_bool(\"xla\", True, \"Whether to train with XLA optimization.\")\n\nflags.DEFINE_bool(\"adjust_lr\", True, \"Whether to adjust learning_rate.\")\n\nflags.DEFINE_integer(\"previous_train_steps\", 0, \"Previous train steps.\")\n\nflags.DEFINE_integer(\"post_train_steps\", 0, \"Post train steps.\")\n\nflags.DEFINE_bool(\"use_hvd\", True, \"Whether to use Horovod.\")\n\nflags.DEFINE_bool(\"use_compression\", True, \"Whether to use compression in Horovod.\")\n\nflags.DEFINE_bool(\"use_fp16\", True, \"Whether to use fp16.\")\n\nflags.DEFINE_bool(\"cos_decay\", False, \"Whether to use cos decay.\")\n\nflags.DEFINE_bool(\"use_lamb\", False, \"Whether to use lamb.\")\n\nflags.DEFINE_bool(\"auto_recover\", False, \"Whether to use auto recover.\")\n\nflags.DEFINE_string(\"recover_dir\", None, \"The output directory where the model checkpoints will be recovered.\")\n\nflags.DEFINE_integer(\"ckpt_no\", None, \"Checkpoint number of model to be recovered.\")\n\nflags.DEFINE_integer(\"ckpt_no_input\", None, \"Checkpoint number of input to be recovered.\")\n\nflags.DEFINE_bool(\"clip\", False, \"Whether to use clip.\")\n\nflags.DEFINE_bool(\"profile\", False, \"Whether to use profile.\")\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, adjust_lr, use_hvd,\n use_compression, use_fp16, clip, cos_decay,\n use_lamb, previous_train_steps, post_train_steps):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n 
tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n compute_type=tf.float16 if use_fp16 else tf.float32)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights, clip)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels, clip)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n 
train_op, update_learning_rate = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, adjust_lr, use_hvd,\n use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps)\n\n logging_hook = tf.train.LoggingTensorHook({\"loss\": total_loss, \"learning_rate\": update_learning_rate}, every_n_iter=FLAGS.hooking_frequence)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n training_hooks=[logging_hook])\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n 
\"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n )\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights, clip):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n if clip:\n log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))\n else:\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be 
zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator / denominator\n\n return (loss, per_example_loss, log_probs)\n\n\ndef get_next_sentence_output(bert_config, input_tensor, labels, clip):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n if clip:\n log_probs = tf.log(tf.clip_by_value(tf.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6))\n else:\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)\n\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, 
dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor\n\n\ndef input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4,\n batch_size=None,\n use_hvd=True):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n # batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n\n if use_hvd:\n d = d.shard(hvd.size(), hvd.rank()) #TODO only for Horovod, shard to mimic single_GPU = False\n print(\"Data shard: %s %s\" % (hvd.size(), hvd.rank()))\n\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. 
This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n # d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn\n\n\ndef _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n if FLAGS.use_hvd:\n hvd.init()\n\n if FLAGS.reduce_log and (hvd.rank() != 0):\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank()))\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_train_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n if FLAGS.recover_dir is not None:\n if FLAGS.use_hvd:\n FLAGS.recover_dir = FLAGS.recover_dir if hvd.rank() == 0 else 
os.path.join(FLAGS.recover_dir, str(hvd.rank()))\n path_ckpt = os.path.join(FLAGS.output_dir, \"checkpoint\")\n path_ckpt_input = os.path.join(FLAGS.output_dir, \"checkpoint_input\")\n\n if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt):\n with tf.gfile.GFile(path_ckpt, \"w\") as writer:\n writer.write('model_checkpoint_path: \"%s-%s\"\\n' % (os.path.join(FLAGS.recover_dir, \"model.ckpt\"), str(FLAGS.ckpt_no)))\n writer.write('all_model_checkpoint_paths: \"%s-%s\"\\n' % (os.path.join(FLAGS.recover_dir, \"model.ckpt\"), str(FLAGS.ckpt_no)))\n\n if FLAGS.ckpt_no_input is not None and not tf.gfile.Exists(path_ckpt_input):\n with tf.gfile.GFile(path_ckpt_input, \"w\") as writer:\n writer.write('model_checkpoint_path: \"%s-%s\"\\n' % (os.path.join(FLAGS.recover_dir, \"input.ckpt\"), str(FLAGS.ckpt_no_input)))\n writer.write('all_model_checkpoint_paths: \"%s-%s\"\\n' % (os.path.join(FLAGS.recover_dir, \"input.ckpt\"), str(FLAGS.ckpt_no_input)))\n\n if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval):\n (cpath, cname) = os.path.split(FLAGS.bert_config_file)\n tf.gfile.Copy(FLAGS.bert_config_file, os.path.join(FLAGS.output_dir, cname), True)\n\n input_files = []\n if FLAGS.input_file is not None:\n for input_pattern in FLAGS.input_file.split(\",\"):\n input_files.extend(tf.gfile.Glob(input_pattern))\n if FLAGS.input_dir is not None:\n for filename in tf.gfile.ListDirectory(FLAGS.input_dir):\n input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.input_dir, filename)))\n\n tf.logging.info(\"*** Input Files ***\")\n for input_file in input_files:\n tf.logging.info(\" %s\" % input_file)\n\n validation_input_files = []\n if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None:\n validation_input_files = input_files\n else:\n if FLAGS.validation_input_file is not None:\n for input_pattern in FLAGS.validation_input_file.split(\",\"):\n validation_input_files.extend(tf.gfile.Glob(input_pattern))\n if 
FLAGS.validation_input_dir is not None:\n for filename in tf.gfile.ListDirectory(FLAGS.validation_input_dir):\n validation_input_files.extend(tf.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename)))\n\n tf.logging.info(\"*** Input Validation Files ***\")\n for input_file in validation_input_files:\n tf.logging.info(\" %s\" % input_file)\n\n config = tf.ConfigProto()\n if FLAGS.xla:\n config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n if FLAGS.use_hvd:\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n config.gpu_options.allow_growth=True\n\n run_config = tf.estimator.RunConfig(\n model_dir=FLAGS.output_dir,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n log_step_count_steps=FLAGS.hooking_frequence,\n session_config=config)\n\n if FLAGS.use_hvd and hvd.rank() != 0 and not FLAGS.auto_recover:\n run_config = tf.estimator.RunConfig(\n model_dir=FLAGS.output_dir,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n save_checkpoints_steps=None,\n save_checkpoints_secs=None,\n log_step_count_steps=FLAGS.hooking_frequence,\n session_config=config)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.num_train_steps,\n num_warmup_steps=FLAGS.num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n adjust_lr=FLAGS.adjust_lr,\n use_hvd=FLAGS.use_hvd,\n use_compression=FLAGS.use_compression,\n use_fp16=FLAGS.use_fp16,\n clip=FLAGS.clip,\n cos_decay=FLAGS.cos_decay,\n use_lamb=FLAGS.use_lamb,\n previous_train_steps=FLAGS.previous_train_steps,\n post_train_steps=FLAGS.post_train_steps)\n\n hooks = []\n\n if FLAGS.use_hvd:\n hooks.append(hvd.BroadcastGlobalVariablesHook(0))\n\n if hvd.rank() == -1: #if debug, set 0\n CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline')\n CLIDebugHook.add_tensor_filter(\"has_inf_or_nan\", 
tf_debug.has_inf_or_nan)\n hooks.append(CLIDebugHook)\n\n if FLAGS.profile and hvd.rank() == 0:\n ProfilerHook = tf.train.ProfilerHook(save_steps=FLAGS.hooking_frequence, output_dir=FLAGS.output_dir, show_dataflow=True, show_memory=True)\n hooks.append(ProfilerHook)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config)\n\n if FLAGS.do_train:\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n train_input_fn = input_fn_builder(\n input_files=input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=True,\n batch_size=FLAGS.train_batch_size,\n use_hvd=FLAGS.use_hvd)\n\n if FLAGS.auto_recover:\n hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))\n\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)\n\n if FLAGS.do_eval:\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n eval_input_fn = input_fn_builder(\n input_files=validation_input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=False,\n batch_size=FLAGS.eval_batch_size,\n use_hvd=FLAGS.use_hvd)\n\n result = estimator.evaluate(\n input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_train_eval:\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n train_input_fn = 
input_fn_builder(\n input_files=input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=True,\n batch_size=FLAGS.train_batch_size,\n use_hvd=FLAGS.use_hvd)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n eval_input_fn = input_fn_builder(\n input_files=validation_input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=False,\n batch_size=FLAGS.eval_batch_size,\n use_hvd=FLAGS.use_hvd)\n\n if FLAGS.auto_recover:\n hooks.append(tf.data.experimental.CheckpointInputPipelineHook(estimator))\n\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\nif __name__ == \"__main__\":\n # flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] |
[
[
"tensorflow.data.TFRecordDataset",
"tensorflow.python.debug.LocalCLIDebugHook",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.metrics.mean",
"tensorflow.nn.softmax",
"tensorflow.train.ProfilerHook",
"tensorflow.one_hot",
"tensorflow.parse_single_example",
"tensorflow.trainable_variables",
"tensorflow.flags.DEFINE_string",
"tensorflow.estimator.TrainSpec",
"tensorflow.FixedLenFeature",
"tensorflow.argmax",
"tensorflow.logging.info",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"tensorflow.estimator.RunConfig",
"tensorflow.variable_scope",
"tensorflow.constant",
"tensorflow.nn.bias_add",
"tensorflow.app.run",
"tensorflow.nn.log_softmax",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.range",
"tensorflow.train.LoggingTensorHook",
"tensorflow.gfile.Glob",
"tensorflow.gfile.Exists",
"tensorflow.estimator.EvalSpec",
"tensorflow.gfile.GFile",
"tensorflow.metrics.accuracy",
"tensorflow.reduce_sum",
"tensorflow.data.experimental.CheckpointInputPipelineHook",
"tensorflow.train.init_from_checkpoint",
"tensorflow.to_int32",
"tensorflow.zeros_initializer",
"tensorflow.contrib.data.parallel_interleave",
"tensorflow.train.Scaffold",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.gather",
"tensorflow.estimator.Estimator",
"tensorflow.gfile.ListDirectory",
"tensorflow.reduce_mean"
]
] |
TristanHehnen/propti
|
[
"4aa52ea5f47a1497df6ae4e6f61cdfc680cce69a",
"4aa52ea5f47a1497df6ae4e6f61cdfc680cce69a"
] |
[
"propti/propti_pre_processing.py",
"propti/spotpy_wrapper.py"
] |
[
"import re\nimport os\nimport sys\nimport shutil as sh\nimport logging\n\nimport propti as pr\n\nimport statistics as stat\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom scipy import interpolate\nimport scipy.signal as sign\nfrom scipy.stats import norm\nimport matplotlib as mpl\nmpl.use('pdf')\n\nimport matplotlib.pyplot as plt\n\n\nsetups = None # type: pr.SimulationSetupSet\nops = None # type: pr.ParameterSet\noptimiser = None # type: pr.OptimiserProperties\n\n\n# This function takes a Pandas data frame and a list with header labels.\n# Based on the header labels it looks for the shortest column. Afterwards\n# it takes the smallest and largest values of the provided columns (per\n# line) and collects them one list, each.\ndef calculate_min_mean_max_lists(data_frame, header_list):\n # Initialise the lists.\n list_min = []\n list_mean = []\n list_max = []\n\n # Determine the length of the shortest column.\n min_len_list = []\n\n # for column in range(len(header_list)):\n #\n # a = len(data_frame[header_list[column]])\n # min_len_list.append(a)\n\n for column in header_list:\n\n a = len(data_frame[column])\n min_len_list.append(a)\n\n min_len = min(min_len_list)\n\n # Iterate over all provided columns for the shortest length.\n for column in range(min_len):\n\n # Iterate over the columns by line and collect min and max values\n # in separate lists.\n interm_list = []\n for line in range(len(header_list)):\n interm_list.append(data_frame[header_list[line]][column])\n\n list_max.append(max(interm_list))\n list_mean.append(np.mean(interm_list))\n list_min.append(min(interm_list))\n\n return min_len, list_min, list_mean, list_max\n\n\ndef savgol_filter(x_values):\n filtered_data = sign.savgol_filter(x_values,\n 37,\n 3,\n deriv=0,\n delta=1.0,\n axis=-1,\n mode='interp',\n cval=0.0)\n return filtered_data\n\n\ndef interpolate_lists(raw_lists, x_increment=1, window=21, poly_order=3,\n new_data_file='proc_data',\n 
plot_file_name='average_smooth_plot',\n plot_title='Averaged and Sav-Gol smoothed',\n x_label='x label',\n y_label='y label'):\n\n n_lists = range(len(raw_lists))\n\n x_max_collection = []\n for i in n_lists:\n x_max_collection.append(max(raw_lists[i][0]))\n\n # Determine the length of the shortest data series to fit the other to it.\n x_min = int(min(x_max_collection))\n print('max col: {}'.format(x_max_collection))\n print('x_min: {}'.format(int(x_min)))\n x_new = np.arange(0, x_min, x_increment)\n\n # Interpolate each data series to fit to the same x-values.\n interpolated_data = [x_new]\n for i in n_lists:\n f = interpolate.interp1d(raw_lists[i][0], raw_lists[i][1])\n y_new = f(x_new)\n interpolated_data.append(y_new)\n\n # Calculate the average over all lists per x-value.\n data_mean = []\n data_median = []\n for i in range(len(interpolated_data[0])):\n data_to_be_averaged = []\n for j in n_lists[0:]:\n new_element = interpolated_data[j+1][i]\n data_to_be_averaged.append(new_element)\n\n element_mean = stat.mean(data_to_be_averaged)\n element_median = stat.median(data_to_be_averaged)\n\n data_mean.append(element_mean)\n data_median.append(element_median)\n\n # Smoothing of the new data, using Savitzky-Golay filter.\n data_smoothed = sign.savgol_filter(data_mean,\n window,\n poly_order)\n\n d1 = sign.savgol_filter(data_median,\n window,\n poly_order)\n processed_data1 = [x_new, d1]\n\n processed_data = [x_new, data_smoothed]\n\n # Create Pandas DataFrame with the new values and save them as CSV.\n proc1 = np.vstack((x_new, data_smoothed))\n proc2 = pd.DataFrame.from_records(proc1.transpose(),\n columns=['newx', 'newy']).set_index('newx')\n proc2.to_csv('{}.csv'.format(new_data_file))\n print(proc2.head())\n\n fig = plt.figure()\n plt.title(plot_title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n for i in n_lists:\n plt.plot(raw_lists[i][0], raw_lists[i][1],\n color='gray', label='Raw')\n\n plt.plot(processed_data[0], processed_data[1],\n color='black', 
label='Processed mean')\n\n plt.plot(processed_data1[0], processed_data1[1],\n color='red', label='Processed median', linestyle='--')\n\n plt.grid()\n plt.legend(loc='best')\n plt.savefig(plot_file_name)\n plt.close(fig)\n\n return interpolated_data\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"import logging\nimport sys\nimport os\nimport shutil\nimport numpy as np\nimport tempfile\nimport os.path\nfrom pathlib import Path\nimport spotpy\n\nfrom .data_structures import Parameter, ParameterSet, SimulationSetup, \\\n SimulationSetupSet, Relation, OptimiserProperties\n\nfrom .basic_functions import create_input_file, run_simulations, \\\n extract_simulation_data\n\n\n####################\n# SPOTPY SETUP CLASS\n\nclass SpotpySetup(object):\n def __init__(self,\n params: ParameterSet,\n setups: SimulationSetupSet,\n optimiser: OptimiserProperties):\n\n self.setups = setups\n self.params = params\n self.optimiser = optimiser\n self.spotpy_parameter = []\n\n for p in params:\n logging.debug(\"setup spotpy parameter: {}\".format(p.name))\n if p.distribution == 'uniform':\n\n optguess = None\n step = None\n if p.value is not None:\n optguess = p.value\n if p.max_increment is not None:\n step = p.max_increment\n\n cp = spotpy.parameter.Uniform(p.place_holder,\n p.min_value, p.max_value,\n step=step,\n optguess=optguess,\n minbound=p.min_value,\n maxbound=p.max_value)\n self.spotpy_parameter.append(cp)\n else:\n\n logging.error(\n 'parameter distribution function unknown: {}'.format(\n p.distribution))\n\n def parameters(self):\n return spotpy.parameter.generate(self.spotpy_parameter)\n\n def simulation(self, vector):\n logging.debug(\"current spotpy simulation vector: {}\".format(vector))\n\n # copy spotpy parameter vector to parameter set\n for i in range(len(vector)):\n self.params[i].value = vector[i]\n\n # update all simulation setup parameter sets\n for s in self.setups:\n s.model_parameter.update(self.params)\n\n # create run directories for all simulation setups\n for s in self.setups:\n if s.execution_dir_prefix:\n tmp_dir_root = s.execution_dir_prefix\n else:\n tmp_dir_root = os.path.join(os.getcwd(), s.work_dir)\n s.execution_dir = tempfile.mkdtemp(prefix='rundir_',\n dir=tmp_dir_root)\n create_input_file(s)\n\n # run all simulatons\n 
run_simulations(self.setups, self.optimiser.num_subprocesses)\n\n # gather simulation data\n for s in self.setups:\n logging.debug(\"start data extraction\")\n extract_simulation_data(s)\n\n\n # clean up temporary execution directories\n for s in self.setups:\n shutil.rmtree(s.execution_dir)\n\n # compute fitness values\n global_fitness_value = 0\n individual_fitness_values = list()\n\n for s in self.setups:\n for r in s.relations:\n current_fitness = r.fitness_weight * r.compute_fitness()\n global_fitness_value += current_fitness\n individual_fitness_values.append(current_fitness)\n\n # first element of returned list is the global fitness value\n # note: in general this should be the simulation data, which is returned\n # due to our data structure, the passing of the fitness values, i.e. result\n # of the objective function, is most convenient approach here\n return [global_fitness_value] + individual_fitness_values\n\n def evaluation(self):\n logging.debug(\"evaluation\")\n for s in self.setups:\n for r in s.relations:\n r.read_data(wd='.', target='experiment')\n\n # return dummy data\n # TODO: reconsider returning proper values\n return [1]\n\n def objectivefunction(self, simulation, evaluation, params):\n\n # the simulation function does not return simulation data, but directly the\n # fitness values, just pass these values\n fitness_value = simulation\n return fitness_value\n\ndef run_optimisation(params: ParameterSet,\n setups: SimulationSetupSet,\n opt: OptimiserProperties) -> ParameterSet:\n spot = SpotpySetup(params, setups, opt)\n # Check if a break file exists for restarting.\n break_file_name = Path('{}.break'.format(opt.db_name))\n break_point = 'write'\n if break_file_name.is_file():\n break_point = 'readandwrite'\n parallel = 'seq'\n if opt.mpi:\n parallel = 'mpi'\n if opt.algorithm == 'sceua':\n sampler = spotpy.algorithms.sceua(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel,\n db_precision=np.float64,\n 
breakpoint=break_point,\n backup_every_rep=opt.backup_every)\n\n ngs = opt.ngs\n if not ngs:\n ngs = len(params)\n # Set amount of parameters as default for number of complexes\n # if not explicitly specified.\n opt.ngs = ngs\n results=sampler.sample(opt.repetitions, ngs=ngs, max_loop_inc=opt.max_loop_inc)\n #results=sampler.sample(opt.repetitions, ngs=ngs)\n elif opt.algorithm == 'fscabc':\n sampler = spotpy.algorithms.fscabc(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel,\n db_precision=np.float64,\n breakpoint=break_point,\n backup_every_rep=opt.backup_every)\n eb = opt.eb\n if not eb:\n eb = 48\n # Set amount of parameters as default for number of complexes\n # if not explicitly specified.\n opt.eb = eb\n results=sampler.sample(opt.repetitions, eb=eb)\n elif opt.algorithm == 'abc':\n sampler = spotpy.algorithms.abc(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel,\n breakpoint=break_point,\n backup_every_rep=opt.backup_every)\n eb = opt.eb\n if not eb:\n eb = 48\n # Set amount of parameters as default for number of complexes\n # if not explicitly specified.\n opt.eb = eb\n results=sampler.sample(opt.repetitions, eb=eb)\n elif opt.algorithm == 'mc':\n sampler = spotpy.algorithms.mc(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n\n elif opt.algorithm == 'dream':\n sampler = spotpy.algorithms.dream(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n\n elif opt.algorithm == 'demcz':\n sampler = spotpy.algorithms.demcz(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n alt_objfun=None,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n elif opt.algorithm == 'mcmc':\n sampler = spotpy.algorithms.mcmc(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n alt_objfun=None,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n elif opt.algorithm == 'mle':\n sampler 
= spotpy.algorithms.mle(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n ## breakpoint=break_point,\n ## backup_every_rep=opt.backup_every)\n results=sampler.sample(opt.repetitions)\n\n elif opt.algorithm == 'sa':\n sampler = spotpy.algorithms.sa(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n elif opt.algorithm == 'rope':\n sampler = spotpy.algorithms.rope(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n\n elif opt.algorithm == 'mc':\n sampler = spotpy.algorithms.mc(spot,\n dbname=opt.db_name,\n dbformat=opt.db_type,\n parallel=parallel)\n results=sampler.sample(opt.repetitions)\n\n elif opt.algorithm == 'fast':\n sampler = spotpy.algorithms.fast(spot,\n dbname=opt.db_name,\n dbformat='csv',\n parallel=parallel,\n breakpoint=break_point,\n backup_every_rep=opt.backup_every)\n results=sampler.sample(opt.repetitions)\n else:\n return(print('No valid optimization algorithm selected'))\n\n if sampler.status.optimization_direction == 'minimize':\n pars=sampler.status.params_min\n elif sampler.status.optimization_direction == 'maximize':\n pars=sampler.status.params_max\n for i in range(len(params)):\n params[i].value = pars[i]\n for s in setups:\n s.model_parameter.update(params)\n return params\n\ndef test_spotpy_setup():\n p1 = Parameter(\"density\", \"RHO\", min_value=1.0, max_value=2.4,\n distribution='uniform')\n p2 = Parameter(\"cp\", place_holder=\"CP\", min_value=4.0, max_value=7.2,\n distribution='uniform')\n\n ps = ParameterSet()\n ps.append(p1)\n ps.append(p2)\n\n spot = SpotpySetup(ps)\n\n for p in spot.parameter:\n print(p.name, p.rndargs)\n\n\ndef test_spotpy_run():\n p1 = Parameter(\"ambient temperature\", place_holder=\"TMPA\", min_value=0,\n max_value=100,\n distribution='uniform', value=0)\n\n ps = ParameterSet()\n ps.append(p1)\n\n r1 = Relation()\n r1.model[0].label_x = \"Time\"\n 
r1.model[0].label_y = \"TEMP\"\n r1.model[0].file_name = 'TEST_devc.csv'\n r1.model[0].header_line = 1\n\n r1.experiment[0].x = np.linspace(0, 10, 20)\n r1.experiment[0].y = np.ones_like(r1.experiment[0].x) * 42.1\n\n r1.x_def = np.linspace(3.0, 8.5, 3)\n relations = [r1]\n\n s0 = SimulationSetup(name='ambient run',\n work_dir='test_spotpy',\n model_template=os.path.join('templates',\n 'template_basic_03.fds'),\n model_executable='fds',\n relations=relations,\n model_parameter=ps\n )\n setups = SimulationSetupSet()\n setups.append(s0)\n\n for s in setups:\n if not os.path.exists(s.work_dir):\n os.mkdir(s.work_dir)\n\n run_optimisation(ps, setups)\n\n\nif __name__ == \"__main__\":\n # test_spotpy_setup()\n test_spotpy_run()\n"
] |
[
[
"matplotlib.use",
"scipy.interpolate.interp1d",
"scipy.signal.savgol_filter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"numpy.vstack"
],
[
"numpy.linspace",
"numpy.ones_like"
]
] |
hoshigakky/shinma_bot
|
[
"23d21f7a69f8e0b33270072baeaa18a62f334469"
] |
[
"utils/opencv_util.py"
] |
[
"import logging\nimport cv2\nimport numpy as np\n\n# loggerの子loggerオブジェクトの宣言\nfrom const.constants import constant_list, FIRST_LEFT_Y, FIRST_RIGHT_Y, FIRST_LEFT_X, FIRST_RIGHT_X, SECOND_LEFT_Y, \\\n SECOND_RIGHT_Y, SECOND_LEFT_X, SECOND_RIGHT_X, TYPE_PATHS\n\nfrom const.constants import TMP_SCREEN_SHOT_PATH\n\nlogger = logging.getLogger(\"discord_bot\").getChild(__name__)\n\n# 拡大率\nRATE = 4\n\n\nclass OpenCVUtil:\n descriptors = {key: [] for key in TYPE_PATHS.keys()}\n key_points = {key: [] for key in TYPE_PATHS.keys()}\n images = {key: [] for key in TYPE_PATHS.keys()}\n\n @staticmethod\n def init_load():\n akaze = cv2.AKAZE_create()\n for path_key in TYPE_PATHS.keys():\n paths = TYPE_PATHS[path_key]\n for path in paths:\n weapon_type_img = cv2.imread(path)\n height = weapon_type_img.shape[0]\n width = weapon_type_img.shape[1]\n # 特徴量を抽出しやすいように画像を拡大\n weapon_type_img = cv2.resize(weapon_type_img, (int(width * RATE), int(height * RATE)))\n\n # 特徴量を保持\n kp1, des1 = akaze.detectAndCompute(weapon_type_img, None)\n OpenCVUtil.descriptors[path_key].append(des1)\n OpenCVUtil.key_points[path_key].append(kp1)\n OpenCVUtil.images[path_key].append(weapon_type_img)\n\n @staticmethod\n def _create_red_mask(image):\n # HSV色空間に変換\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # 赤色のHSVの値域1\n hsv_min = np.array([0, 64, 0])\n hsv_max = np.array([30, 255, 255])\n mask1 = cv2.inRange(hsv, hsv_min, hsv_max)\n\n # 赤色のHSVの値域2\n hsv_min = np.array([150, 64, 0])\n hsv_max = np.array([179, 255, 255])\n mask2 = cv2.inRange(hsv, hsv_min, hsv_max)\n\n # 赤色領域のマスク(255:赤色、0:赤色以外)\n mask = mask1 + mask2\n\n return mask\n\n @staticmethod\n def match_weapon_type(image_path: str, server_id) -> []:\n logger.debug(\"matching start\")\n target_image = cv2.imread(image_path)\n top_match_types = OpenCVUtil._match(target_image, server_id)\n return top_match_types\n\n @staticmethod\n def _match(target_img, server_id) -> []:\n match_types = []\n rects = OpenCVUtil._find_rectangle(target_img)\n rect_img = 
target_img[rects[0][1]:rects[1][1], rects[0][0]:rects[1][0]]\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/rect.png\", rect_img)\n rect_height = rect_img.shape[0]\n rect_width = rect_img.shape[1]\n first_place = rect_img[int(rect_height * FIRST_LEFT_Y):int(rect_height * FIRST_RIGHT_Y),\n int(rect_width * FIRST_LEFT_X):int(rect_width * FIRST_RIGHT_X)]\n second_place = rect_img[int(rect_height * SECOND_LEFT_Y):int(rect_height * SECOND_RIGHT_Y),\n int(rect_width * SECOND_LEFT_X):int(rect_width * SECOND_RIGHT_X)]\n\n # 特徴量を抽出しやすいように画像を拡大\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/resize_before_firstbot.png\", first_place)\n first_place = cv2.resize(first_place, (int(first_place.shape[1] * RATE), int(first_place.shape[0] * RATE)))\n second_place = cv2.resize(second_place, (int(second_place.shape[1] * RATE), int(second_place.shape[0] * RATE)))\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/resize_after_firstbot.png\", first_place)\n\n akaze = cv2.AKAZE_create()\n blocks = []\n for i in range(0, 4):\n concat_height = first_place.shape[0]\n # 神魔武器3つ入っているので3分割する\n concat_block_width = int(first_place.shape[1] / 4)\n # 武器1つ分の範囲で画像を分割\n blocks.append(first_place[0:concat_height, i * concat_block_width: (i + 1) * concat_block_width])\n for i in range(0, 4):\n concat_height = second_place.shape[0]\n concat_block_width = int(second_place.shape[1] / 4)\n blocks.append(second_place[0:concat_height, i * concat_block_width: (i + 1) * concat_block_width])\n\n # デバッグ用出力\n for i, block in enumerate(blocks):\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/block_\" + str(i) + \".png\", block)\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/firstbot.png\", first_place)\n OpenCVUtil._debug_image_writer(\"g://\" + str(server_id) + \"/secondbot.png\", second_place)\n\n bf = cv2.BFMatcher()\n for pic, block in enumerate(blocks):\n block_kp, block_des = akaze.detectAndCompute(block, None)\n more_score = 0\n 
more_type = \"\"\n for i, des_keys in enumerate(OpenCVUtil.descriptors.keys()):\n for des in OpenCVUtil.descriptors.get(des_keys):\n matches = bf.knnMatch(des, block_des, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.5 * n.distance:\n good.append([m])\n\n # debug\n # out1 = cv2.drawKeypoints(OpenCVUtil.images[i], OpenCVUtil.key_points[i], None)\n # out2 = cv2.drawKeypoints(block, block_kp, None)\n # img_kaze = cv2.drawMatchesKnn(out1, OpenCVUtil.key_points[i], out2, block_kp, good, None, flags=2)\n # OpenCVUtil._debug_image_writer(\"g://type\" + str(i) + '.png', out1)\n # OpenCVUtil._debug_image_writer('g://img_gray_debug_1' + str(pic) + str(i) + '.png', img_kaze)\n\n logger.info(str(i) + \" type : \" + str(len(good)))\n if len(good) > more_score:\n if i >= len(constant_list()):\n logger.info(constant_list()[i - len(constant_list())])\n more_score = len(good)\n more_type = constant_list()[i - len(constant_list())]\n else:\n logger.info(constant_list()[i])\n more_score = len(good)\n more_type = constant_list()[i]\n\n match_types.append(more_type)\n\n return match_types\n\n @staticmethod\n def _find_rectangle(image) -> ():\n logger.debug(\"find rectangle start\")\n image = cv2.GaussianBlur(image, (11, 11), 0)\n\n # BGRでの色抽出\n bgrLower = np.array([110, 110, 110]) # 抽出する色の下限(BGR)\n bgrUpper = np.array([200, 200, 200]) # 抽出する色の上限(BGR)\n img_mask = cv2.inRange(image, bgrLower, bgrUpper) # BGRからマスクを作成\n result = cv2.bitwise_and(image, image, mask=img_mask) # 元画像とマスクを合成\n height = result.shape[0]\n width = result.shape[1]\n\n OpenCVUtil._debug_image_writer(\"g://find_rect.png\", result)\n\n black_pixel = 0\n rect_start_pixel_w = -1\n rect_start_pixel_h = -1\n end = False\n # 左上の位置を特定\n for h in range(0, height - 1):\n for w in range(0, width - 1):\n b = result[h, w][0]\n g = result[h, w][1]\n r = result[h, w][2]\n if b != 0 and g != 0 and r != 0:\n logger.info(\"[\" + str(w) + \",\" + str(h) + \"] \" + str(b) + \", \" + str(g) + \", \" + str(r))\n if 
rect_start_pixel_w == -1:\n rect_start_pixel_w = w\n rect_start_pixel_h = h\n\n # 背景色が黒以外で連続横幅6割続いた場合は神魔の範囲とみなす\n black_pixel += 1\n if black_pixel > int(width * 0.6):\n end = True\n left = rect_start_pixel_w, rect_start_pixel_h\n break\n else:\n black_pixel -= 1\n\n rect_start_pixel_w = -1\n rect_start_pixel_h = -1\n black_pixel = 0\n if end is True:\n break\n\n # 右下の位置を特定\n rect_start_pixel_w = -1\n rect_start_pixel_h = -1\n end = False\n black_pixel = 0\n for h in range(height - 1, 0, -1):\n for w in range(width - 1, 0, -1):\n b = result[h, w][0]\n g = result[h, w][1]\n r = result[h, w][2]\n if b != 0 and g != 0 and r != 0:\n logger.info(\"[\" + str(w) + \",\" + str(h) + \"] \" + str(b) + \", \" + str(g) + \", \" + str(r))\n if rect_start_pixel_w == -1:\n rect_start_pixel_w = w\n rect_start_pixel_h = h\n # 背景色が黒以外で連続横幅6割続いた場合は神魔の範囲とみなす\n black_pixel += 1\n if black_pixel > int(width * 0.6):\n end = True\n right = rect_start_pixel_w, rect_start_pixel_h\n break\n else:\n black_pixel -= 1\n\n rect_start_pixel_w = -1\n rect_start_pixel_h = -1\n black_pixel = 0\n if end is True:\n break\n\n return left, right\n\n @staticmethod\n def _find_rectangle2(image) -> ():\n image = cv2.GaussianBlur(image, (11, 11), 0)\n\n # BGRでの色抽出\n bgrLower = np.array([110, 110, 110]) # 抽出する色の下限(BGR)\n bgrUpper = np.array([200, 200, 200]) # 抽出する色の上限(BGR)\n img_mask = cv2.inRange(image, bgrLower, bgrUpper) # BGRからマスクを作成\n result = cv2.bitwise_and(image, image, mask=img_mask) # 元画像とマスクを合成\n result = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)\n\n contours, hierarchy = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # 各輪郭に対する処理\n area_max = 0\n rect = None\n for i in range(0, len(contours)):\n # 輪郭の領域を計算\n area = cv2.contourArea(contours[i])\n\n # 一番大きい領域を対象とする\n if area_max < area:\n if len(contours[i]) > 0:\n area_max = area\n rect = contours[i]\n\n # 外接矩形\n x, y, w, h = cv2.boundingRect(rect)\n\n # 外接矩形毎に画像を保存\n # OpenCVUtil._debug_image_writer(\"g://find_rect2.png\", 
image[y:y + h, x:x + w])\n return (x, y), (x + w, y + h)\n\n @staticmethod\n def _debug_image_writer(path: str, image):\n cv2.imwrite(path, image)\n"
] |
[
[
"numpy.array"
]
] |
franneck94/UdemyAI
|
[
"bb3decc35ec626a09edf0abdbfbe7c36dac6179a",
"bb3decc35ec626a09edf0abdbfbe7c36dac6179a"
] |
[
"Chapter10_DeepQNetworks/FrozenLake/frozenLakeDqn.py",
"Chapter4_OpenAIGym/gymGamesAgent.py"
] |
[
"import numpy as np\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\n\n\nclass DQN(Model):\n def __init__(self, state_shape: int, num_actions: int, learning_rate: float):\n super().__init__()\n self.state_shape = state_shape\n self.num_actions = num_actions\n self.learning_rate = learning_rate\n self.internal_model = self.build_model()\n\n def build_model(self) -> Model:\n input_state = Input(shape=self.state_shape)\n x = Dense(units=12)(input_state)\n x = Activation(\"relu\")(x)\n x = Dense(units=24)(x)\n x = Activation(\"relu\")(x)\n q_value_pred = Dense(self.num_actions)(x)\n model = Model(inputs=input_state, outputs=q_value_pred)\n model.compile(loss=\"mse\", optimizer=Adam(learning_rate=self.learning_rate))\n return model\n\n def call(self, inputs: np.ndarray) -> np.ndarray:\n return self.internal_model(inputs).numpy()\n\n def fit(self, states: np.ndarray, q_values: np.ndarray):\n self.internal_model.fit(x=states, y=q_values, verbose=0)\n\n def update_model(self, other_model: Model):\n self.internal_model.set_weights(other_model.get_weights())\n\n def load_model(self, path: str):\n self.internal_model.load_weights(path)\n\n def save_model(self, path: str):\n self.internal_model.save_weights(path)\n\n\nif __name__ == \"__main__\":\n dqn = DQN(state_shape=4, num_actions=2, learning_rate=0.001)\n dqn.internal_model.summary()\n",
"import gym\nimport numpy as np\n\n\nclass Agent:\n def __init__(self, env):\n self.env = env\n\n def get_action(self):\n action = self.env.action_space.sample()\n return action\n\n def play(self, episodes, render=True):\n rewards = [0.0 for i in range(episodes)]\n\n for episode in range(episodes):\n state = self.env.reset()\n total_reward = 0.0\n\n while True:\n if render:\n self.env.render()\n action = self.get_action()\n state, reward, done, _ = self.env.step(action)\n total_reward += reward\n if done:\n rewards[episode] = total_reward\n break\n\n return rewards\n\n\nif __name__ == \"__main__\":\n games = [\n \"CartPole-v1\",\n \"MountainCar-v0\",\n \"PongNoFrameskip-v4\",\n \"Breakout-v0\",\n ]\n\n for game in games:\n env = gym.make(game)\n agent = Agent(env)\n rewards = agent.play(episodes=10, render=True)\n\n rewards_mean = np.mean(rewards)\n rewards_min = np.min(rewards)\n rewards_max = np.max(rewards)\n\n print(\"Rewards Mean: \", rewards_mean)\n print(\"Rewards Min: \", rewards_min)\n print(\"Rewards Max: \", rewards_max)\n"
] |
[
[
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.Adam"
],
[
"numpy.max",
"numpy.min",
"numpy.mean"
]
] |
nuwang/tools-iuc
|
[
"e6dda21d0488a7792a672db197c7937e8828885b"
] |
[
"tools/variant_analyzer/mut2read.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"mut2read.py\n\nAuthor -- Gundula Povysil\nContact -- povysil@bioinf.jku.at\n\nTakes a tabular file with mutations and a BAM file as input and prints\nall tags of reads that carry the mutation to a user specified output file.\nCreates fastq file of reads of tags with mutation.\n\n======= ========== ================= ================================\nVersion Date Author Description\n0.2.1 2019-10-27 Gundula Povysil -\n======= ========== ================= ================================\n\nUSAGE: python mut2read.py DCS_Mutations.tabular DCS.bam Aligned_Families.tabular Interesting_Reads.fastq\n tag_count_dict.json\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport numpy as np\nimport pysam\n\n\ndef make_argparser():\n parser = argparse.ArgumentParser(description='Takes a tabular file with mutations and a BAM file as input and prints all tags of reads that carry the mutation to a user specified output file and creates a fastq file of reads of tags with mutation.')\n parser.add_argument('--mutFile',\n help='TABULAR file with DCS mutations.')\n parser.add_argument('--bamFile',\n help='BAM file with aligned DCS reads.')\n parser.add_argument('--familiesFile',\n help='TABULAR file with aligned families.')\n parser.add_argument('--outputFastq',\n help='Output FASTQ file of reads with mutations.')\n parser.add_argument('--outputJson',\n help='Output JSON file to store collected data.')\n return parser\n\n\ndef mut2read(argv):\n parser = make_argparser()\n args = parser.parse_args(argv[1:])\n\n file1 = args.mutFile\n file2 = args.bamFile\n file3 = args.familiesFile\n outfile = args.outputFastq\n json_file = args.outputJson\n\n if os.path.isfile(file1) is False:\n sys.exit(\"Error: Could not find '{}'\".format(file1))\n\n if os.path.isfile(file2) is False:\n sys.exit(\"Error: Could not find '{}'\".format(file2))\n\n if os.path.isfile(file3) is False:\n sys.exit(\"Error: Could not find '{}'\".format(file3))\n\n # read mut 
file\n with open(file1, 'r') as mut:\n mut_array = np.genfromtxt(mut, skip_header=1, delimiter='\\t', comments='#', dtype='string')\n\n # read dcs bam file\n # pysam.index(file2)\n bam = pysam.AlignmentFile(file2, \"rb\")\n\n # get tags\n tag_dict = {}\n cvrg_dict = {}\n\n if len(mut_array) == 13:\n mut_array = mut_array.reshape((1, len(mut_array)))\n\n for m in range(len(mut_array[:, 0])):\n print(str(m + 1) + \" of \" + str(len(mut_array[:, 0])))\n chrom = mut_array[m, 1]\n stop_pos = mut_array[m, 2].astype(int)\n chrom_stop_pos = str(chrom) + \"#\" + str(stop_pos)\n ref = mut_array[m, 9]\n alt = mut_array[m, 10]\n\n dcs_len = []\n\n for pileupcolumn in bam.pileup(chrom.tobytes(), stop_pos - 2, stop_pos, max_depth=100000000):\n\n if pileupcolumn.reference_pos == stop_pos - 1:\n count_alt = 0\n count_ref = 0\n count_indel = 0\n count_n = 0\n count_other = 0\n count_lowq = 0\n print(\"unfiltered reads=\", pileupcolumn.n, \"filtered reads=\", len(pileupcolumn.pileups),\n \"difference= \", len(pileupcolumn.pileups) - pileupcolumn.n)\n for pileupread in pileupcolumn.pileups:\n if not pileupread.is_del and not pileupread.is_refskip:\n # query position is None if is_del or is_refskip is set.\n nuc = pileupread.alignment.query_sequence[pileupread.query_position]\n dcs_len.append(len(pileupread.alignment.query_sequence))\n if nuc == alt:\n count_alt += 1\n tag = pileupread.alignment.query_name\n if tag in tag_dict:\n tag_dict[tag][chrom_stop_pos] = alt\n else:\n tag_dict[tag] = {}\n tag_dict[tag][chrom_stop_pos] = alt\n elif nuc == ref:\n count_ref += 1\n elif nuc == \"N\":\n count_n += 1\n elif nuc == \"lowQ\":\n count_lowq += 1\n else:\n count_other += 1\n else:\n count_indel += 1\n dcs_median = np.median(np.array(dcs_len))\n cvrg_dict[chrom_stop_pos] = (count_ref, count_alt, dcs_median)\n\n print(\"coverage at pos %s = %s, ref = %s, alt = %s, other bases = %s, N = %s, indel = %s, low quality = %s, median length of DCS = %s\\n\" %\n (pileupcolumn.pos, count_ref + 
count_alt, count_ref, count_alt, count_other, count_n,\n count_indel, count_lowq, dcs_median))\n bam.close()\n\n with open(json_file, \"w\") as f:\n json.dump((tag_dict, cvrg_dict), f)\n\n # create fastq from aligned reads\n with open(outfile, 'w') as out:\n with open(file3, 'r') as families:\n for line in families:\n line = line.rstrip('\\n')\n splits = line.split('\\t')\n tag = splits[0]\n\n if tag in tag_dict:\n str1 = splits[4]\n curr_seq = str1.replace(\"-\", \"\")\n str2 = splits[5]\n curr_qual = str2.replace(\" \", \"\")\n\n out.write(\"@\" + splits[0] + \".\" + splits[1] + \".\" + splits[2] + \"\\n\")\n out.write(curr_seq + \"\\n\")\n out.write(\"+\" + \"\\n\")\n out.write(curr_qual + \"\\n\")\n\n\nif __name__ == '__main__':\n sys.exit(mut2read(sys.argv))\n"
] |
[
[
"numpy.array",
"numpy.genfromtxt"
]
] |
GSNCodes/CSCI-5551-PackBot
|
[
"5c0269c9f499b271b7961839f1c0bffb273572f3"
] |
[
"Aruco/aruco_test.py"
] |
[
"import argparse\nimport imutils\nfrom imutils.video import VideoStream\nimport time\nimport cv2\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# all tags supported by cv2\nARUCO_DICT = {\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\n}\n\narucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[\"DICT_6X6_250\"])\narucoParams = cv2.aruco.DetectorParameters_create()\nboard = cv2.aruco.CharucoBoard_create(6, 4, 1, 0.8, arucoDict)\n\n\n# imboard = board.draw((4000, 4000))\n# fig = plt.figure()\n# ax = fig.add_subplot(1,1,1)\n# plt.imshow(imboard, cmap = \"gray\", interpolation = \"nearest\")\n# ax.axis(\"off\")\n# #cv2.imwrite(imagesFolder + \"/chessboard.tiff\",imboard)\n# plt.savefig(\"./chessboard.pdf\")\n# plt.grid()\n# plt.show()\n\n# fig = plt.figure()\n# nx = 6\n# ny = 4\n# for i in range(1, nx*ny+1):\n# ax = fig.add_subplot(ny,nx, i)\n# img = cv2.aruco.drawMarker(arucoDict,i, 700)\n# plt.imshow(img, cmap = \"gray\", interpolation = \"nearest\")\n# ax.axis(\"off\")\n\n# # 
plt.savefig(\"newboard.pdf\")\n# plt.show()\n\ndef read_chessboards(images):\n \"\"\"\n Charuco base pose estimation.\n \"\"\"\n print(\"POSE ESTIMATION STARTS:\")\n allCorners = []\n allIds = []\n decimator = 0\n\n for im in images:\n print(\"=> Processing image {0}\".format(im))\n frame = cv2.imread(im, 0)\n #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\n res = cv2.aruco.detectMarkers(frame, arucoDict)\n\n if len(res[0])>0:\n res2 = cv2.aruco.interpolateCornersCharuco(res[0],res[1],frame,board)\n if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:\n allCorners.append(res2[1])\n allIds.append(res2[2])\n\n decimator+=1\n\n imsize = frame.shape\n return allCorners,allIds,imsize\n\ndef calibrate_camera(allCorners,allIds,imsize):\n \"\"\"\n Calibrates the camera using the dected corners.\n \"\"\"\n print(\"CAMERA CALIBRATION\")\n\n cameraMatrixInit = np.array([[ 2000., 0., imsize[0]/2.],\n [ 0., 2000., imsize[1]/2.],\n [ 0., 0., 1.]])\n\n distCoeffsInit = np.zeros((5,1))\n flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL)\n (ret, camera_matrix, distortion_coefficients0,\n rotation_vectors, translation_vectors,\n stdDeviationsIntrinsics, stdDeviationsExtrinsics,\n perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(\n charucoCorners=allCorners,\n charucoIds=allIds,\n board=board,\n imageSize=imsize,\n cameraMatrix=cameraMatrixInit,\n distCoeffs=distCoeffsInit,\n flags=flags,\n criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))\n\n return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors\n\ndef detectFromStream(size_of_marker, mtx, dist):\n vs = VideoStream(src=0).start()\n time.sleep(2.0)\n\n # loop over the frames from the video stream\n while True:\n # grab the frame from the threaded video stream and resize it\n # to have a maximum width of 1000 pixels\n frame = vs.read()\n #frame = imutils.resize(frame, width=1000)\n # plt.imshow(frame)\n # 
plt.show()\n # continue\n # detect ArUco markers in the input frame\n (corners, ids, rejected) = cv2.aruco.detectMarkers(frame, arucoDict, parameters=arucoParams)\n rvecs,tvecs,_ = cv2.aruco.estimatePoseSingleMarkers(corners, size_of_marker , mtx, dist)\n print(\"HERE\")\n if tvecs is not None:\n msg = []\n for t in tvecs:\n vec = []\n for x in t:\n for val in x:\n vec.append(val)\n msg.append(vec)\n print(msg)\n # verify *at least* one ArUco marker was detected\n if len(corners) > 0:\n # flatten the ArUco IDs list\n ids = ids.flatten()\n # loop over the detected ArUCo corners\n i = 0\n for (markerCorner, markerID) in zip(corners, ids):\n # extract the marker corners (which are always returned\n # in top-left, top-right, bottom-right, and bottom-left\n # order)\n corners = markerCorner.reshape((4, 2))\n (topLeft, topRight, bottomRight, bottomLeft) = corners\n # convert each of the (x, y)-coordinate pairs to integers\n topRight = (int(topRight[0]), int(topRight[1]))\n bottomRight = (int(bottomRight[0]), int(bottomRight[1]))\n bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))\n topLeft = (int(topLeft[0]), int(topLeft[1]))\n # draw the bounding box of the ArUCo detection\n cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)\n cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)\n cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)\n cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)\n # compute and draw the center (x, y)-coordinates of the\n # ArUco marker\n cX = int((topLeft[0] + bottomRight[0]) / 2.0)\n cY = int((topLeft[1] + bottomRight[1]) / 2.0)\n cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)\n # draw the ArUco marker ID on the frame\n cv2.putText(frame, str(markerID),\n (topLeft[0], topLeft[1] - 15),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 255, 0), 2)\n cv2.putText(frame, str(int(np.linalg.norm(tvecs[i]))) + \"cm\",\n (topLeft[0], topLeft[1] - 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n \n i += 1\n\n # if tvecs is not None:\n # for i,v 
in enumerate(tvecs):\n # print(ids[i], \": \", np.linalg.norm(v))\n\n # plt.imshow(frame)\n # plt.show()\n # continue\n \n # show the output frame\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n time.sleep(.1)\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n # do a bit of cleanup\n cv2.destroyAllWindows()\n vs.stop()\n\nif __name__ == \"__main__\":\n #detectFromStream()\n imagesFolder = \"./images/\"\n images = [imagesFolder + f for f in os.listdir(imagesFolder) if f.startswith(\"img_\")]\n\n allCorners, allIds, imsize = read_chessboards(images)\n\n ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners,allIds,imsize)\n\n size_of_marker = 3.9\n detectFromStream(size_of_marker, mtx, dist)\n"
] |
[
[
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
vlasenkoalexey/tensorflow_serving_benchmark
|
[
"c8b9c26ab6026cb91bf4a5183e0f4bd182b1888f"
] |
[
"clients/triton_rest.py"
] |
[
"\"\"\"Client for Triton Inference Server using REST API.\n\nReferences:\n-\nhttps://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest\n-\nhttps://github.com/triton-inference-server/client/tree/master/src/python/examples\n-\nhttps://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py\n\"\"\"\n\nimport json\nimport time\nimport threading\nimport distribution\nimport clients.base_rest_client\nimport clients.utils\nimport tensorflow.compat.v1 as tf\nimport requests as r\nimport numpy as np\nimport tritonclient.http as triton_httpclient\nimport tritonclient.utils as triton_utils\nfrom tensorflow.python.framework import dtypes\n\n\nclass TritonRest(clients.base_rest_client.BaseRestClient):\n\n def generate_rest_request_from_dictionary(self, row_dict):\n triton_request_inputs = []\n for key, value in row_dict.items():\n t = clients.utils.get_type(value, self._default_float_type,\n self._default_int_type)\n if t == np.object_:\n value = clients.utils.map_multi_dimensional_list(\n value, lambda s: s.encode(\"utf-8\"))\n numpy_value = np.array(value, dtype=t)\n triton_request_input = triton_httpclient.InferInput(\n key, list(numpy_value.shape), triton_utils.np_to_triton_dtype(t))\n triton_request_input.set_data_from_numpy(\n numpy_value, binary_data=True) # binary_data=True by default\n triton_request_inputs.append(triton_request_input)\n # https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.py#L81\n # Returns tuple - request and request len to pass in Infer-Header-Content-Length header\n (request, json_size) = triton_httpclient._get_inference_request(\n inputs=triton_request_inputs,\n request_id=\"\",\n outputs=None,\n sequence_id=0,\n sequence_start=0,\n sequence_end=0,\n priority=0,\n timeout=None)\n\n headers = {}\n if json_size:\n headers[\"Inference-Header-Content-Length\"] = str(json_size)\n 
return (request, headers)\n\n def get_requests_from_dictionary(self, path):\n rows = []\n with tf.gfile.GFile(path, \"r\") as f:\n for line in f:\n row_dict = eval(line)\n rows.append(self.generate_rest_request_from_dictionary(row_dict))\n return rows\n\n def get_requests_from_tfrecord(self, path, count, batch_size):\n raise NotImplementedError()\n\n def get_requests_from_file(self, path):\n raise NotImplementedError()\n\n def get_uri(self):\n if self._host.startswith(\"http\"):\n return self._host\n else:\n # https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest\n if self._model_version:\n return f\"http://{self._host}:{self._port}/v2/models/{self._model_name}/versions/{self._model_version}/infer\"\n else:\n return f\"http://{self._host}:{self._port}/v2/models/{self._model_name}/infer\"\n"
] |
[
[
"numpy.array",
"tensorflow.compat.v1.gfile.GFile"
]
] |
jbarrow/variational-item-response-theory-public
|
[
"1e40eb2685908c48a7111ad64971024fe4eb0110"
] |
[
"src/torch_core/infer.py"
] |
[
"\"\"\"Create infer_dict for VIBO_ models.\"\"\"\n\nimport os\nfrom tqdm import tqdm\n\nimport torch\nfrom src.torch_core.models import (\n VIBO_1PL, \n VIBO_2PL, \n VIBO_3PL,\n)\nfrom src.datasets import load_dataset\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('checkpoint', type=str)\n args = parser.parse_args()\n\n checkpoint_path = args.checkpoint\n checkpoint = torch.load(checkpoint_path)\n state_dict = checkpoint['model_state_dict']\n args = checkpoint['args']\n\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n if args.cuda: torch.cuda.set_device(args.gpu_device)\n\n if args.irt_model == '1pl':\n model_class = VIBO_1PL\n elif args.irt_model == '2pl':\n model_class = VIBO_2PL\n elif args.irt_model == '3pl':\n model_class = VIBO_3PL\n else:\n raise Exception(f'model {args.irt_model} not recognized')\n\n train_dataset = load_dataset(\n args.dataset,\n train = True,\n num_person = args.num_person,\n num_item = args.num_item,\n ability_dim = args.ability_dim,\n max_num_person = args.max_num_person,\n max_num_item = args.max_num_item,\n )\n\n num_person = train_dataset.num_person\n num_item = train_dataset.num_item\n \n model = model_class(\n args.ability_dim,\n num_item,\n hidden_dim = args.hidden_dim,\n ability_merge = args.ability_merge,\n conditional_posterior = args.conditional_posterior,\n generative_model = args.generative_model,\n ).to(device)\n\n def get_infer_dict(loader):\n model.eval()\n infer_dict = {}\n\n with torch.no_grad(): \n ability_mus, item_feat_mus = [], []\n ability_logvars, item_feat_logvars = [], []\n\n pbar = tqdm(total=len(loader))\n for _, response, _, mask in loader:\n mb = response.size(0)\n response = response.to(device)\n mask = mask.long().to(device)\n\n _, ability_mu, ability_logvar, _, item_feat_mu, item_feat_logvar = \\\n model.encode(response, mask)\n\n ability_mus.append(ability_mu.cpu())\n ability_logvars.append(ability_logvar.cpu())\n\n 
item_feat_mus.append(item_feat_mu.cpu())\n item_feat_logvars.append(item_feat_logvar.cpu())\n\n pbar.update()\n\n ability_mus = torch.cat(ability_mus, dim=0)\n ability_logvars = torch.cat(ability_logvars, dim=0)\n pbar.close()\n\n infer_dict['ability_mu'] = ability_mus\n infer_dict['ability_logvar'] = ability_logvars\n infer_dict['item_feat_mu'] = item_feat_mu\n infer_dict['item_feat_logvar'] = item_feat_logvar\n\n return infer_dict\n\n model.load_state_dict(state_dict)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size = args.batch_size, \n shuffle = False,\n )\n\n infer_dict = get_infer_dict(train_loader)\n checkpoint['infer_dict'] = infer_dict\n\n torch.save(checkpoint, checkpoint_path)\n"
] |
[
[
"torch.device",
"torch.cat",
"torch.save",
"torch.no_grad",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
dotslash/Ganymede
|
[
"e096258b95d42e9d622c06de0b30adc9c3fae2c0"
] |
[
"kg:jigsaw-unintended-bias-in-toxicity-classification/simple_model.py"
] |
[
"import datetime\nfrom typing import List, Tuple, Dict, Callable\n\nimport numpy as np\nimport pandas\nimport tensorflow as tf\nfrom keras.callbacks import LearningRateScheduler\nfrom keras.initializers import Constant\nfrom keras.layers import Bidirectional, GlobalMaxPooling1D\nfrom keras.layers import Embedding\nfrom keras.layers import GlobalAveragePooling1D, CuDNNLSTM\nfrom keras.layers import Input, Dense, SpatialDropout1D\nfrom keras.layers import add, concatenate\nfrom keras.models import Model\nfrom keras.preprocessing import text, sequence\n\n# Flags.\nLSTM_UNITS: int = 128\nDENSE_HIDDEN_UNITS: int = 4 * LSTM_UNITS\nMAX_SEQUENCE_LENGTH: int = 200\nTESTING_MODE: bool = False\nENABLE_TEXT_PROCESSING: bool = True\nBATCH_SIZE: int = 1024\nNUM_EPOCHS: int = 4\nTOKENIZER_NUM_WORDS: int = 50000\nNUM_MODELS: int = 2\n\n# Facts.\nCATEGORY_COLS: List[str] = ['severe_toxicity', 'obscene', 'identity_attack',\n 'insult', 'threat']\nIDENTITY_COLS: List[str] = [\n 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n\n# TODO(dotslash): Make the file paths work both in kaggle and locally.\nTRAIN_FILE: str = \\\n '../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv'\nTEST_FILE: str = \\\n '../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv'\nEMBEDDING_FILES: List[str] = [\n '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec',\n '../input/glove840b300dtxt/glove.840B.300d.txt']\n# Facts in the form of Data.\n# TEXT PROCESSING STUFF (NOT SURE IF THIS HELPS)\nCONTRACTION_MAPPING: Dict[str, str] = {\n \"'cause\": \"because\",\n \"ain't\": \"is not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'll\": 
\"he will\",\n \"he's\": \"he is\",\n \"here's\": \"here is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"i'd\": \"i would\",\n \"i'd've\": \"i would have\",\n \"i'll\": \"i will\",\n \"i'll've\": \"i will have\",\n \"i'm\": \"i am\",\n \"i've\": \"i have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"sha'n't\": \"shall not\",\n \"shan't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so's\": \"so as\",\n \"so've\": \"so have\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"this's\": \"this is\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n 
\"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you would\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\",\n \"you'll've\": \"you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\",\n}\nfor k, v in list(CONTRACTION_MAPPING.items()):\n CONTRACTION_MAPPING[k.capitalize()] = v.capitalize()\nQUOTES = ['’', '‘', '´', '`']\n__SYMBOLS_TO_ISOLATE = '.,?!-;*\"…:—()%#$&_/@\・ω+=”“[]^–>\\\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\\x96\\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁'\n__SYMBOLS_TO_REMOVE = 
'\\n🍕\\r🐵\\xa0\\ue014\\t\\uf818\\uf04a\\xad😢🐶️\\uf0e0😜😎👊\\u200b\\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\\u202a\\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\\x81エンジ故障\\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\\ufeff\\u2028😉😤⛺🙂\\u3000تحكسة👮💙فزط😏🍾🎉😞\\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\\u200a🌠🐟💫💰💎эпрд\\x95🖐🙅⛲🍰🤐👆🙌\\u2002💛🙁👀🙊🙉\\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\\x13🚬🤓\\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\\uf0b7\\uf04c\\x9f\\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\\u202d💤🍇\\ue613小土豆🏡❔⁉\\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\\x9c\\x9d🗑\\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\\u2000үսᴦᎥһͺ\\u2007հ\\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\\uf203\\uf09a\\uf222\\ue608\\uf202\\uf099\\uf469\\ue607\\uf410\\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\\uf10aლڡ🐦\\U0001f92f\\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼'\nISOLATE_DICT = {ord(c): f' {c} ' for c in __SYMBOLS_TO_ISOLATE}\nREMOVE_DICT = {ord(c): ' ' for c in __SYMBOLS_TO_REMOVE}\n\n\ndef pretty_time_delta(delta: datetime.timedelta):\n seconds = delta.total_seconds()\n sign_string = '-' if seconds < 0 else ''\n seconds = abs(int(seconds))\n days, 
seconds = divmod(seconds, 86400)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n if days > 0:\n return '%s%dd%dh%dm%ds' % (sign_string, days, hours, minutes, seconds)\n else:\n return '%s%dh%dm%ds' % (sign_string, hours, minutes, seconds)\n\n\nclass Logger:\n def __init__(self):\n self.start = datetime.datetime.now()\n\n def log(self, message: str) -> None:\n now = datetime.datetime.now()\n time_taken = pretty_time_delta(now - self.start)\n print('{} delta-{}: {}'.format(now, time_taken, message))\n\n\nlogger = Logger()\nlogger.log('Started.')\n\n\nclass TextCleaner:\n def __init__(self):\n self.specials: List[str] = QUOTES\n self.contractions_dict: Dict[str, str] = CONTRACTION_MAPPING\n self.clean_chars_translation_dict: Dict[int, str] = ISOLATE_DICT\n for k, v in REMOVE_DICT.items():\n self.clean_chars_translation_dict[k] = v\n\n def contractions(self, inp: str) -> str:\n for s in self.specials:\n inp = inp.replace(s, \"'\")\n return ' '.join(\n [self.contractions_dict.get(word, word) for word in inp.split(' ')])\n\n def contractions_lambda(self) -> Callable[[str], str]:\n return lambda x: self.contractions(x)\n\n def clean_chars(self, inp: str) -> str:\n return inp.translate(self.clean_chars_translation_dict)\n\n def clean_chars_lambda(self) -> Callable[[str], str]:\n return lambda x: self.clean_chars(x)\n\n\nclass EmbeddingStore:\n def __init__(self, embedding_file: str):\n f = open(embedding_file)\n logger.log('Loading embedding file:{}'.format(embedding_file))\n self.dict = dict()\n for line in f:\n if TESTING_MODE and len(self.dict) > 100000:\n # 100k words are enough if we are in test mode\n break\n values = line.strip().split(' ')\n word = values[0]\n try:\n coeffs = np.asarray(values[1:], dtype='float32')\n self.vector_length = len(coeffs)\n self.dict[word] = coeffs\n except Exception:\n logger.log('Failed parsing embedding for \"{}\"'.format(word))\n f.close()\n logger.log('Loaded embedding file: 
{}'.format(embedding_file))\n logger.log('Found %s word vectors.' % len(self.dict))\n\n def embedding(self, word: str) -> np.array:\n return self.dict.get(word, np.zeros(self.vector_length))\n\n\ndef get_top_words(tokenizer: text.Tokenizer):\n ret = [(v, k) for k, v in tokenizer.index_word.items()]\n return ret[:tokenizer.num_words]\n\n\ndef binary_accuracy(y_true: tf.Tensor, y_pred: tf.Tensor):\n import keras.backend as kb\n return kb.mean(kb.equal(kb.round(y_true), kb.round(y_pred)))\n\n\ndef build_model(embedding_matrix: np.array, num_other_results: int):\n inp = Input(shape=(MAX_SEQUENCE_LENGTH,))\n x = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],\n embeddings_initializer=Constant(embedding_matrix),\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False)(inp)\n x = SpatialDropout1D(0.2)(x)\n x = Bidirectional(CuDNNLSTM(LSTM_UNITS, return_sequences=True))(x)\n x = Bidirectional(CuDNNLSTM(LSTM_UNITS, return_sequences=True))(x)\n x = concatenate([\n GlobalMaxPooling1D()(x),\n GlobalAveragePooling1D()(x)])\n x = add([x, Dense(DENSE_HIDDEN_UNITS, activation='relu')(x)])\n x = add([x, Dense(DENSE_HIDDEN_UNITS, activation='relu')(x)])\n result = Dense(1, activation='sigmoid')(x)\n other_results = Dense(num_other_results, activation='sigmoid')(x)\n model = Model(inputs=inp, outputs=[result, other_results])\n model.compile(loss='binary_crossentropy', optimizer='adam',\n metrics=['acc', binary_accuracy])\n return model\n\n\ndef print_diff(s1: pandas.Series, s2: pandas.Series) -> None:\n diff: pandas.Series = (s1 == s2)\n logger.log('diff')\n print(diff.value_counts())\n\n\n# TODO(dotslash): Create a container type for return value of this\n# function.\ndef load_train_data() -> Tuple[np.array, np.array,\n np.array, np.array,\n pandas.DataFrame, pandas.DataFrame,\n text.Tokenizer]:\n # Load Training and Testing data.\n train_data: pandas.DataFrame = pandas.read_csv(TRAIN_FILE)\n test_data: pandas.DataFrame = pandas.read_csv(TEST_FILE)\n 
np.random.shuffle(train_data.values)\n logger.log('Loaded train and test data.')\n\n if TESTING_MODE:\n train_data = train_data.head(10000)\n test_data = test_data.head(10000)\n\n if ENABLE_TEXT_PROCESSING:\n # eliminate contractions\n logger.log('Processing text.')\n cleaner = TextCleaner()\n orig: pandas.Series = train_data['comment_text'].copy(deep=True)\n train_data['comment_text'] = \\\n train_data['comment_text'].apply(cleaner.contractions_lambda())\n test_data['comment_text'] = \\\n test_data['comment_text'].apply(cleaner.contractions_lambda())\n print_diff(train_data['comment_text'], orig)\n # remove junk chars\n train_data['comment_text'] = \\\n train_data['comment_text'].apply(cleaner.clean_chars_lambda())\n test_data['comment_text'] = \\\n test_data['comment_text'].apply(cleaner.clean_chars_lambda())\n print_diff(train_data['comment_text'], orig)\n logger.log('Processed text.')\n\n # Trim the train data and keep only the useful columns.\n useful_cols: List[str] = \\\n ['id', 'comment_text', 'target'] + CATEGORY_COLS + IDENTITY_COLS\n train_data: pandas.DataFrame = train_data[useful_cols]\n print('Sample training data\\n' + train_data.head().to_string())\n print('Sample test data\\n' + test_data.head().to_string())\n\n # Create a tokenizer based on train and test data.\n tokenizer: text.Tokenizer = text.Tokenizer(num_words=TOKENIZER_NUM_WORDS)\n tokenizer.fit_on_texts(list(train_data['comment_text']) + \\\n list(test_data['comment_text']))\n logger.log('Fit text tokens.')\n\n # Prepare X, Y for training and testing.\n # We will convert the text to a sequence using the tokenizer.\n train_seq = tokenizer.texts_to_sequences(list(train_data['comment_text']))\n train_seq = sequence.pad_sequences(train_seq, maxlen=MAX_SEQUENCE_LENGTH)\n test_seq = tokenizer.texts_to_sequences(list(test_data['comment_text']))\n test_seq = sequence.pad_sequences(test_seq, maxlen=MAX_SEQUENCE_LENGTH)\n logger.log('Converted tokens to sequences.')\n\n x_train, y_train, 
y_other_train = \\\n train_seq, train_data['target'], train_data[CATEGORY_COLS]\n x_test = test_seq\n logger.log('Prepared and train, validation and test sets.')\n return x_train, y_train, y_other_train, x_test, train_data, test_data, tokenizer\n\n\ndef construct_embedding_matrix(tokenizer: text.Tokenizer) -> np.array:\n # Load embeddings from disk.\n embeddings = [EmbeddingStore(embedding_file)\n for embedding_file in EMBEDDING_FILES]\n # Construct a embedding matrix used for Embedding layer.\n embedding_dim = sum(embedding.vector_length for embedding in embeddings)\n tokenizer_words = get_top_words(tokenizer)\n embedding_matrix = np.zeros((len(tokenizer_words) + 1, embedding_dim))\n for word, ind in tokenizer_words:\n embedding_matrix[ind] = np.concatenate(\n [embedding.embedding(word) for embedding in embeddings])\n logger.log('Created embedding matrix.')\n return embedding_matrix\n\n\ndef main():\n x_train, y_train, y_other_train, x_test, train_data, \\\n test_data, tokenizer = load_train_data()\n embedding_matrix = construct_embedding_matrix(tokenizer)\n sample_weights: pandas.Series = pandas.Series(\n data=np.ones(len(x_train), dtype=np.float32))\n\n for column in IDENTITY_COLS:\n train_data[column] = np.where(train_data[column] >= 0.5, True, False)\n bool_target: pandas.Series = pandas.Series(\n data=np.where(train_data['target'] > 0.5, True, False))\n inv_bool_target: pandas.Series = ~bool_target\n train_id_columns_sum: pandas.Series = train_data[IDENTITY_COLS].sum(axis=1)\n inv_train_id_columns_sum: pandas.Series = (~train_data[IDENTITY_COLS]).sum(\n axis=1)\n # Focus more on the rows that have identity columns.\n sample_weights += train_id_columns_sum\n # Focus more on the false negatives\n sample_weights += (bool_target * inv_train_id_columns_sum)\n # Focus a lot more on the false positives\n sample_weights += ((inv_bool_target * train_id_columns_sum) * 5)\n sample_weights /= sample_weights.mean()\n\n # Fit the model.\n logger.log('Training model.')\n 
checkpoint_predictions = []\n weights = []\n for model_idx in range(NUM_MODELS):\n model = build_model(embedding_matrix, y_other_train.shape[-1])\n merge = np.concatenate((x_train, y_train, y_other_train), axis=1)\n np.random.shuffle(merge)\n x_train = merge[:,len(x_train[0])]\n y_train = merge[:len(x_train[0]), len(x_train[0])+1]\n y_other_train = merge[:len(x_train[0])]\n for global_epoch in range(NUM_EPOCHS):\n model.fit(\n x_train, [y_train, y_other_train],\n validation_split=0.1,\n batch_size=BATCH_SIZE,\n epochs=1,\n # One set of sample_weights for each output\n sample_weight=[sample_weights.values,\n sample_weights.values],\n # TODO(dotslash): How does this help?\n callbacks=[LearningRateScheduler(\n lambda _: (0.55 ** global_epoch) / 100.0, verbose=1)])\n logger.log('Trained model: {}.'.format(model_idx))\n weights.append(2 ** global_epoch)\n checkpoint_predictions.append(\n model.predict(x_test, batch_size=2048)[0])\n y_test = np.average(checkpoint_predictions, weights=weights, axis=0)\n logger.log('Predicted test set.')\n submission = pandas.DataFrame.from_dict({\n 'id': test_data.id,\n 'prediction': y_test.flatten()\n })\n submission.to_csv('submission.csv', index=False)\n logger.log('Done.')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.concatenate",
"numpy.asarray",
"numpy.zeros",
"numpy.random.shuffle",
"numpy.where",
"numpy.average",
"pandas.read_csv"
]
] |
heytitle/schnetpack
|
[
"6facf724e6e220053f4ba8d5b81744744d1abef3"
] |
[
"src/scripts/schnetpack_md17.py"
] |
[
"#!/usr/bin/env python\nimport argparse\nimport logging\nimport os\nfrom shutil import copyfile, rmtree\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom ase.data import atomic_numbers\nfrom torch.optim import Adam\nfrom torch.utils.data.sampler import RandomSampler\n\nimport schnetpack as spk\nfrom schnetpack.datasets import MD17\nfrom schnetpack.utils import compute_params, to_json, read_from_json\n\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n\n\ndef get_parser():\n \"\"\" Setup parser for command line arguments \"\"\"\n main_parser = argparse.ArgumentParser()\n\n ## command-specific\n cmd_parser = argparse.ArgumentParser(add_help=False)\n cmd_parser.add_argument('--cuda', help='Set flag to use GPU(s)', action='store_true')\n cmd_parser.add_argument('--parallel',\n help='Run data-parallel on all available GPUs (specify with environment variable'\n + ' CUDA_VISIBLE_DEVICES)', action='store_true')\n cmd_parser.add_argument('--batch_size', type=int,\n help='Mini-batch size for training and prediction (default: %(default)s)',\n default=100)\n\n ## training\n train_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])\n train_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')\n train_parser.add_argument('molecule', help='Selected molecule trajectory of MD17 collection',\n choices=MD17.existing_datasets)\n train_parser.add_argument('modelpath', help='Destination for models and logs')\n train_parser.add_argument('--seed', type=int, default=None, help='Set random seed for torch and numpy.')\n train_parser.add_argument('--overwrite', help='Remove previous model directory.', action='store_true')\n\n # data split\n train_parser.add_argument('--split_path', help='Path / destination of npz with data splits',\n default=None)\n train_parser.add_argument('--split', help='Give sizes of train and validation splits and use remaining for testing',\n type=int, nargs=2, default=[None, None])\n 
train_parser.add_argument('--max_epochs', type=int, help='Maximum number of training epochs (default: %(default)s)',\n default=5000)\n train_parser.add_argument('--lr', type=float, help='Initial learning rate (default: %(default)s)',\n default=1e-4)\n train_parser.add_argument('--lr_patience', type=int,\n help='Epochs without improvement before reducing the learning rate (default: %(default)s)',\n default=25)\n train_parser.add_argument('--lr_decay', type=float, help='Learning rate decay (default: %(default)s)',\n default=0.5)\n train_parser.add_argument('--lr_min', type=float, help='Minimal learning rate (default: %(default)s)',\n default=1e-6)\n train_parser.add_argument('--rho', type=float,\n help='Energy-force trade-off. For rho=0, use forces only. (default: %(default)s)',\n default=0.1)\n\n train_parser.add_argument('--logger', help='Choose logger for training process (default: %(default)s)',\n choices=['csv', 'tensorboard'], default='csv')\n train_parser.add_argument('--log_every_n_epochs', type=int,\n help='Log metrics every given number of epochs (default: %(default)s)',\n default=1)\n\n ## evaluation\n eval_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])\n eval_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')\n eval_parser.add_argument('molecule', help='Molecule trajectory',\n choices=MD17.existing_datasets)\n eval_parser.add_argument('modelpath', help='Path of stored model')\n eval_parser.add_argument('--split', help='Evaluate on trained model on given split',\n choices=['train', 'validation', 'test'], default=['test'], nargs='+')\n\n # model-specific parsers\n model_parser = argparse.ArgumentParser(add_help=False)\n\n ####### SchNet #######\n schnet_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])\n schnet_parser.add_argument('--features', type=int, help='Size of atom-wise representation (default: %(default)s)',\n default=256)\n schnet_parser.add_argument('--interactions', 
type=int, help='Number of interaction blocks (default: %(default)s)',\n default=6)\n schnet_parser.add_argument('--cutoff', type=float, default=5.,\n help='Cutoff radius of local environment (default: %(default)s)')\n schnet_parser.add_argument('--num_gaussians', type=int, default=25,\n help='Number of Gaussians to expand distances (default: %(default)s)')\n\n ####### wACSF ########\n wacsf_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])\n # wACSF parameters\n wacsf_parser.add_argument('--radial', type=int, default=22,\n help='Number of radial symmetry functions (default: %(default)s)')\n wacsf_parser.add_argument('--angular', type=int, default=5,\n help='Number of angular symmetry functions (default: %(default)s)')\n wacsf_parser.add_argument('--zetas', type=int, nargs='+', default=[1],\n help='List of zeta exponents used for angle resolution (default: %(default)s)')\n wacsf_parser.add_argument('--standardize', action='store_true',\n help='Standardize wACSF before atomistic network.')\n wacsf_parser.add_argument('--cutoff', type=float, default=5.0,\n help='Cutoff radius of local environment (default: %(default)s)')\n # Atomistic network parameters\n wacsf_parser.add_argument('--n_nodes', type=int, default=100,\n help='Number of nodes in atomic networks (default: %(default)s)')\n wacsf_parser.add_argument('--n_layers', type=int, default=2,\n help='Number of layers in atomic networks (default: %(default)s)')\n # Advances wACSF settings\n wacsf_parser.add_argument('--centered', action='store_true', help='Use centered Gaussians for radial functions')\n wacsf_parser.add_argument('--crossterms', action='store_true', help='Use crossterms in angular functions')\n wacsf_parser.add_argument('--behler', action='store_true', help='Switch to conventional ACSF')\n wacsf_parser.add_argument('--elements', default=['H', 'C', 'O'], nargs='+',\n help='List of elements to be used for symmetry functions (default: %(default)s).')\n\n ## setup subparser 
structure\n cmd_subparsers = main_parser.add_subparsers(dest='mode', help='Command-specific arguments')\n cmd_subparsers.required = True\n subparser_train = cmd_subparsers.add_parser('train', help='Training help')\n subparser_eval = cmd_subparsers.add_parser('eval', help='Eval help')\n\n subparser_export = cmd_subparsers.add_parser('export', help='Export help')\n subparser_export.add_argument('modelpath', help='Path of stored model')\n subparser_export.add_argument('destpath', help='Destination path for exported model')\n\n train_subparsers = subparser_train.add_subparsers(dest='model', help='Model-specific arguments')\n train_subparsers.required = True\n train_subparsers.add_parser('schnet', help='SchNet help', parents=[train_parser, schnet_parser])\n train_subparsers.add_parser('wacsf', help='wACSF help', parents=[train_parser, wacsf_parser])\n\n eval_subparsers = subparser_eval.add_subparsers(dest='model', help='Model-specific arguments')\n eval_subparsers.required = True\n eval_subparsers.add_parser('schnet', help='SchNet help', parents=[eval_parser, schnet_parser])\n eval_subparsers.add_parser('wacsf', help='wACSF help', parents=[eval_parser, wacsf_parser])\n\n return main_parser\n\n\ndef train(args, model, train_loader, val_loader, device):\n # setup hook and logging\n hooks = [\n spk.train.MaxEpochHook(args.max_epochs)\n ]\n\n # setup optimizer for training\n # to_opt = model.parameters()\n # Bugfix, since model will not train with requires grad variables\n to_opt = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = Adam(to_opt, lr=args.lr)\n\n schedule = spk.train.ReduceLROnPlateauHook(optimizer, patience=args.lr_patience, factor=args.lr_decay,\n min_lr=args.lr_min,\n window_length=1, stop_after_min=True)\n hooks.append(schedule)\n\n # index into model output: [energy, forces]\n metrics = [spk.metrics.MeanAbsoluteError(MD17.energies, \"y\"),\n spk.metrics.RootMeanSquaredError(MD17.energies, \"y\"),\n 
spk.metrics.MeanAbsoluteError(MD17.forces, \"dydx\"),\n spk.metrics.RootMeanSquaredError(MD17.forces, \"dydx\")]\n if args.logger == 'csv':\n logger = spk.train.CSVHook(os.path.join(args.modelpath, 'log'),\n metrics, every_n_epochs=args.log_every_n_epochs)\n hooks.append(logger)\n elif args.logger == 'tensorboard':\n logger = spk.train.TensorboardHook(os.path.join(args.modelpath, 'log'),\n metrics, every_n_epochs=args.log_every_n_epochs)\n hooks.append(logger)\n\n # setup loss function\n def loss(batch, result):\n ediff = batch[MD17.energies] - result[\"y\"]\n ediff = ediff ** 2\n\n fdiff = batch[MD17.forces] - result[\"dydx\"]\n fdiff = fdiff ** 2\n\n err_sq = args.rho * torch.mean(ediff.view(-1)) + (1 - args.rho) * torch.mean(fdiff.view(-1))\n return err_sq\n\n trainer = spk.train.Trainer(args.modelpath, model, loss, optimizer,\n train_loader, val_loader, hooks=hooks)\n trainer.train(device)\n\n\ndef evaluate(args, model, train_loader, val_loader, test_loader, device):\n header = ['Subset', 'Energy MAE', 'Energy RMSE',\n 'Force MAE', 'Force RMSE', 'Force Length MAE', 'Force Length RMSE', 'Force Angle MAE', 'Angle RMSE']\n\n metrics = [\n spk.metrics.MeanAbsoluteError(MD17.energies, \"y\"),\n spk.metrics.RootMeanSquaredError(MD17.energies, \"y\"),\n spk.metrics.MeanAbsoluteError(MD17.forces, \"dydx\"),\n spk.metrics.RootMeanSquaredError(MD17.forces, \"dydx\"),\n spk.metrics.LengthMAE(MD17.forces, \"dydx\"),\n spk.metrics.LengthRMSE(MD17.forces, \"dydx\"),\n spk.metrics.AngleMAE(MD17.forces, \"dydx\"),\n spk.metrics.AngleRMSE(MD17.forces, \"dydx\")\n ]\n\n results = []\n if 'train' in args.split:\n results.append(['training'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, train_loader, device)])\n\n if 'validation' in args.split:\n results.append(['validation'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, val_loader, device)])\n\n if 'test' in args.split:\n results.append(['test'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, 
test_loader, device)])\n\n header = ','.join(header)\n results = np.array(results)\n\n np.savetxt(os.path.join(args.modelpath, 'evaluation.csv'), results, header=header, fmt='%s', delimiter=',')\n\n\ndef evaluate_dataset(metrics, model, loader, device):\n for metric in metrics:\n metric.reset()\n\n for batch in loader:\n batch = {\n k: v.to(device)\n for k, v in batch.items()\n }\n result = model(batch)\n\n for metric in metrics:\n metric.add_batch(batch, result)\n\n results = [\n metric.aggregate() for metric in metrics\n ]\n return results\n\n\ndef get_model(args, atomref=None, mean=None, stddev=None, train_loader=None, parallelize=False, mode='train'):\n if args.model == 'schnet':\n representation = spk.representation.SchNet(args.features, args.features, args.interactions,\n args.cutoff, args.num_gaussians)\n atomwise_output = spk.atomistic.Energy(args.features, mean=mean, stddev=stddev, atomref=atomref,\n return_force=True, create_graph=True)\n model = spk.atomistic.AtomisticModel(representation, atomwise_output)\n\n elif args.model == 'wacsf':\n sfmode = ('weighted', 'Behler')[args.behler]\n # Convert element strings to atomic charges\n elements = frozenset((atomic_numbers[i] for i in sorted(args.elements)))\n representation = spk.representation.BehlerSFBlock(args.radial, args.angular, zetas=set(args.zetas),\n cutoff_radius=args.cutoff,\n centered=args.centered, crossterms=args.crossterms,\n elements=elements,\n mode=sfmode)\n logging.info(\"Using {:d} {:s}-type SF\".format(representation.n_symfuncs, sfmode))\n # Standardize representation if requested\n if args.standardize and mode == 'train':\n if train_loader is None:\n raise ValueError(\"Specification of a trainig_loader is required to standardize wACSF\")\n else:\n logging.info(\"Computing and standardizing symmetry function statistics\")\n else:\n train_loader = None\n\n representation = spk.representation.StandardizeSF(representation, train_loader, cuda=args.cuda)\n\n # Build HDNN model\n 
atomwise_output = spk.atomistic.ElementalEnergy(representation.n_symfuncs, n_hidden=args.n_nodes,\n n_layers=args.n_layers, mean=mean, stddev=stddev,\n atomref=atomref, return_force=True, create_graph=True,\n elements=elements)\n model = spk.atomistic.AtomisticModel(representation, atomwise_output)\n\n else:\n raise ValueError('Unknown model class:', args.model)\n\n if parallelize:\n model = nn.DataParallel(model)\n\n logging.info(\"The model you built has: %d parameters\" % compute_params(model))\n\n return model\n\n\nif __name__ == '__main__':\n parser = get_parser()\n args = parser.parse_args()\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n argparse_dict = vars(args)\n jsonpath = os.path.join(args.modelpath, 'args.json')\n\n if args.mode == 'train':\n if args.overwrite and os.path.exists(args.modelpath):\n logging.info('existing model will be overwritten...')\n rmtree(args.modelpath)\n\n if not os.path.exists(args.modelpath):\n os.makedirs(args.modelpath)\n\n to_json(jsonpath, argparse_dict)\n\n spk.utils.set_random_seed(args.seed)\n train_args = args\n else:\n train_args = read_from_json(jsonpath)\n\n # will download md17 if necessary, calculate_triples is required for wACSF angular functions\n logging.info('MD17 will be loaded...')\n md17 = MD17(args.datapath, args.molecule, download=True, parse_all=False, collect_triples=args.model == 'wacsf')\n\n # splits the dataset in test, val, train sets\n split_path = os.path.join(args.modelpath, 'split.npz')\n if args.mode == 'train':\n if args.split_path is not None:\n copyfile(args.split_path, split_path)\n\n logging.info('create splits...')\n data_train, data_val, data_test = md17.create_splits(*train_args.split, split_file=split_path)\n\n logging.info('load data...')\n train_loader = spk.data.AtomsLoader(data_train, batch_size=args.batch_size, sampler=RandomSampler(data_train),\n num_workers=4, pin_memory=True)\n val_loader = spk.data.AtomsLoader(data_val, batch_size=args.batch_size, num_workers=2, 
pin_memory=True)\n\n if args.mode == 'train':\n logging.info('calculate statistics...')\n mean, stddev = train_loader.get_statistics(MD17.energies, True)\n else:\n mean, stddev = None, None\n\n # Construct the model.\n model = get_model(train_args, mean=mean, stddev=stddev, train_loader=train_loader, parallelize=args.parallel,\n mode=args.mode).to(device)\n\n if args.mode == 'eval':\n if args.parallel:\n model.module.load_state_dict(\n torch.load(os.path.join(args.modelpath, 'best_model')))\n else:\n model.load_state_dict(\n torch.load(os.path.join(args.modelpath, 'best_model')))\n\n if args.mode == 'train':\n logging.info(\"training...\")\n train(args, model, train_loader, val_loader, device)\n logging.info(\"...training done!\")\n elif args.mode == 'eval':\n logging.info(\"evaluating...\")\n test_loader = spk.data.AtomsLoader(data_test, batch_size=args.batch_size,\n num_workers=2, pin_memory=True)\n evaluate(args, model, train_loader, val_loader, test_loader, device)\n logging.info(\"... done!\")\n else:\n print('Unknown mode:', args.mode)\n"
] |
[
[
"torch.device",
"numpy.array",
"torch.utils.data.sampler.RandomSampler",
"torch.optim.Adam",
"torch.nn.DataParallel"
]
] |
anikaanzum/NetworkDataAnalysis
|
[
"13f008233ccb4e7c16a576a6e068daf9c14510d6"
] |
[
"k-means.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Though the following import is not directly being used, it is required\n# for 3D projection to work\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import datasets\n\nnp.random.seed(5)\n\niris = datasets.load_iris()\nX = [12, 20, 28, 18, 29, 33, 24, 45, 45, 52, 51, 52, 55, 53, 55, 61, 64, 69, 72]\ny = [39, 36, 30, 52, 54, 46, 55, 59, 63, 70, 66, 63, 58, 23, 14, 8, 19, 7, 24]\n\nestimators = [('k_means_iris_8', KMeans(n_clusters=8)),\n ('k_means_iris_3', KMeans(n_clusters=3)),\n ('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,\n init='random'))]\n\nfignum = 1\ntitles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']\nfor name, est in estimators:\n fig = plt.figure(fignum, figsize=(4, 3))\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n est.fit(X)\n labels = est.labels_\n\n ax.scatter(X[:, 3], X[:, 0], X[:, 2],\n c=labels.astype(np.float), edgecolor='k')\n\n ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n ax.set_xlabel('Petal width')\n ax.set_ylabel('Sepal length')\n ax.set_zlabel('Petal length')\n ax.set_title(titles[fignum - 1])\n ax.dist = 12\n fignum = fignum + 1\n\n# Plot the ground truth\nfig = plt.figure(fignum, figsize=(4, 3))\nax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\nfor name, label in [('Setosa', 0),\n ('Versicolour', 1),\n ('Virginica', 2)]:\n ax.text3D(X[y == label, 3].mean(),\n X[y == label, 0].mean(),\n X[y == label, 2].mean() + 2, name,\n horizontalalignment='center',\n bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))\n# Reorder the labels to have colors matching the cluster results\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, 
edgecolor='k')\n\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nax.set_xlabel('Petal width')\nax.set_ylabel('Sepal length')\nax.set_zlabel('Petal length')\nax.set_title('Ground Truth')\nax.dist = 12\n\nfig.show()"
] |
[
[
"numpy.random.seed",
"sklearn.cluster.KMeans",
"numpy.choose",
"matplotlib.pyplot.figure",
"sklearn.datasets.load_iris"
]
] |
Berk035/SimStar-Racer
|
[
"6deec22b02ed80195bf854d1ccce164ebc724d13"
] |
[
"sac_example/simstarEnv.py"
] |
[
"import gym\nimport collections as col\nimport numpy as np \nimport time\nfrom gym import spaces\n\ntry:\n import simstar\nexcept ImportError:\n print(\"go to PythonAPI folder where setup.py is located\")\n print(\"python setup.py install\")\n\nclass SensoredVehicle(simstar.Vehicle):\n def __init__(self,vehicle:simstar.Vehicle, track_sensor,opponent_sensor):\n super().__init__(vehicle.client,vehicle._ID)\n self.track_sensor = track_sensor\n self.opponent_sensor = opponent_sensor\n\nclass SimstarEnv(gym.Env):\n def __init__(self, track=simstar.Environments.DutchGrandPrix, add_opponents=False, synronized_mode=False, num_opponents=6, speed_up=1, host=\"127.0.0.1\", port=8080):\n \n self.c_w = 0.01 # out of track penalty weight\n\n self.add_opponents = add_opponents # True: adds opponent vehicles; False: removes opponent vehicles\n self.number_of_opponents = num_opponents # agent_locations, agent_speeds, and lane_ids sizes has to be the same\n self.agent_locations = [-10, -20, -10, 0, 25, 0] # opponents' meters offset relative to ego vehicle\n self.agent_speeds = [45, 80, 55, 100, 40, 60] # opponent vehicle speeds in km/hr\n self.lane_ids = [1, 2, 3, 3, 2, 1] # make sure that the lane ids are not greater than number of lanes\n \n self.ego_lane_id = 2 # make sure that ego vehicle lane id is not greater than number of lanes\n self.ego_start_offset = 25 # ego vehicle's offset from the starting point of the road\n self.default_speed = 120 # km/hr\n self.set_ego_speed = 60 # km/hr\n self.road_width = 10 # meters\n\n self.track_sensor_size = 19\n self.opponent_sensor_size = 18\n\n self.time_step_slow = 0\n self.terminal_judge_start = 100 # if ego vehicle does not have progress for 100 steps, terminate\n self.termination_limit_progress = 6 # if progress of the ego vehicle is less than 6 for 100 steps, terminate\n\n # the type of race track to generate \n self.track_name = track\n \n self.synronized_mode = synronized_mode # simulator waits for update signal from client if 
enabled\n self.speed_up = speed_up # how faster should simulation run. up to 6x. \n self.host = host\n self.port = port\n \n self.hz = 10 # fixed control frequency \n self.fps = 60 # fixed simulation FPS\n self.tick_number_to_sample = self.fps/self.hz\n self.sync_step_num = int(self.tick_number_to_sample/self.speed_up)\n\n try:\n self.client = simstar.Client(host=self.host, port=self.port)\n self.client.ping()\n except simstar.TimeoutError or simstar.TransportError:\n raise simstar.TransportError(\"******* Make sure a Simstar instance is open and running at port %d*******\"%(self.port))\n \n self.client.open_env(self.track_name)\n \n print(\"[SimstarEnv] initializing environment\")\n time.sleep(5)\n\n # get main road\n self.road = None\n all_roads = self.client.get_roads()\n\n if len(all_roads) > 0:\n road_main = all_roads[0]\n road_id = road_main['road_id']\n self.road = simstar.RoadGenerator(self.client, road_id)\n\n # a list contaning all vehicles \n self.actor_list = []\n\n # disable lane change for automated actors\n self.client.set_lane_change_disabled(is_disabled=True)\n\n #input space. 
\n high = np.array([np.inf, np.inf, 1., 1.])\n low = np.array([-np.inf, -np.inf, 0., 0.])\n self.observation_space = spaces.Box(low=low, high=high)\n \n # action space: [steer, accel-brake]\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))\n self.default_action = [0.0, 1.0]\n\n self.last_step_time = time.time()\n self.apply_settings()\n\n def apply_settings(self):\n print(\"[SimstarEnv] sync: \",self.synronized_mode,\" speed up: \",self.speed_up)\n self.client.set_sync_timeout(10)\n self.client.set_sync_mode(self.synronized_mode, self.speed_up)\n\n def reset(self):\n print(\"[SimstarEnv] actors are destroyed\")\n time.sleep(0.5)\n\n self.time_step_slow = 0\n \n # delete all the actors \n self.client.remove_actors(self.actor_list)\n self.actor_list.clear()\n \n # spawn a vehicle\n if self.track_name == simstar.Environments.DutchGrandPrix:\n vehicle_pose = simstar.PoseData(-603.631592, -225.756531, -3.999999, yaw=20/50)\n self.main_vehicle = self.client.spawn_vehicle_to(vehicle_pose, initial_speed=0, set_speed=self.set_ego_speed, vehicle_type=simstar.EVehicleType.Sedan1)\n else:\n self.main_vehicle = self.client.spawn_vehicle(distance=self.ego_start_offset, lane_id=self.ego_lane_id, initial_speed=0, set_speed=self.set_ego_speed, vehicle_type = simstar.EVehicleType.Sedan1)\n \n self.simstar_step()\n print(\"[SimstarEnv] main vehicle ID: \",self.main_vehicle.get_ID())\n\n # attach appropriate sensors to the vehicle\n track_sensor_settings = simstar.DistanceSensorParameters(\n enable = True, draw_debug = False, add_noise = False, position=simstar.PositionRPC(0.0, 0.0, -0.20), \n orientation=simstar.OrientationRPC(0.0, 0.0, 0.0), minimum_distance = 0.2, maximum_distance = 200.0,\n fov = 190.0, update_frequency_in_hz = 60.0, number_of_returns=self.track_sensor_size, query_type=simstar.QueryType.Static)\n\n track_sensor = self.main_vehicle.add_sensor(simstar.ESensorType.Distance, track_sensor_settings)\n \n self.simstar_step()\n\n opponent_sensor_settings = 
simstar.DistanceSensorParameters(\n enable = True, draw_debug = False, add_noise = False, position=simstar.PositionRPC(2.0, 0.0, 0.4), \n orientation=simstar.OrientationRPC(0.0, 0.0, 0.0), minimum_distance = 0.0, maximum_distance = 20.0,\n fov = 216.0, update_frequency_in_hz = 60.0, number_of_returns=self.opponent_sensor_size, query_type=simstar.QueryType.Dynamic)\n\n opponent_sensor = self.main_vehicle.add_sensor(simstar.ESensorType.Distance, opponent_sensor_settings)\n\n self.main_vehicle = SensoredVehicle(self.main_vehicle,track_sensor,opponent_sensor)\n\n # add all actors to the acor list\n self.actor_list.append(self.main_vehicle)\n\n # include other vehicles\n if self.add_opponents:\n\n # define other vehicles with set speeds and initial locations\n for i in range(self.number_of_opponents):\n new_agent = self.client.spawn_vehicle(actor=self.main_vehicle, distance=self.agent_locations[i], lane_id=self.lane_ids[i], initial_speed=0, set_speed=100)\n\n self.simstar_step()\n track_sensor = new_agent.add_sensor(simstar.ESensorType.Distance, track_sensor_settings)\n self.simstar_step()\n opponent_sensor = new_agent.add_sensor(simstar.ESensorType.Distance, opponent_sensor_settings)\n self.simstar_step()\n \n new_agent = SensoredVehicle(new_agent,track_sensor,opponent_sensor)\n \n # define drive controllers for each agent vehicle\n new_agent.set_controller_type(simstar.DriveType.Auto)\n self.actor_list.append(new_agent)\n \n self.simstar_step()\n\n self.simstar_step()\n\n # set as display vehicle to follow from simstar\n self.client.display_vehicle(self.main_vehicle)\n \n self.simstar_step()\n # set drive type as API for ego vehicle\n self.main_vehicle.set_controller_type(simstar.DriveType.API)\n \n self.simstar_step()\n\n simstar_obs = self.get_simstar_obs(self.main_vehicle)\n observation = self.make_observation(simstar_obs)\n return observation\n\n def calculate_reward(self, simstar_obs):\n collision = simstar_obs[\"damage\"]\n reward = 0.0\n done = False\n summary 
= {'end_reason': None}\n\n trackPos = simstar_obs['trackPos']\n angle = simstar_obs['angle']\n spx = simstar_obs['speedX']\n\n oppo = simstar_obs['opponents']\n min_opponent=np.min(oppo)\n\n progress = 1.5*spx * (np.cos(angle) - np.abs(np.sin(angle)))\n reward = progress - (spx) * np.abs(trackPos)\n\n #Encourages to directional driving.\n if np.abs(trackPos) < 0.1 : reward *= 1 \n elif np.abs(trackPos) < 0.2 : reward *= 0.8 \n elif np.abs(trackPos) < 0.3 : reward *= 0.7 \n elif np.abs(trackPos) < 0.4 : reward *= 0.6 \n elif np.abs(trackPos) < 0.5 : reward *= 0.5 \n elif np.abs(trackPos) < 0.6 : reward *= 0.4 \n elif np.abs(trackPos) < 0.7 : reward *= 0.1 \n else : reward *= 0.0\n\n #Avoiding from opponent collisions\n if min_opponent<2: reward-=2.5\n elif min_opponent<1: reward-=5\n elif min_opponent<0.5: reward-=10\n\n road = self.main_vehicle.get_road_deviation_info()\n curve = road['curvature']\n CURVE_TRESH=0.5\n \n if curve >0.01 and np.abs(trackPos)>CURVE_TRESH: reward-=2.5\n elif curve>0.02 and np.abs(trackPos)>CURVE_TRESH: reward-=5\n elif curve>0.03 and np.abs(trackPos)>CURVE_TRESH: reward-=10\n\n if collision:\n print(\"[SimstarEnv] collision with opponent vehicle\")\n reward -= self.c_w * spx*spx\n\n if np.abs(trackPos) >= 0.9:\n print(\"[SimstarEnv] finish episode due to road deviation\")\n reward = -100\n summary['end_reason'] = 'road_deviation'\n done = True\n \n if progress < self.termination_limit_progress:\n if self.terminal_judge_start < self.time_step_slow:\n print(\"[SimstarEnv] finish episode due to agent is too slow\")\n reward = -20\n summary['end_reason'] = 'too_slow'\n done = True\n else:\n self.time_step_slow = 0\n\n self.progress_on_road = self.main_vehicle.get_progress_on_road()\n # TODO: will be updated accordingly\n #if self.progress_on_road == 1.0:\n # self.progress_on_road = 0.0\n\n if self.progress_on_road > 2:\n print(\"[SimstarEnv] finished lap\")\n summary['end_reason'] = 'lap_done'\n reward+=100\n done = True\n\n 
self.time_step_slow += 1\n \n return reward, done, summary\n\n def step(self, action):\n self.action_to_simstar(action,self.main_vehicle)\n\n # required to continue simulation in sync mode\n self.simstar_step()\n\n simstar_obs = self.get_simstar_obs(self.main_vehicle)\n observation = self.make_observation(simstar_obs)\n reward, done, summary = self.calculate_reward(simstar_obs)\n \n return observation, reward, done, summary\n\n def make_observation(self, simstar_obs):\n names = ['angle', 'speedX', 'speedY', 'opponents','track','trackPos']\n Observation = col.namedtuple('Observation', names)\n\n return Observation( angle=np.array(simstar_obs['angle'], dtype=np.float32)/1.,\n speedX=np.array(simstar_obs['speedX'], dtype=np.float32)/self.default_speed,\n speedY=np.array(simstar_obs['speedY'], dtype=np.float32)/self.default_speed,\n opponents=np.array(simstar_obs['opponents'], dtype=np.float32)/20.,\n track=np.array(simstar_obs['track'], dtype=np.float32)/200.,\n trackPos=np.array(simstar_obs['trackPos'], dtype=np.float32)/1.)\n\n def ms_to_kmh(self, ms):\n return 3.6 * ms\n\n def clear(self):\n self.client.remove_actors(self.actor_list)\n\n def end(self):\n self.clear()\n\n # [steer, accel, brake] input\n def action_to_simstar(self, action,vehicle_ref):\n steer = float(action[0])\n accel_brake = float(action[1])\n\n steer = steer * 0.5\n\n if accel_brake >= 0:\n throttle = accel_brake\n brake = 0.0\n else:\n brake = abs(accel_brake)\n throttle = 0.0\n\n vehicle_ref.control_vehicle(steer=steer, throttle=throttle, brake=brake)\n \n def simstar_step(self):\n step_num = int(self.sync_step_num)\n if self.synronized_mode:\n for i in range(step_num):\n self.client.blocking_tick()\n else:\n time_diff_to_be = 1/60*step_num\n time_diff_actual = time.time()-self.last_step_time\n time_to_wait = time_diff_to_be - time_diff_actual\n if time_to_wait>0.0:\n time.sleep(time_to_wait)\n self.last_step_time = time.time()\n\n def get_simstar_obs(self, vehicle_ref): \n vehicle_state = 
vehicle_ref.get_vehicle_state_self_frame()\n speed_x_kmh = abs(self.ms_to_kmh(float(vehicle_state['velocity']['X_v'])))\n speed_y_kmh = abs(self.ms_to_kmh(float(vehicle_state['velocity']['Y_v'])))\n opponents = vehicle_ref.opponent_sensor.get_detections()\n track = vehicle_ref.track_sensor.get_detections()\n road_deviation = vehicle_ref.get_road_deviation_info()\n\n retry_counter = 0\n while len(track) < self.track_sensor_size or len(opponents) < self.opponent_sensor_size:\n self.simstar_step()\n time.sleep(0.1)\n opponents = vehicle_ref.opponent_sensor.get_detections()\n track = self.track_sensor.get_detections()\n retry_counter += 1\n if retry_counter > 1000: raise RuntimeError(\"Track Sensor shape error. Exited\")\n \n speed_x_kmh = np.sqrt((speed_x_kmh*speed_x_kmh) + (speed_y_kmh*speed_y_kmh))\n speed_y_kmh = 0.0\n \n # deviation from road in radians\n angle = float(road_deviation['yaw_dev'])\n \n # deviation from road center in meters\n trackPos = float(road_deviation['lat_dev']) / self.road_width\n\n curve = float(road_deviation['curvature'])\n\n # if collision occurs, True. 
else False\n damage = bool( vehicle_ref.check_for_collision())\n\n simstar_obs = {\n 'angle': angle,\n 'speedX': speed_x_kmh,\n 'speedY':speed_y_kmh,\n 'opponents':opponents ,\n 'track': track, \n 'trackPos': trackPos,\n 'damage': damage\n }\n\n return simstar_obs\n\n def get_agent_observations(self):\n states = []\n for vehicle in self.actor_list:\n if vehicle.get_ID() != self.main_vehicle.get_ID():\n raw_state = self.get_simstar_obs(vehicle)\n proc_state = self.make_observation(raw_state)\n states.append(proc_state)\n\n return states\n \n def set_agent_actions(self, action_list):\n num_actions = len(action_list)\n num_agents = len(self.actor_list)-1\n if num_actions == num_agents:\n action_index = 0\n for vehicle in self.actor_list:\n if vehicle.get_ID() != self.main_vehicle.get_ID():\n action = action_list[action_index]\n self.action_to_simstar(action,vehicle)\n action_index += 1\n else:\n print(\"[SimstarEnv] Warning! Agent number not equal to action number\")\n\n def change_opponent_control_to_api(self):\n self.simstar_step()\n for vehicle in self.actor_list:\n vehicle.set_controller_type(simstar.DriveType.API)\n\n def __del__(self):\n # reset sync mod so that user can interact with simstar\n if(self.synronized_mode):\n self.client.set_sync_mode(False)"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.min",
"numpy.abs",
"numpy.sqrt",
"numpy.cos"
]
] |
peter-ho/mlfinlab
|
[
"54d3ff004095b1354095811fe0dc64b3691dc76b"
] |
[
"mlfinlab/tests/test_risk_metrics.py"
] |
[
"# pylint: disable=missing-module-docstring\nimport unittest\nimport os\nimport pandas as pd\nfrom mlfinlab.portfolio_optimization.risk_metrics import RiskMetrics\n\n\nclass TestRiskMetrics(unittest.TestCase):\n \"\"\"\n Tests different risk metrics calculation from the RiskMetrics class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Set the file path for the tick data csv.\n \"\"\"\n project_path = os.path.dirname(__file__)\n data_path = project_path + '/test_data/stock_prices.csv'\n self.data = pd.read_csv(data_path, parse_dates=True, index_col=\"Date\")\n\n def test_variance_calculation(self):\n \"\"\"\n Test the calculation of variance.\n \"\"\"\n\n weights = [1] * self.data.shape[1]\n variance = RiskMetrics().calculate_variance(self.data.cov(), weights)\n assert isinstance(variance, float)\n\n def test_value_at_risk_calculation(self):\n \"\"\"\n Test the calculation of value at risk.\n \"\"\"\n\n test_returns = self.data.iloc[:, 0].values\n value_at_risk = RiskMetrics().calculate_value_at_risk(test_returns)\n assert isinstance(value_at_risk, float)\n\n def test_expected_shortfall_calculation(self):\n \"\"\"\n Test the calculation of expected shortfall.\n \"\"\"\n\n test_returns = self.data.iloc[:, 0].values\n expected_shortfall = RiskMetrics().calculate_expected_shortfall(test_returns)\n assert isinstance(expected_shortfall, float)\n\n def test_conditional_drawdown_calculation(self):\n \"\"\"\n Test the calculation of conditional drawdown at risk.\n \"\"\"\n\n test_returns = self.data.iloc[:, 0].values\n conditional_drawdown = RiskMetrics().calculate_conditional_drawdown_risk(test_returns)\n assert isinstance(conditional_drawdown, float)\n\n def test_value_at_risk_for_dataframe(self):\n \"\"\"\n Test the calculation of value at risk.\n \"\"\"\n\n test_returns = pd.DataFrame(self.data.iloc[:, 0])\n value_at_risk = RiskMetrics().calculate_value_at_risk(test_returns)\n assert isinstance(value_at_risk, float)\n\n def test_expected_shortfall_for_dataframe(self):\n 
\"\"\"\n Test the calculation of expected shortfall.\n \"\"\"\n\n test_returns = pd.DataFrame(self.data.iloc[:, 0])\n expected_shortfall = RiskMetrics().calculate_expected_shortfall(test_returns)\n assert isinstance(expected_shortfall, float)\n\n def test_conditional_drawdown_for_dataframe(self):\n \"\"\"\n Test the calculation of conditional drawdown at risk.\n \"\"\"\n\n test_returns = pd.DataFrame(self.data.iloc[:, 0])\n conditional_drawdown = RiskMetrics().calculate_conditional_drawdown_risk(test_returns)\n assert isinstance(conditional_drawdown, float)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
justinbooms/imu-positioning
|
[
"26f33b4021ba824a44bc329ab4dc0c0c2dbfbbd0"
] |
[
"measure/measure_sum.py"
] |
[
"\"\"\"Continuous Reading of Sensors\n\nMake continuous readings from the sensors and begin\na take measurements function.\n\nWe Believe the Following:\nMagnet x: \nMagnet y:\nMagnet z:\nEuler Heading: Dir w/ 0 being North 90 East, 180 South, 270 West\nEuler Roll:\nEuler Pitch: Angle up \nAccel x:\nAccel y:\nAccel z:\nEuler x:\nEuler y:\nEuler z:\nThermometer: Temperature in Celcius\nLeft Encoder: Odometer of left wheel\nRight Encoder: Odometer of right wheel\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nimport time\nimport pickle\nfrom datetime import datetime, timedelta\n\nfrom easygopigo3 import EasyGoPiGo3\nfrom di_sensors.inertial_measurement_unit import InertialMeasurementUnit\n\n\nimport numpy as np\nimport math as math\n\n# Setup Sensors\nimu = InertialMeasurementUnit(bus = \"GPG3_AD1\")\ngpg = EasyGoPiGo3()\ngpg.reset_encoders()\n\n\ndef print_reading():\n mag = imu.read_magnetometer()\n gyro = imu.read_gyroscope()\n euler = imu.read_euler()\n accel = imu.read_accelerometer()\n temp = imu.read_temperature()\n encoder = gpg.read_encoders()\n \n string_to_print = \"Magnetometer X: {:.1f} Y: {:.1f} Z: {:.1f} \" \\\n \"Gyroscope X: {:.1f} Y: {:.1f} Z: {:.1f} \" \\\n \"Accelerometer X: {:.1f} Y: {:.1f} Z: {:.1f} \" \\\n \"Euler Heading: {:.1f} Roll: {:.1f} Pitch: {:.1f} \" \\\n \"Temperature: {:.1f}C \" \\\n \"Left Encoder: {:.1f} \" \\\n \"Right Encoder: {:.1f}\".format(mag[0], mag[1], mag[2],\n gyro[0], gyro[1], gyro[2],\n accel[0], accel[1], accel[2],\n euler[0], euler[1], euler[2],\n temp, encoder[0], encoder[1])\n print(string_to_print)\n\n\ndef get_reading():\n mag = imu.read_magnetometer()\n gyro = imu.read_gyroscope()\n euler = imu.read_euler()\n accel = imu.read_accelerometer()\n temp = imu.read_temperature()\n encoder = gpg.read_encoders()\n res = {\n \"mag_x\": mag[0],\n \"mag_y\": mag[1],\n \"mag_z\": mag[2],\n \"gyro_x\": gyro[0],\n \"gyro_y\": gyro[1],\n \"gyro_z\": gyro[2],\n \"accel_x\": accel[0], \n 
\"accel_y\": accel[1],\n \"accel_z\": accel[2], \n \"euler_x\": euler[0],\n \"euler_y\": euler[1],\n \"euler_z\": euler[2],\n # \"temp\": temp, \n \"left_enc\": encoder[0],\n \"right_enc\": encoder[1],\n }\n \n return res\n\n\ndef get_position(right_prev,left_prev):\n\n euler = imu.read_euler()\n euler_x=euler[0]\n\n encoder = gpg.read_encoders()\n left_enc=encoder[0]\n right_enc= encoder[1]\n y_delta=left_enc-left_prev\n x_delta=right_enc-right_prev\n y=math.sin(euler_x*0.0174533)*y_delta\n x=math.cos(euler_x*0.0174533)*x_delta\n res = {\n\n \"left_enc\": left_enc,\n \"right_enc\": right_enc,\n \"x\": x,\n \"y\": y,\n \n }\n ##print(res)\n return left_enc,right_enc,x,y\n\n\n\ni=0\nt1 = datetime.now()\ndata = []\nright_prev=0\nleft_prev=0\nx_total=0\ny_total=0\nwhile i<100:\n # Execute\n ##print_reading()\n #data.append(get_reading())\n t2 = datetime.now()\n left_enc,right_enc,x,y=get_position(right_prev,left_prev)\n \n right_prev=right_enc\n left_prev=left_enc\n x_total=x_total+x\n y_total=y_total+y\n print(\"x (mm) = %8.2f y (mm) = %8.2f\" % (x_total, y_total))\n print(imu.read_euler()[0])\n ##print(\"Duration: {}\".format(t2 - t1))\n # print(timedelta(t2, t1))\n \n \n # Prepare for next iteration\n i += 1\n t1 = t2\n time.sleep(.1)\n \n\ngpg.stop()\n\n#if x_total>0 and y_total>0: ### quadrant 1\n# direction_back=180+90-math.atan(x_total/y_total)*57.2958\n#elif x_total<0 and y_total>0:### quadrant 4\n# direction_back=180+90-math.atan(x_total/y_total)*57.2958\n#elif x_total<0 and y_total<0:### quadrant 3\n# direction_back=90-math.atan(x_total/y_total)*57.2958\n#else: ### quadrant 2\n# direction_back=90-math.atan(x_total/y_total)*57.2958\n##print(direction_back)\n#print(\"Back direction= %8.2f dist=%8.2f\" % (direction_back, distance_back/44))\n\n### Try quarant 3 and 4\n#if x_total>0 and y_total>0: ### quadrant 1\n# direction_back=180+90-math.atan(x_total/y_total)*57.2958\n#elif x_total<0 and y_total>0:### quadrant 4\n# 
direction_back=180+90+math.atan(x_total/y_total)*57.2958\n#elif x_total<0 and y_total<0:### quadrant 3\n# direction_back=90-math.atan(x_total/y_total)*57.2958\n#else: ### quadrant 2\n# direction_back=90+math.atan(x_total/y_total)*57.2958\n###print(direction_back)\n#print(\"Back direction= %8.2f dist=%8.2f\" % (direction_back, distance_back/44))\n\n## print direction_back, aka pointing vector direction CW deg angle from north\n## and distance back, aka pointing vector magnitude\n##print(imu.read_euler()[0]) \ndistance_back=math.sqrt(x_total**2+y_total**2)\ndirection_back = np.arctan2(y_total,x_total)\nprint(\"return direction (deg CW from north) = %8.2f distance (mm) = %8.2f\" % (direction_back, distance_back))\n\n#may need to deal with dividing by zero when x_total = 0\n\n## find rotation, the CW rotation needed to go from pointing vector to return vector\n## quadrant independent method\n## euler heading = bearing = yaw CW from north\n## x_total is x position in mm where x direction is north\n## y is west\nbearing = imu.read_euler()[0] \nrotation = -(math.pi + bearing + np.arctan2(y_total,x_total)*180/math.pi)\nprint(\"current yaw CW from north = %8.2f rotation = %8.2f\" % (bearing, rotation))\n\n#angle=imu.read_euler()[0]\n#angle_delta=direction_back-angle\n#print(\"current= %8.2f delta=%8.2f\" % (angle, angle_delta))\n\n##while angle_delta>1:\n ## angle=imu.read_euler()[0]\n ##angle_delta=direction_back-angle\n ## gpg.right()\n ## time.sleep(.1)\n ## gpg.stop()\n ## print(\"current= %8.2f delta=%8.2f\" % (angle, angle_delta))\n \ngpg.turn_degrees(rotation)\nprint(\"return distance (mm) = %8.2f\" % (distance_back))\ngpg.drive_cm(distance_back/10) \n#gpg.drive_cm(distance_back/44*2.54) \ngpg.stop() \n# Save Out\n#with open('data.pkl', 'wb') as f:\n #pickle.dump(data, f)\n\n"
] |
[
[
"numpy.arctan2"
]
] |
CermakM/cryptoanalysis
|
[
"02c157daa046915b46451eeec3bde93860082abc"
] |
[
"cryptoanalysis/analysis/decryption.py"
] |
[
"\"\"\"Decryption tools\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter, deque\nfrom matplotlib import pyplot as plt\nfrom cryptoanalysis.cipher import vigener\n\nimport cryptoanalysis\n\nMETA_DIR = \"%s/meta\" % cryptoanalysis.__path__[0]\n\n\nclass Analyser:\n\n def __init__(self, cipher: str = None, fname: str = None, cipher_type='mono', lang='en'):\n \"\"\"\n Analyser class providing tools to analyse and decrypt ciphers\n :param cipher: encoded stream to be decrypted\n :param fname: file to read string from\n :param cipher_type: mono alphabetic by default\n :param lang: language to choose\n \"\"\"\n if fname and cipher:\n raise ValueError(\"Only one argument can be provided at time: `cipher`, `file`\")\n\n if fname is not None:\n with open(fname) as sample:\n cipher = sample.read()\n\n self.cipher = cipher\n\n if cipher:\n self.cipher_strip, self.blacklist_dict = vigener.strip_blacklist(cipher.lower())\n\n self.type = cipher_type\n self.lang = lang\n\n # Convenient dictionary to store per-key shift vectors\n self.shift_dict = dict()\n\n _lang_df = pd.read_csv('{meta_dir}/{lang}.csv'.format(meta_dir=META_DIR, lang=self.lang))\n\n self.alphabet = _lang_df.letters.values\n self.letter_frequency = _lang_df.occurence.values\n \"DataFrame with cipher character frequency\"\n\n self.default_alphabet_dct = dict(zip(self.alphabet, (0 for _ in range(len(self.alphabet)))))\n\n def get_cipher(self) -> str:\n \"\"\"Returns ciphered text\"\"\"\n return self.cipher\n\n def get_char_occurance(self):\n \"\"\"Returns bag of character occurence count\"\"\"\n return Counter(self.cipher_strip)\n\n def get_char_frequency(self) -> dict:\n \"\"\"Returns dictionary of characters and their occurence\"\"\"\n series = pd.Series(Counter(self.cipher_strip))\n series /= series.sum()\n\n return series.to_dict()\n\n def get_shift_vector(self, key_index) -> list:\n \"\"\"Returns shift vector for key character on specific index\"\"\"\n if not self.shift_dict:\n 
return []\n\n return self.shift_dict[key_index]\n\n def plot_char_frequency(self):\n \"\"\"Plot frequency analysis using pandas DataFrame\"\"\"\n bag = Counter(self.default_alphabet_dct)\n bag.update(self.cipher_strip)\n\n cipher_frequency = self.__df_from_dict(bag)\n \"DataFrame with cipher character frequency\"\n ax = cipher_frequency.plot(\n title='Frequency analysis of the given cipher',\n kind='bar',\n rot=0 # Prevent letters from rotating by 90 deg\n )\n ax.legend(['Character count'])\n plt.show()\n\n def decipher(self, custom_key=None, key_id=0, rot=0) -> str:\n \"\"\"\n Attempt to break cipher using rotation hyperparameter\n :param custom_key: key to be used, if known\n :param key_id: id of the key to be used for decryption\n :param rot: 'a' transforms to 'a' for 0 (default), to 'b' for 1\n :return: decrypted stream\n \"\"\"\n\n if custom_key and key_id:\n raise(AttributeError(\"`custom_key` and `key_id` must be used exclusively\"))\n\n # Attempt to decrypt the key\n if custom_key is None:\n key, _ = self.get_keys(key_id, rot=rot)\n # print(\"Guessing key: '%s'\" % key)\n else:\n # Apply rotation to the custom key instead of the text\n key = custom_key\n\n # Apply the key to decode stream\n decoded_stream = vigener.decode(self.cipher_strip, key=key, rot=rot, strip=False)\n\n decoded_stream = vigener.destrip_blacklist(decoded_stream, feed_dict=self.blacklist_dict)\n\n return decoded_stream\n\n def get_keys(self, key_id=0, rot=0) -> (str, list):\n \"\"\"\n Attempt to decrypt the Vigener key\n :param key_id: length of the key to be returned\n :param rot: 'a' transforms to 'a' for 0, to 'b' for 1 (default)\n :return: decrypted key with highest probability, list of possible keys for different lengths\n \"\"\"\n\n # Estimate the key length\n key_len_list = self._est_key_len()\n if key_id >= len(key_len_list):\n return '', []\n\n # print(\"Estimated Key length: {}\".format(key_len_list))\n\n key_list = []\n\n for k in key_len_list:\n key = \"\"\n\n # Find optimal 
shift for each character of the key\n for index in range(k):\n cipher_strip = self.cipher_strip[index::k]\n # Create new bag for stripped cipher\n cipher_strip_bag = Counter(self.default_alphabet_dct)\n cipher_strip_bag.update(cipher_strip)\n cipher_strip_list = sorted(cipher_strip_bag.items()) # Sort by key to match def_occurence ordering\n\n # Convert occurence to frequency and put it into deque to rotate easily\n strip_frequency = deque([v / len(cipher_strip_bag)*100 for k, v in cipher_strip_list])\n\n # Create shift matrix\n shift_list = []\n for i in range(len(strip_frequency)):\n shift_list.append(strip_frequency.copy())\n strip_frequency.rotate(1)\n\n shift_matrix = np.array(shift_list)\n shift_vector = np.matmul(shift_matrix, np.resize(self.letter_frequency,\n new_shape=shift_matrix[0].shape))\n # Get first five (magic) shifts and cache them\n shift_tuples = np.array([*enumerate(shift_vector)])\n shift_list = sorted(shift_tuples, key=lambda x: x[1], reverse=True)[:5]\n self.shift_dict[index] = [int(shift) for shift, _ in shift_list]\n\n shift_index = int(np.argmax(shift_vector))\n\n # Rotate letters to the peak position\n shift_sample = deque(k for k, v in cipher_strip_list)\n shift_sample.rotate(shift_index + rot) # Number of rotations is shift index + rotation value\n\n key += shift_sample[0]\n\n key_list.append(key)\n\n return key_list[key_id], key_list\n\n def get_key_len(self, key_ord=0):\n \"\"\"Get length of nth key where n is the order of the key\"\"\"\n est_len_list = self._est_key_len()\n if key_ord >= len(est_len_list):\n return -1\n\n return int(est_len_list[key_ord])\n\n def get_key_len_list(self, res=3):\n \"\"\"Return list of estimated keys\n :param res: Number of results to be returned in the list\n \"\"\"\n return [int(i) for i in self._est_key_len(res=res)]\n\n def _est_key_len(self, batch_count=2, res=3, gcd=False) -> list:\n \"\"\"\n Attempts to estimate key len of cipher key based on coincidence matrix\n :param batch_count: Count of 
batches that will be created form the stream\n :param res: number of results to be returned\n :param gcd: eliminate multiplicators of key len using gcd\n :returns: List of possible key lengs sorted by probability\n \"\"\"\n\n # Generate batches\n batch_size = len(self.cipher_strip) // batch_count\n batches = [self.cipher_strip[i * batch_size: (i + 1) * batch_size] for i in range(batch_count)]\n\n for batch_stream in batches:\n shift_array = [None] * (batch_size + 1)\n shift_array[0] = batch_stream\n\n co_matrix = np.empty(shape=(batch_size - 1,), dtype=np.float32)\n \"Coincidance matrix\"\n\n # Perform shifts\n shifted_str = batch_stream\n for i in range(len(batch_stream) - 1): # The number of shifts is given by len of batch\n shifted_str = self.__shift_str(shifted_str)\n shift_array[i+1] = shifted_str\n\n # Get coincidance value\n # Placing this here increases algorithm performance\n co_value = sum(c == shift_c for c, shift_c in zip(batch_stream, shifted_str))\n co_matrix[i] = co_value\n\n # Perform z-scale\n co_matrix -= co_matrix.mean()\n co_matrix /= co_matrix.std()\n\n # Get all the coincidence indexes where coincidence value >= 1\n co_values, = np.where(co_matrix >= 1)\n\n co_index_list = list(map((lambda t: t[1] - t[0]), zip(co_values, co_values[1:])))\n\n co_indexes = Counter(co_index_list)\n\n if gcd:\n # TODO eliminate multiplicators\n pass\n\n res = min(res, len(co_indexes))\n results = sorted(co_indexes.items(), key=lambda x: x[1], reverse=True)[:res]\n\n return [r[0] for r in results]\n\n @staticmethod\n def __shift_str(stream: str, rot=1, direction='r') -> str:\n \"\"\"Performs cyclic shift in the given direction by given value\"\"\"\n\n if direction == 'r':\n _stream = stream[-rot:] + stream[:len(stream) - rot]\n elif direction == 'l':\n _stream = stream[rot:] + stream[:rot]\n else:\n raise AttributeError(\"Incorrect shift direction provided, \"\n \"expected one from ['l', 'r'], got %s\" % direction)\n\n return _stream\n\n @staticmethod\n def 
__df_from_dict(dct: dict) -> pd.DataFrame:\n \"\"\"Creates DataFrame from given dict replacing special symbols\"\"\"\n bag = dct.copy()\n\n return pd.DataFrame.from_dict(bag, orient='index')\n"
] |
[
[
"numpy.array",
"numpy.empty",
"pandas.DataFrame.from_dict",
"numpy.where",
"numpy.resize",
"numpy.argmax",
"matplotlib.pyplot.show"
]
] |
QRemy/gammapy-benchmarks
|
[
"7f6170e88284958056fbdf468fb890787a13f153"
] |
[
"benchmarks/io.py"
] |
[
"# To check the reading/writing performance of DL3 data\nimport logging\nimport numpy as np\nimport time\nimport yaml\nimport os\nfrom gammapy.data import DataStore\nfrom gammapy.maps import Map\n\nN_OBS = int(os.environ.get(\"GAMMAPY_BENCH_N_OBS\", 10))\n\ndef run_benchmark():\n info = {\"n_obs\": N_OBS}\n\n t = time.time()\n\n data_store = DataStore.from_dir(\"$GAMMAPY_DATA/cta-1dc/index/gps/\")\n OBS_ID = 110380\n obs_ids = OBS_ID * np.ones(N_OBS)\n observations = data_store.get_observations(obs_ids)\n\n info[\"data_loading\"] = time.time() - t\n t = time.time()\n\n m = Map.create()\n for obs in observations:\n m.fill_events(obs.events)\n\n info[\"filling\"] = time.time() - t\n t = time.time()\n\n m.write(\"survey_map.fits.gz\", overwrite=True)\n\n info[\"writing\"] = time.time() - t\n\n with open(\"bench.yaml\", \"w\") as fh:\n yaml.dump(info, fh, sort_keys=False, indent=4)\n\n\nif __name__ == \"__main__\":\n format = \"%(filename)s:%(lineno)s: %(message)s\"\n logging.basicConfig(level=logging.INFO, format=format)\n logging.info(f\"Running io.py with N_OBS = {N_OBS}\")\n logging.info(f\"cwd = {os.getcwd()}\")\n run_benchmark()\n"
] |
[
[
"numpy.ones"
]
] |
chengdazhi/mmdetection
|
[
"08cb54216479e59b4e4fad19ea2c9b3c72fb0405"
] |
[
"mmdet/datasets/phillyzip.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport zipfile\n\nimport cv2\nimport numpy as np\n\n_im_zfile = []\n\ndef imread(filename, flags=cv2.IMREAD_COLOR):\n global _im_zfile\n path = filename\n pos_at = path.index('@')\n if pos_at == -1:\n print(\"character '@' is not found from the given path '%s'\"%(path))\n assert 0\n path_zip = path[0: pos_at]\n path_img = path[pos_at + 1:]\n if not os.path.isfile(path_zip):\n print(\"zip file '%s' is not found\"%(path_zip))\n assert 0\n for i in range(len(_im_zfile)):\n if _im_zfile[i]['path'] == path_zip:\n data = _im_zfile[i]['zipfile'].read(path_img)\n return cv2.imdecode(np.frombuffer(data, np.uint8), flags)\n\n print(\"read new image zip file '%s'\"%(path_zip))\n _im_zfile.append({\n 'path': path_zip,\n 'zipfile': zipfile.ZipFile(path_zip, 'r')\n })\n data = _im_zfile[-1]['zipfile'].read(path_img)\n\n return cv2.imdecode(np.frombuffer(data, np.uint8), flags)\n\nimport xml.etree.ElementTree as ET\n\n_xml_path_zip = []\n_xml_zfile = []\n\ndef xmlread(filename):\n global _xml_path_zip\n global _xml_zfile\n path = filename\n pos_at = path.index('@')\n if pos_at == -1:\n print(\"character '@' is not found from the given path '%s'\"%(path))\n assert 0\n path_zip = path[0: pos_at]\n path_xml = path[pos_at + 2:]\n if not os.path.isfile(path_zip):\n print(\"zip file '%s' is not found\"%(path_zip))\n assert 0\n for i in xrange(len(_xml_path_zip)):\n if _xml_path_zip[i] == path_zip:\n data = _xml_zfile[i].open(path_xml)\n return ET.fromstring(data.read())\n _xml_path_zip.append(path_zip)\n print(\"read new xml file '%s'\"%(path_zip))\n _xml_zfile.append(zipfile.ZipFile(path_zip, 'r'))\n data = _xml_zfile[-1].open(path_xml)\n return ET.fromstring(data.read())\n"
] |
[
[
"numpy.frombuffer"
]
] |
informaton/GPflow
|
[
"164d90d78c1c6fd966ae19ebaee59b9241bcba39"
] |
[
"tests/test_logdensities.py"
] |
[
"# Copyright 2018 the GPflow authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.random import randn\nimport tensorflow as tf\nimport pytest\nimport gpflow\nfrom gpflow import logdensities, settings\nfrom gpflow.test_util import session_tf\nfrom scipy.stats import multivariate_normal as mvn\nfrom numpy.testing import assert_allclose\n\n\nrng = np.random.RandomState(1)\n\n\n@pytest.mark.parametrize(\"x\", [randn(4,10), randn(4,1)])\n@pytest.mark.parametrize(\"mu\", [randn(4,10), randn(4,1)])\n@pytest.mark.parametrize(\"cov_sqrt\", [randn(4,4), np.eye(4)])\ndef test_multivariate_normal(session_tf, x, mu, cov_sqrt):\n cov = np.dot(cov_sqrt, cov_sqrt.T)\n L = np.linalg.cholesky(cov)\n\n x_tf = tf.placeholder(settings.float_type)\n mu_tf = tf.placeholder(settings.float_type)\n gp_result = logdensities.multivariate_normal(\n x_tf, mu_tf, tf.convert_to_tensor(L))\n\n gp_result = session_tf.run(gp_result, feed_dict={x_tf: x, mu_tf: mu})\n\n if mu.shape[1] > 1:\n if x.shape[1] > 1:\n sp_result = [mvn.logpdf(x[:,i], mu[:,i], cov) for i in range(mu.shape[1])]\n else:\n sp_result = [mvn.logpdf(x.ravel(), mu[:, i], cov) for i in range(mu.shape[1])]\n else:\n sp_result = mvn.logpdf(x.T, mu.ravel(), cov)\n assert_allclose(gp_result, sp_result)\n\ndef test_shape_asserts(session_tf):\n A = np.random.randn(5)\n B = np.random.randn(5)\n L = np.tril(np.random.randn(5, 5))\n\n # Static shape check:\n with pytest.raises(ValueError):\n tA = 
tf.identity(A)\n tB = tf.identity(B)\n tL = tf.identity(L)\n res = logdensities.multivariate_normal(tA, tB, tL)\n\n # Dynamic shape check:\n # the following results in a segfault before PR#964\n with pytest.raises(tf.errors.InvalidArgumentError):\n vA = tf.placeholder(tf.float64)\n vB = tf.placeholder(tf.float64)\n vL = tf.placeholder(tf.float64)\n res = logdensities.multivariate_normal(vA, vB, vL)\n session_tf.run(res, {vA: A, vB: B, vL: L})\n"
] |
[
[
"numpy.testing.assert_allclose",
"tensorflow.convert_to_tensor",
"numpy.dot",
"numpy.random.RandomState",
"scipy.stats.multivariate_normal.logpdf",
"numpy.random.randn",
"numpy.eye",
"tensorflow.placeholder",
"numpy.linalg.cholesky",
"tensorflow.identity"
]
] |
ess-dmsc/nicos
|
[
"755d61d403ff7123f804c45fc80c7ff4d762993b"
] |
[
"nicos/clients/gui/widgets/plotting.py"
] |
[
"# -*- coding: utf-8 -*-\n# *****************************************************************************\n# NICOS, the Networked Instrument Control System of the MLZ\n# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Module authors:\n# Christian Felder <c.felder@fz-juelich.de>\n# Georg Brandl <g.brandl@fz-juelich.de>\n# Enrico Faulhaber <enrico.faulhaber@frm2.tum.de>\n#\n# *****************************************************************************\n\"\"\"NICOS GR plotting backend.\"\"\"\n\nimport os\nimport tempfile\nimport time\nfrom os import path\n\nimport gr\nimport numpy as np\nimport numpy.ma\nfrom gr.pygr import CoordConverter, ErrorBar, Plot, PlotAxes, \\\n RegionOfInterest, Text\nfrom gr.pygr.helper import ColorIndexGenerator\n\nfrom nicos.clients.gui.dialogs.data import DataExportDialog\nfrom nicos.clients.gui.utils import DlgPresets, DlgUtils, dialogFromUi, loadUi\nfrom nicos.guisupport.plots import DATEFMT, TIMEFMT, MaskedPlotCurve, \\\n NicosPlotAxes, NicosTimePlotAxes\nfrom nicos.guisupport.qt import QAction, QApplication, QCursor, QDialog, \\\n QFont, QListWidgetItem, QMenu, QPoint, Qt\nfrom nicos.guisupport.qtgr import InteractiveGRWidget, LegendEvent, \\\n MouseEvent, ROIEvent\nfrom nicos.guisupport.utils import savePlot, 
scaledFont\nfrom nicos.utils import number_types, safeName\nfrom nicos.utils.fitting import CosineFit, ExponentialFit, Fit, FitError, \\\n FitResult, GaussFit, LinearFit, LorentzFit, PearsonVIIFit, \\\n PseudoVoigtFit, SigmoidFit, TcFit\n\n\ndef cleanArray(arr):\n \"\"\"Clean an array or list from unsupported objects for plotting.\n\n Objects are replaced by None, which is then converted to NaN.\n \"\"\"\n try:\n return np.asarray(arr, float)\n except ValueError:\n return np.array([x if isinstance(x, number_types) else None\n for x in arr], float)\n\n\ndef prepareData(x, y, dy, norm):\n \"\"\"Prepare and sanitize data for plotting.\n\n x, y and dy are lists or arrays. norm can also be None.\n\n Returns x, y and dy arrays, where dy can also be None.\n \"\"\"\n # make arrays\n x = cleanArray(x)\n y = cleanArray(y)\n dy = cleanArray(dy)\n # normalize\n if norm is not None:\n norm = np.asarray(norm, float)\n y /= norm\n dy /= norm\n # remove infinity/NaN\n indices = np.isfinite(y) & np.isfinite(x)\n x = x[indices]\n y = y[indices]\n if not y.size:\n raise ValueError('y does not contain any value')\n if dy.size:\n dy = dy[indices]\n # remove error bars that aren't finite\n dy[~np.isfinite(dy)] = 0\n # if there are no errors left, don't bother drawing them\n if dy.sum() == 0:\n return x, y, None\n return x, y, dy\n\n\nclass Fitter:\n title = 'unknown fit'\n picks = []\n\n def __init__(self, plot, window, action, curve, pickmode):\n self.plot = plot\n self.window = window\n self.action = action\n self.curve = curve\n self.pickmode = pickmode\n self.data = plot._getCurveData(curve)\n\n self.values = []\n self.stage = 0\n\n def begin(self):\n self.plot._enterFitMode()\n if self.action:\n self.action.setChecked(True)\n if self.pickmode:\n self.plot._fitRequestPick(self.picks[0])\n else:\n self.finish()\n\n def addPick(self, point):\n self.stage += 1\n self.values.append(point)\n if self.stage < len(self.picks):\n paramname = self.picks[self.stage]\n 
self.plot._fitRequestPick(paramname)\n else:\n self.finish()\n\n def cancel(self):\n self.plot._leaveFitMode()\n if self.action:\n self.action.setChecked(False)\n\n def finish(self):\n self.cancel()\n try:\n res = self.do_fit()\n except FitError as e:\n self.plot.showInfo('Fitting failed: %s.' % e)\n return\n self.plot._plotFit(res)\n\n def do_fit(self):\n raise NotImplementedError\n\n def limitsFromPlot(self):\n return self.plot.getViewport()[:2]\n\n\nclass LinearFitter(Fitter):\n title = 'linear fit'\n picks = ['First point', 'Second point']\n\n def do_fit(self):\n if self.pickmode:\n (xmin, y1), (xmax, y2) = self.values # pylint: disable=unbalanced-tuple-unpacking\n m0 = (y2 - y1) / (xmax - xmin)\n pars = [m0, y1 - m0*xmin]\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = LinearFit(pars, xmin=xmin, xmax=xmax, timeseries=True)\n return f.run_or_raise(*self.data)\n\n\nclass ExponentialFitter(Fitter):\n title = 'exp. fit'\n picks = ['First point', 'Second point']\n\n def do_fit(self):\n if self.pickmode:\n (xmin, y1), (xmax, y2) = self.values # pylint: disable=unbalanced-tuple-unpacking\n b0 = np.log(y1 / y2) / (xmin - xmax)\n x0 = xmin - np.log(y1) / b0\n pars = [b0, x0]\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = ExponentialFit(pars, xmin=xmin, xmax=xmax, timeseries=True)\n return f.run_or_raise(*self.data)\n\n\nclass CosineFitter(Fitter):\n title = 'cosine fit'\n picks = ['Maximum', 'Next minimum']\n\n def do_fit(self):\n pars = None\n if self.pickmode:\n (x1, y1), (x2, y2) = self.values # pylint: disable=unbalanced-tuple-unpacking\n a = abs(y1 - y2) / 2.\n b = (y1 + y2) / 2.\n width = abs(x1 - x2)\n freq = 1 / (width * 2.)\n pars = [a, freq, x1, b]\n\n f = CosineFit(pars)\n return f.run_or_raise(*self.data)\n\n\nclass LorentzFitter(Fitter):\n title = 'peak fit'\n picks = ['Background', 'Peak', 'Half Maximum']\n\n def do_fit(self):\n if self.pickmode:\n (xb, yb), (x0, y0), (xw, _) = self.values # pylint: 
disable=unbalanced-tuple-unpacking\n pars = [x0, abs(y0-yb), abs(x0-xw), yb]\n totalwidth = abs(x0 - xb)\n xmin = x0 - totalwidth\n xmax = x0 + totalwidth\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = LorentzFit(pars, xmin=xmin, xmax=xmax)\n return f.run_or_raise(*self.data)\n\n\nclass GaussFitter(Fitter):\n title = 'peak fit'\n picks = ['Background', 'Peak', 'Half Maximum']\n\n def do_fit(self):\n if self.pickmode:\n (xb, yb), (x0, y0), (xw, _) = self.values # pylint: disable=unbalanced-tuple-unpacking\n pars = [x0, abs(y0-yb), abs(x0-xw), yb]\n totalwidth = abs(x0 - xb)\n xmin = x0 - totalwidth\n xmax = x0 + totalwidth\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = GaussFit(pars, xmin=xmin, xmax=xmax)\n return f.run_or_raise(*self.data)\n\n\nclass SigmoidFitter(Fitter):\n title = 'sigmoid fit'\n picks = ['Left point', 'Right point']\n\n def do_fit(self):\n if self.pickmode:\n (xmin, y1), (xmax, y2) = self.values # pylint: disable=unbalanced-tuple-unpacking\n pars = [y2 - y1, 1, (xmax - xmin) / 2. 
+ xmin, y1]\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = SigmoidFit(pars, xmin=xmin, xmax=xmax)\n return f.run_or_raise(*self.data)\n\n\nclass PseudoVoigtFitter(Fitter):\n title = 'peak fit (PV)'\n picks = ['Background', 'Peak', 'Half Maximum']\n\n def do_fit(self):\n if self.pickmode:\n (xb, yb), (x0, y0), (xw, _) = self.values # pylint: disable=unbalanced-tuple-unpacking\n pars = [yb, abs(y0 - yb), x0, abs(x0 - xw), 0.5]\n totalwidth = abs(x0 - xb)\n xmin = x0 - totalwidth\n xmax = x0 + totalwidth\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = PseudoVoigtFit(pars, xmin=xmin, xmax=xmax)\n return f.run_or_raise(*self.data)\n\n\nclass PearsonVIIFitter(Fitter):\n title = 'peak fit (PVII)'\n picks = ['Background', 'Peak', 'Half Maximum']\n\n def do_fit(self):\n if self.pickmode:\n (xb, yb), (x0, y0), (xw, _) = self.values # pylint: disable=unbalanced-tuple-unpacking\n pars = [yb, abs(y0-yb), x0, abs(x0-xw), 5.0]\n totalwidth = abs(x0 - xb)\n xmin = x0 - totalwidth\n xmax = x0 + totalwidth\n else:\n pars = None\n xmin, xmax = self.limitsFromPlot()\n\n f = PearsonVIIFit(pars, xmin=xmin, xmax=xmax)\n return f.run_or_raise(*self.data)\n\n\nclass TcFitter(Fitter):\n title = 'Tc fit'\n picks = ['Background', 'Tc']\n\n def do_fit(self):\n pars = None\n if self.pickmode:\n (_, Ib), (Tc, _) = self.values # pylint: disable=unbalanced-tuple-unpacking\n alpha0 = 0.5\n # guess A from maximum data point\n Tmin = min(self.data[0])\n A0 = max(self.data[1]) / ((Tc-Tmin)/Tc)**alpha0\n pars = [Ib, A0, Tc, alpha0]\n\n f = TcFit(pars)\n return f.run_or_raise(*self.data)\n\n\nclass ArbyFitDialog(QDialog):\n\n def __init__(self, parent):\n QDialog.__init__(self, parent)\n loadUi(self, 'panels/fit_arby.ui')\n self.presets = DlgPresets('fit_arby',\n [(self.function, ''), (self.fitparams, ''),\n (self.xfrom, ''), (self.xto, '')])\n self.presets.load()\n for name in sorted(ArbitraryFitter.arby_functions):\n QListWidgetItem(name, self.oftenUsed)\n\n def 
on_oftenUsed_itemClicked(self, item):\n params, func = ArbitraryFitter.arby_functions[item.text()]\n self.function.setText(func)\n self.fitparams.setPlainText('\\n'.join(params))\n\n def getFunction(self):\n self.presets.save()\n\n fcnstr = self.function.text()\n try:\n xmin = float(self.xfrom.text())\n except ValueError:\n xmin = None\n try:\n xmax = float(self.xto.text())\n except ValueError:\n xmax = None\n if xmin is not None and xmax is not None and xmin > xmax:\n xmax, xmin = xmin, xmax\n params, values = [], []\n for line in self.fitparams.toPlainText().splitlines():\n name_value = line.strip().split('=', 2)\n if len(name_value) < 2:\n continue\n params.append(name_value[0])\n try:\n values.append(float(name_value[1]))\n except ValueError:\n values.append(1.0)\n\n return fcnstr, params, values, xmin, xmax\n\n\nclass ArbitraryFitter(Fitter):\n title = 'fit'\n\n arby_functions = {\n 'Gaussian x2': (\n ['a =', 'b =', 'c =', 'x1 =', 'x2 =', 's1 =', 's2 ='],\n 'a + b*exp(-(x-x1)**2/s1**2) + c*exp(-(x-x2)**2/s2**2)',\n ),\n 'Gaussian x3 symm.': (\n ['a =', 'b =', 'c =', 'x0 =', 'x1 =', 's0 =', 's1 ='],\n 'a + b*exp(-(x-x0-x1)**2/s1**2) + b*exp(-(x-x0+x1)**2/s1**2) + '\n 'c*exp(-(x-x0)**2/s0**2)',\n ),\n 'Parabola': (\n ['a =', 'b =', 'c ='],\n 'a*x**2 + b*x + c',\n ),\n }\n\n def begin(self):\n dlg = ArbyFitDialog(self.plot)\n ret = dlg.exec_()\n if ret != QDialog.Accepted:\n return\n\n fcnstr, params, values, xmin, xmax = dlg.getFunction()\n\n ns = {}\n exec('from numpy import *', ns)\n try:\n model = eval('lambda x, %s: %s' % (', '.join(params), fcnstr), ns)\n except SyntaxError as e:\n self.plot.showInfo('Syntax error in function: %s' % e)\n return\n\n f = Fit('fit', model, params, values, xmin, xmax)\n res = f.run(*self.data)\n if res._failed:\n self.plot.showInfo('Fitting failed: %s.' 
% res._message)\n return\n res.label_x = res.curve_x[0]\n res.label_y = max(res.curve_y)\n res.label_contents = list(zip(*res._pars))\n\n self.plot._plotFit(res)\n\n\nclass NicosPlotCurve(MaskedPlotCurve):\n\n GR_MARKER_SIZE = 1.0\n\n _parent = ''\n\n def __init__(self, x, y, errBar1=None, errBar2=None,\n linetype=gr.LINETYPE_SOLID, markertype=gr.MARKERTYPE_DOT,\n linecolor=None, markercolor=1, legend=None, fillx=0, filly=0):\n MaskedPlotCurve.__init__(self, x, y, errBar1, errBar2,\n linetype, markertype, linecolor, markercolor,\n legend, fillx=fillx, filly=filly)\n self._dependent = []\n self._enableErrBars = True\n\n @property\n def dependent(self):\n \"\"\"Return dependent objects which implement the GRMeta interface.\"\"\"\n return self._dependent\n\n @dependent.setter\n def dependent(self, value):\n self._dependent = value\n\n @property\n def visible(self):\n return MaskedPlotCurve.visible.__get__(self)\n\n @visible.setter\n def visible(self, flag):\n MaskedPlotCurve.visible.__set__(self, flag)\n for dep in self.dependent:\n dep.visible = flag\n\n def isErrorBarEnabled(self, idx):\n return self._enableErrBars\n\n def setErrorBarEnabled(self, flag):\n \"\"\"Dis/En-able error bars for this curve.\n\n Disabled error bars are not drawn and the corresponding\n property `errorBar{1,2}` returns None.\n\n Note: The internal reference to the `ErrorBar` is still kept and\n restored on enable.\n\n \"\"\"\n self._enableErrBars = flag\n\n @property\n def errorBar1(self):\n if not self._enableErrBars:\n return None\n return MaskedPlotCurve.errorBar1.__get__(self)\n\n @errorBar1.setter\n def errorBar1(self, value):\n MaskedPlotCurve.errorBar1.__set__(self, value)\n\n @property\n def errorBar2(self):\n if not self._enableErrBars:\n return None\n return MaskedPlotCurve.errorBar2.__get__(self)\n\n @errorBar2.setter\n def errorBar2(self, value):\n MaskedPlotCurve.errorBar2.__set__(self, value)\n\n def drawGR(self):\n gr.setmarkersize(self.GR_MARKER_SIZE)\n 
MaskedPlotCurve.drawGR(self)\n for dep in self.dependent:\n if dep.visible:\n dep.drawGR()\n\n\nclass NicosPlot(DlgUtils):\n\n HAS_AUTOSCALE = False\n SAVE_EXT = '.png'\n\n def __init__(self, window, timeaxis=False):\n DlgUtils.__init__(self, 'Plot')\n self.window = window\n self.plotcurves = []\n self.show_all = False\n self.timeaxis = timeaxis\n self.hasSymbols = False\n self.hasLines = True\n\n # currently selected normalization column\n self.normalized = None\n\n self.fitter = None\n\n font = self.window.user_font\n bold = QFont(font)\n bold.setBold(True)\n larger = scaledFont(font, 1.6)\n self.setFonts(font, bold, larger)\n\n def setBackgroundColor(self, color):\n raise NotImplementedError\n\n def setFonts(self, font, bold, larger):\n raise NotImplementedError\n\n def titleString(self):\n raise NotImplementedError\n\n def subTitleString(self):\n return ''\n\n def xaxisName(self):\n raise NotImplementedError\n\n def yaxisName(self):\n raise NotImplementedError\n\n def xaxisScale(self):\n return None\n\n def yaxisScale(self):\n return None\n\n def isLegendEnabled(self):\n \"\"\"Return true if the legend is currently enabled.\"\"\"\n raise NotImplementedError\n\n def setLegend(self, on):\n \"\"\"Switch legend on or off.\"\"\"\n raise NotImplementedError\n\n def isErrorBarEnabled(self):\n raise NotImplementedError\n\n def setErrorBarEnabled(self, on):\n \"\"\"Switch error bars on or off.\"\"\"\n raise NotImplementedError\n\n def setVisibility(self, item, on):\n \"\"\"Set visibility on a plot item.\"\"\"\n raise NotImplementedError\n\n def isLogScaling(self, idx=0):\n \"\"\"Return true if main Y axis is logscaled.\"\"\"\n raise NotImplementedError\n\n def isLogXScaling(self, idx=0):\n \"\"\"Return true if X axis is logscaled.\"\"\"\n raise NotImplementedError\n\n def setLogScale(self, on):\n \"\"\"Set logscale on main Y axis.\"\"\"\n raise NotImplementedError\n\n def setLogXScale(self, on):\n \"\"\"Set logscale on X axis\"\"\"\n raise NotImplementedError\n\n def 
setSymbols(self, on):\n \"\"\"Enable or disable symbols.\"\"\"\n raise NotImplementedError\n\n def setLines(self, on):\n \"\"\"Enable or disable lines.\"\"\"\n raise NotImplementedError\n\n def unzoom(self):\n \"\"\"Unzoom the plot.\"\"\"\n raise NotImplementedError\n\n def addPlotCurve(self, plotcurve, replot=False):\n \"\"\"Add a plot curve.\"\"\"\n raise NotImplementedError\n\n def savePlot(self):\n \"\"\"Save plot, asking user for a filename.\"\"\"\n raise NotImplementedError\n\n def printPlot(self):\n \"\"\"Print plot with print dialog.\"\"\"\n raise NotImplementedError\n\n def saveQuietly(self):\n \"\"\"Save plot quietly to a temporary file with default format.\n\n Return the created filename.\n \"\"\"\n raise NotImplementedError\n\n def visibleCurves(self):\n \"\"\"Return a list of tuples (index, description) of visible curves.\"\"\"\n raise NotImplementedError\n\n def visibleDataCurves(self):\n \"\"\"Return a list of tuples (index, description) of visible curves\n that are not fits.\n \"\"\"\n raise NotImplementedError\n\n def selectCurve(self):\n \"\"\"Let the user select a visible plot curve.\n\n If there is only one curve, return it directly.\n \"\"\"\n visible_curves = self.visibleDataCurves()\n if not visible_curves:\n return\n if len(visible_curves) > 1:\n dlg = dialogFromUi(self, 'panels/selector.ui')\n dlg.setWindowTitle('Select curve to fit')\n dlg.label.setText('Select a curve:')\n for _, descr in visible_curves:\n QListWidgetItem(descr, dlg.list)\n dlg.list.setCurrentRow(0)\n if dlg.exec_() != QDialog.Accepted:\n return\n fitcurve = visible_curves[dlg.list.currentRow()][0]\n else:\n fitcurve = visible_curves[0][0]\n return self.plotcurves[fitcurve]\n\n def beginFit(self, fitterclass, fitteraction, pickmode):\n \"\"\"Begin a fitting operation with given Fitter subclass and QAction.\"\"\"\n if fitteraction and not fitteraction.isChecked():\n # \"unchecking\" the action -> cancel fit\n if self.fitter is not None:\n self.fitter.cancel()\n return\n # 
other fitter: cancel first\n if self.fitter is not None:\n self.fitter.cancel()\n fitcurve = self.selectCurve()\n if not fitcurve:\n return self.showError('Plot must have a visible curve '\n 'to be fitted.')\n self.fitter = fitterclass(self, self.window, fitteraction, fitcurve,\n pickmode)\n self.fitter.begin()\n\n def _getCurveData(self, curve):\n \"\"\"Return [x, y, dy] or [x, y, None] arrays for given curve.\"\"\"\n raise NotImplementedError\n\n def _getCurveLegend(self, curve):\n \"\"\"Return legend string of the curve.\"\"\"\n raise NotImplementedError\n\n def _isCurveVisible(self, curve):\n \"\"\"Return true if curve is currently visible.\"\"\"\n raise NotImplementedError\n\n def _enterFitMode(self):\n raise NotImplementedError\n\n def _leaveFitMode(self):\n raise NotImplementedError\n\n def _fitRequestPick(self, paramname):\n raise NotImplementedError\n\n def _plotFit(self, fitter):\n raise NotImplementedError\n\n def getViewport(self):\n raise NotImplementedError\n\n def modifyData(self):\n visible_curves = self.visibleCurves()\n # get input from the user: which curves should be modified how\n dlg = dialogFromUi(self, 'panels/modify.ui')\n\n def checkAll():\n for i in range(dlg.list.count()):\n dlg.list.item(i).setCheckState(Qt.Checked)\n dlg.selectall.clicked.connect(checkAll)\n for i, descr in visible_curves:\n li = QListWidgetItem(descr, dlg.list)\n if len(visible_curves) == 1:\n li.setCheckState(Qt.Checked)\n dlg.operation.setFocus()\n else:\n li.setCheckState(Qt.Unchecked)\n if dlg.exec_() != QDialog.Accepted:\n return\n # evaluate selection\n op = dlg.operation.text()\n curves = []\n for i in range(dlg.list.count()):\n li = dlg.list.item(i)\n if li.checkState() == Qt.Checked:\n curves.append(i)\n\n # modify curve data\n for i in curves:\n curve = self.plotcurves[visible_curves[i][0]]\n self._modifyCurve(curve, op)\n self.update()\n\n def _modifyCurve(self, curve, op):\n raise NotImplementedError\n\n\nclass NicosGrPlot(NicosPlot, 
InteractiveGRWidget):\n\n axescls = NicosPlotAxes\n HAS_AUTOSCALE = True\n SAVE_EXT = '.svg'\n\n def __init__(self, parent, window, timeaxis=False):\n InteractiveGRWidget.__init__(self, parent)\n NicosPlot.__init__(self, window, timeaxis=timeaxis)\n\n self.timeaxis = timeaxis or (self.axescls == NicosTimePlotAxes)\n self.leftTurnedLegend = True\n self.statusMessage = None\n self.mouselocation = None\n self._cursor = self.cursor()\n self._mouseSelEnabled = self.getMouseSelectionEnabled()\n self._markertype = gr.MARKERTYPE_OMARK\n\n dictPrintType = dict(gr.PRINT_TYPE)\n for prtype in [gr.PRINT_JPEG, gr.PRINT_TIF]:\n dictPrintType.pop(prtype)\n self._saveTypes = (\";;\".join(dictPrintType.values()) + \";;\" +\n \";;\".join(gr.GRAPHIC_TYPE.values()))\n self._saveName = None\n self._color = ColorIndexGenerator()\n self._plot = Plot(viewport=(.1, .85, .15, .88))\n self._plot.setLegendWidth(0.05)\n self._axes = self.axescls(viewport=self._plot.viewport)\n self._axes.backgroundColor = 0\n self._plot.addAxes(self._axes)\n self._plot.title = self.titleString()\n self.addPlot(self._plot)\n\n self.cbm.addHandler(LegendEvent.ROI_CLICKED, self.on_legendItemClicked,\n LegendEvent)\n self.cbm.addHandler(ROIEvent.ROI_CLICKED, self.on_roiItemClicked, ROIEvent)\n self.cbm.addHandler(MouseEvent.MOUSE_PRESS, self.on_fitPicker_selected)\n self.cbm.addHandler(MouseEvent.MOUSE_MOVE, self.on_mouseMove)\n self.logXinDomain.connect(self.on_logXinDomain)\n self.logYinDomain.connect(self.on_logYinDomain)\n self.setLegend(True)\n self.updateDisplay()\n\n def xtickCallBack(self, x, y, _svalue, value):\n gr.setcharup(-1. 
if self.leftTurnedLegend else 1., 1.)\n gr.settextalign(gr.TEXT_HALIGN_RIGHT if self.leftTurnedLegend else\n gr.TEXT_HALIGN_LEFT, gr.TEXT_VALIGN_TOP)\n dx = .015\n timeVal = time.localtime(value)\n gr.text(x + (dx if self.leftTurnedLegend else -dx), y,\n time.strftime(DATEFMT, timeVal))\n gr.text(x - (dx if self.leftTurnedLegend else -dx), y,\n time.strftime(TIMEFMT, timeVal))\n gr.setcharup(0., 1.)\n\n def setAutoScaleFlags(self, xflag, yflag):\n mask = 0x0\n if xflag:\n mask |= PlotAxes.SCALE_X\n if yflag:\n mask |= PlotAxes.SCALE_Y\n self.setAutoScale(mask)\n\n def setBackgroundColor(self, color):\n pass # not implemented\n\n def setFonts(self, font, bold, larger):\n pass # not implemented\n\n def updateDisplay(self):\n self._plot.title = self.titleString()\n if self.subTitleString():\n self._plot.subTitle = self.subTitleString()\n self._plot.xlabel = self.xaxisName()\n self._plot.ylabel = self.yaxisName()\n if self.normalized:\n self._plot.ylabel += \" (norm: %s)\" % self.normalized\n\n self.plotcurves = []\n self.addAllCurves()\n if self.timeaxis:\n self._plot.viewport = (.1, .85, .18, .88)\n self._axes.setXtickCallback(self.xtickCallBack)\n self._plot.offsetXLabel = -.08\n\n scale = self.yaxisScale() # pylint: disable=assignment-from-none\n if scale:\n axes = self._plot.getAxes(0)\n curwin = axes.getWindow()\n if not curwin:\n curwin = [0, 1, scale[0], scale[1]]\n curves = axes.getCurves()\n xmins = []\n xmaxs = []\n for c in curves:\n if c.visible:\n xmins.append(min(c.x))\n xmaxs.append(max(c.x))\n if xmins and xmaxs:\n curwin[0] = min(xmins)\n curwin[1] = max(xmaxs)\n axes.setWindow(curwin[0], curwin[1], scale[0], scale[1])\n InteractiveGRWidget.update(self)\n\n def isLegendEnabled(self):\n return self._plot.isLegendEnabled()\n\n def setLegend(self, on):\n self._plot.setLegend(on)\n self.update()\n\n def isLogScaling(self, idx=0):\n axes = self._plot.getAxes(idx)\n return axes.scale & gr.OPTION_Y_LOG if axes is not None else False\n\n def 
isLogXScaling(self, idx=0):\n axes = self._plot.getAxes(idx)\n return axes.scale & gr.OPTION_X_LOG if axes is not None else False\n\n def setLogScale(self, on):\n self._plot.setLogY(on, rescale=True)\n self.update()\n\n def setLogXScale(self, on):\n self._plot.setLogX(on, rescale=True)\n self.update()\n\n def isErrorBarEnabled(self):\n axes = self._plot.getAxes(0)\n if axes:\n curves = axes.getCurves()\n if curves:\n return curves[0].isErrorBarEnabled(1)\n return False\n\n def setErrorBarEnabled(self, on):\n for axis in self._plot.getAxes():\n for curve in axis.getCurves():\n curve.setErrorBarEnabled(on)\n self.update()\n\n def setSymbols(self, on):\n markertype = self._markertype if on else gr.MARKERTYPE_DOT\n for axis in self._plot.getAxes():\n for curve in axis.getCurves():\n curve.markertype = markertype\n self.hasSymbols = on\n self.update()\n\n def setLines(self, on):\n linetype = None\n if on:\n linetype = gr.LINETYPE_SOLID\n for axis in self._plot.getAxes():\n for curve in axis.getCurves():\n curve.linetype = linetype\n self.hasLines = on\n self.update()\n\n def unzoom(self):\n self._plot.reset()\n self.update()\n\n def on_logXinDomain(self, flag):\n if not flag:\n self._plot.setLogX(flag)\n self.update()\n\n def on_logYinDomain(self, flag):\n if not flag:\n self.setLogScale(flag)\n\n def on_legendItemClicked(self, event):\n if event.getButtons() & MouseEvent.LEFT_BUTTON:\n event.curve.visible = not event.curve.visible\n if event.curve._parent:\n event.curve._parent.disabled = not event.curve._parent.disabled\n self.update()\n\n def on_roiItemClicked(self, event):\n if event.getButtons() & MouseEvent.RIGHT_BUTTON:\n if isinstance(event.roi.reference, FitResult):\n menu = QMenu(self)\n actionClipboard = QAction(\"Copy fit values to clipboard\", menu)\n menu.addAction(actionClipboard)\n p0dc = event.getDC()\n selectedItem = menu.exec_(self.mapToGlobal(QPoint(p0dc.x,\n p0dc.y)))\n if selectedItem == actionClipboard:\n res = event.roi.reference\n text = 
'\\n'.join(\n (n + '\\t' if n else '\\t') +\n (v + '\\t' if isinstance(v, str)\n else '%g\\t' % v) +\n (dv if isinstance(dv, str)\n else '%g' % dv)\n for (n, v, dv) in res.label_contents)\n QApplication.clipboard().setText(text)\n\n def on_mouseMove(self, event):\n if event.getWindow(): # inside plot\n self.mouselocation = event\n wc = event.getWC(self._plot.viewport)\n if self.statusMessage:\n msg = \"%s (X = %g, Y = %g)\" % (self.statusMessage, wc.x, wc.y)\n else:\n msg = \"X = %g, Y = %g\" % (wc.x, wc.y)\n self.window.statusBar.showMessage(msg)\n else:\n self.window.statusBar.clearMessage()\n\n def addPlotCurve(self, plotcurve, replot=False):\n existing_curve = next((c for c in self._axes.getCurves()\n if c._parent is plotcurve._parent), None)\n if existing_curve and not replot:\n existing_curve.visible = plotcurve.visible\n existing_curve.legend = plotcurve.legend\n existing_curve.setUpdateXCallback(None)\n existing_curve.setUpdateYCallback(None)\n # update curve\n existing_curve.x, existing_curve.y = plotcurve.x, plotcurve.y\n if plotcurve.errorBar1 and existing_curve.errorBar1:\n mcolor = existing_curve.errorBar1.markercolor\n existing_curve.errorBar1 = plotcurve.errorBar1\n existing_curve.errorBar1.markercolor = mcolor\n else:\n existing_curve.errorBar1 = plotcurve.errorBar1\n if plotcurve.errorBar2 and existing_curve.errorBar2:\n mcolor = existing_curve.errorBar2.markercolor\n existing_curve.errorBar2 = plotcurve.errorBar2\n existing_curve.errorBar2.markercolor = mcolor\n else:\n existing_curve.errorBar2 = plotcurve.errorBar2\n if existing_curve not in self.plotcurves:\n self.plotcurves.append(existing_curve)\n else:\n color = self._color.getNextColorIndex()\n plotcurve.linecolor = color\n plotcurve.markercolor = color\n plotcurve.markertype = self._markertype if self.hasSymbols \\\n else gr.MARKERTYPE_DOT\n if plotcurve.errorBar1:\n plotcurve.errorBar1.markercolor = color\n if plotcurve.errorBar2:\n plotcurve.errorBar2.markercolor = color\n 
self._axes.addCurves(plotcurve)\n self.plotcurves.append(plotcurve)\n\n def savePlot(self):\n self._saveName = savePlot(self, gr.PRINT_TYPE[gr.PRINT_PDF],\n self._saveName)\n return self._saveName\n\n def printPlot(self):\n self.printDialog(\"Nicos-\" + self._saveName if self._saveName\n else \"untitled\")\n return True\n\n @property\n def plot(self): # pylint: disable=invalid-overridden-method\n \"\"\"Get current gr.pygr.Plot object.\"\"\"\n return self._plot\n\n def _save(self, extension=\".pdf\"):\n fd, pathname = tempfile.mkstemp(extension)\n self.save(pathname)\n os.close(fd)\n return pathname\n\n def saveQuietly(self):\n return self._save(\".svg\")\n\n def _getCurveData(self, curve):\n errBar1 = curve.errorBar1\n return [curve.x, curve.y, errBar1.dpos if errBar1 else None]\n\n def _getCurveLegend(self, curve):\n return curve.legend\n\n def _isCurveVisible(self, curve):\n return curve.visible\n\n def setVisibility(self, item, on):\n item.visible = on\n if item._parent:\n item._parent.disabled = not on\n\n def _enterFitMode(self):\n self.window.statusBar.showMessage(self.statusMessage)\n self._cursor = self.cursor()\n self.setCursor(QCursor(Qt.CrossCursor))\n self._mouseSelEnabled = self.getMouseSelectionEnabled()\n self.setMouseSelectionEnabled(False)\n\n def _fitRequestPick(self, paramname):\n self.statusMessage = 'Fitting: Click on %s' % paramname\n self.window.statusBar.showMessage(self.statusMessage)\n\n def _leaveFitMode(self):\n self.fitter = None\n self.statusMessage = None\n self.setCursor(self._cursor)\n self.setMouseSelectionEnabled(self._mouseSelEnabled)\n\n def _plotFit(self, fitter):\n color = self._color.getNextColorIndex()\n resultcurve = NicosPlotCurve(fitter.curve_x, fitter.curve_y,\n legend=fitter._title,\n linecolor=color, markercolor=color)\n self.addPlotCurve(resultcurve, True)\n resultcurve.markertype = gr.MARKERTYPE_DOT\n self.window.statusBar.showMessage(\"Fitting complete\")\n\n text = '\\n'.join(\n (n + ': ' if n else '') +\n (v if 
isinstance(v, str) else '%g' % v) +\n (dv if isinstance(dv, str) else ' +/- %g' % dv)\n for (n, v, dv) in fitter.label_contents)\n grtext = Text(fitter.label_x, fitter.label_y, text, self._axes, .012,\n hideviewport=False)\n resultcurve.dependent.append(grtext)\n coord = CoordConverter(self._axes.sizex, self._axes.sizey,\n self._axes.getWindow())\n roi = RegionOfInterest(reference=fitter, regionType=RegionOfInterest.TEXT,\n axes=self._axes)\n for nxi, nyi in zip(*grtext.getBoundingBox()):\n coord.setNDC(nxi, nyi)\n roi.append(coord.getWC(self._axes.viewport))\n self._plot.addROI(roi)\n self.update()\n\n def on_fitPicker_selected(self, point):\n if self.fitter and point.getButtons() & MouseEvent.LEFT_BUTTON and \\\n point.getWindow():\n p = point.getWC(self._plot.viewport)\n self.fitter.addPick((p.x, p.y))\n\n def _modifyCurve(self, curve, op):\n new_y = [eval(op, {'x': v1, 'y': v2})\n for (v1, v2) in zip(curve.x, curve.y)]\n if curve.errorBar1:\n curve.errorBar1.y = new_y\n if curve.errorBar2:\n curve.errorBar2.y = new_y\n curve.y = new_y\n\n def setMarkerType(self, markertype):\n self._markertype = markertype\n\n\nclass ViewPlot(NicosGrPlot):\n\n axescls = NicosTimePlotAxes\n\n def __init__(self, parent, window, view):\n self.view = view\n self.series2curve = {}\n NicosGrPlot.__init__(self, parent, window, timeaxis=True)\n self.setSymbols(False)\n\n def cleanup(self):\n self.view = None\n self._axes.setXtickCallback(None)\n\n def titleString(self):\n return self.view.name\n\n def xaxisName(self):\n return 'time'\n\n def yaxisName(self):\n return 'value'\n\n def yaxisScale(self):\n if self.view.yfrom is not None:\n return (self.view.yfrom, self.view.yto)\n\n def on_mouseMove(self, event):\n wc = event.getWC(self._plot.viewport)\n # overridden to show the correct timestamp\n ts = time.strftime(DATEFMT + ' ' + TIMEFMT, time.localtime(wc.x))\n if self.statusMessage:\n msg = \"%s (X = %s, Y = %g)\" % (self.statusMessage, ts, wc.y)\n else:\n msg = \"X = %s, Y = %g\" % 
(ts, wc.y)\n self.window.statusBar.showMessage(msg)\n\n def addAllCurves(self):\n for i, series in enumerate(self.view.series.values()):\n self.addCurve(i, series)\n visiblePlotCurves = self._axes.getVisibleCurves()\n if visiblePlotCurves:\n self._axes.curveDataChanged(visiblePlotCurves[-1])\n\n def addCurve(self, i, series, replot=False):\n plotcurve = None\n n = series.n\n if n > 0:\n color = self._color.getNextColorIndex()\n plotcurve = NicosPlotCurve(series.x, series.y,\n legend=series.title,\n linecolor=color, markercolor=color)\n plotcurve._parent = series\n self.series2curve[series] = plotcurve\n self.addPlotCurve(plotcurve, replot)\n return plotcurve\n\n def visibleCurves(self):\n return [(i, self._getCurveLegend(plotcurve))\n for (i, plotcurve) in enumerate(self.plotcurves)\n if self._isCurveVisible(plotcurve)]\n\n def visibleDataCurves(self):\n return [(i, self._getCurveLegend(plotcurve))\n for (i, plotcurve) in enumerate(self.plotcurves)\n if self._isCurveVisible(plotcurve)\n and 'fit' not in self._getCurveLegend(plotcurve)]\n\n def pointsAdded(self, series):\n plotcurve = self.series2curve[series]\n plotcurve.x = series.x\n plotcurve.y = series.y\n plotcurve.legend = series.title\n self._axes.addCurves(plotcurve)\n InteractiveGRWidget.update(self)\n\n def setSlidingWindow(self, window):\n self._axes.slidingwindow = window\n\n def getViewport(self):\n return self._plot.getAxes(0).getWindow()\n\n def saveData(self):\n curvenames = [self._getCurveLegend(plotcurve)\n for plotcurve in self.plotcurves]\n dlg = DataExportDialog(self, curvenames,\n 'Select curve(s), file name and format',\n '', 'ASCII data files (*.dat)')\n res = dlg.exec_()\n if res != QDialog.Accepted:\n return\n if not dlg.selectedFiles():\n return\n fmtno = dlg.formatCombo.currentIndex()\n sel_filename = dlg.selectedFiles()[0]\n if '.' 
not in sel_filename:\n sel_filename += '.dat'\n base, ext = path.splitext(sel_filename)\n\n curve_index = dlg.curveCombo.currentIndex()\n if curve_index == 0:\n curvedata = [convertXCol(fmtno, *self._getCurveData(c)[:2])\n for c in self.plotcurves]\n if len(curvedata) > 1:\n filenames = [base + '_' +\n safeName(self._getCurveLegend(c)) + ext\n for c in self.plotcurves]\n else:\n filenames = [sel_filename]\n elif curve_index == 1:\n curvedata = [synthesizeSingleCurveData(\n [self._getCurveData(c)[:2] for c in self.plotcurves], fmtno)]\n filenames = [sel_filename]\n else:\n curve = self.plotcurves[curve_index - 2]\n curvedata = [convertXCol(fmtno, *self._getCurveData(curve)[:2])]\n filenames = [sel_filename]\n\n for curve, filename in zip(curvedata, filenames):\n np.savetxt(filename, curve, fmt='%s')\n\n\ndef convertXCol(fmtno, x, *ys):\n ystack = [np.asarray(y) for y in ys]\n if fmtno == 0: # seconds since first datapoint\n x = np.asarray(x)\n return np.stack([x - x[0]] + ystack, 1)\n elif fmtno == 1: # UNIX timestamp\n x = np.asarray(x)\n return np.stack([x] + ystack, 1)\n elif fmtno == 2: # formatted time\n return np.stack([np.array([time.strftime('%Y-%m-%d.%H:%M:%S',\n time.localtime(v))\n for v in x])] + ystack, 1)\n raise NotImplementedError('invalid time format')\n\n\ndef synthesizeSingleCurveData(curvedata, fmtno, window=0.1):\n \"\"\"Generate a single matrix with value Y1...Yn for a single time column\n from a list of separate (X, Y) curves\n\n Y values of curves that don't have data for inbetween points is not\n interpolated, but the last value is repeated.\n \"\"\"\n ncurves = len(curvedata)\n lastvalues = [None] * ncurves\n indices = [0] * ncurves\n times = []\n points = [[] for _ in range(ncurves)]\n timestamps = [c[0] for c in curvedata]\n yvalues = [c[1] for c in curvedata]\n\n while True:\n # find the curve with the least unused timestamp\n ileast = min(range(ncurves),\n key=lambda i: timestamps[i][indices[i]])\n lastvalues[ileast] = 
yvalues[ileast][indices[ileast]]\n ts = timestamps[ileast][indices[ileast]]\n indices[ileast] += 1\n # find any curves where the next unused timestamp is close to the\n # found least timestamp\n for i in range(ncurves):\n if i != ileast and timestamps[i][indices[i]] - ts <= window:\n lastvalues[i] = yvalues[i][indices[i]]\n indices[i] += 1\n # once all curves have seen a value, synthesize a point with the\n # current \"lastvalues\"\n if None not in lastvalues:\n times.append(ts)\n for pts, value in zip(points, lastvalues):\n pts.append(value)\n # if any of the curves have been exhausted, stop\n if any(indices[i] >= len(timestamps[i]) for i in range(ncurves)):\n break\n\n return convertXCol(fmtno, times, *points)\n\n\nclass DataSetPlot(NicosGrPlot):\n\n axescls = NicosPlotAxes\n\n def __init__(self, parent, window, dataset):\n self.dataset = dataset\n self.current_xname = dataset.default_xname\n NicosGrPlot.__init__(self, parent, window)\n self.setSymbols(True)\n\n def titleString(self):\n return \"Scan %s %s\" % (self.dataset.name, self.dataset.scaninfo)\n\n def subTitleString(self):\n return \"started %s\" % time.strftime(DATEFMT + ' ' + TIMEFMT,\n self.dataset.started)\n\n def xaxisName(self):\n return self.current_xname\n\n def yaxisName(self):\n return ''\n\n def addAllCurves(self):\n for i, curve in enumerate(self.dataset.curves):\n self.addCurve(i, curve)\n visiblePlotCurves = self._axes.getVisibleCurves()\n if visiblePlotCurves:\n self._axes.curveDataChanged(visiblePlotCurves[-1])\n\n def addCurve(self, i, curve, replot=False):\n if self.current_xname != 'Default' and \\\n self.current_xname not in curve.datax:\n return\n if not curve.datay:\n return\n plotcurve = NicosPlotCurve([], [], filly=0.1)\n plotcurve._parent = curve\n self.setCurveData(curve, plotcurve)\n self.addPlotCurve(plotcurve, replot)\n if curve.function:\n plotcurve.markertype = gr.MARKERTYPE_DOT\n return plotcurve\n\n def setCurveData(self, curve, plotcurve):\n xname = curve.default_xname 
\\\n if self.current_xname == 'Default' else self.current_xname\n if self.normalized == 'Maximum':\n norm = [max(curve.datay)] * len(curve.datay)\n else:\n norm = curve.datanorm[self.normalized] if self.normalized else None\n try:\n x, y, dy = prepareData(curve.datax[xname], curve.datay,\n curve.datady, norm)\n except ValueError:\n # empty column, should be ignored\n x, y, dy = np.array([0]), np.array([0]), None\n y = numpy.ma.masked_equal(y, 0)\n if dy is not None:\n errbar = ErrorBar(x, y, dy, markercolor=plotcurve.markercolor)\n plotcurve.errorBar1 = errbar\n plotcurve.x = x\n plotcurve.y = y\n plotcurve.filly = 0.1 if self.isLogScaling() else 0\n plotcurve.visible = not (curve.disabled or curve.hidden or not x.size)\n plotcurve.legend = curve.full_description if not curve.hidden else ''\n\n def enableCurvesFrom(self, otherplot):\n visible = {}\n for curve in otherplot.plotcurves:\n visible[self._getCurveLegend(curve)] = self._isCurveVisible(curve)\n changed = False\n remaining = len(self.plotcurves)\n for plotcurve in self.plotcurves:\n namestr = self._getCurveLegend(plotcurve)\n if namestr in visible:\n self.setVisibility(plotcurve, visible[namestr])\n changed = True\n if not visible[namestr]:\n remaining -= 1\n # no visible curve left? 
enable all of them again\n if not remaining:\n for plotcurve in self.plotcurves:\n # XXX only if it has a legend item (excludes monitor/time columns)\n self.setVisibility(plotcurve, True)\n if changed:\n self.update()\n\n def visibleCurves(self):\n return [(i, curve.full_description)\n for (i, curve) in enumerate(self.dataset.curves)\n if self._isCurveVisible(self.plotcurves[i])]\n\n def visibleDataCurves(self):\n # visibleCurves only includes data curves anyway\n return self.visibleCurves()\n\n def setLogScale(self, on):\n NicosGrPlot.setLogScale(self, on)\n filly = .1 if self.isLogScaling() else 0\n for axis in self._plot.getAxes():\n for curve in axis.getCurves():\n curve.filly = filly\n self.update()\n\n def pointsAdded(self):\n for curve, plotcurve in zip(self.dataset.curves, self.plotcurves):\n self.setCurveData(curve, plotcurve)\n if self.plotcurves and len(self.plotcurves[0].x) == 2:\n # When there is only one point, GR autoselects a range related to\n # the magnitude of the point. Now that we have two points, we can\n # scale to actual X interval of the scan.\n self._axes.reset()\n self.updateDisplay()\n\n def fitQuick(self):\n if not self.mouselocation:\n return\n (_coord, _axes, curve) = self._plot.pick(self.mouselocation.getNDC(),\n self.dwidth, self.dheight)\n if not curve:\n return\n self.fitter = GaussFitter(self, self.window, None, curve, False)\n self.fitter.begin()\n\n def getViewport(self):\n return self._plot.getAxes(0).getWindow()\n"
] |
[
[
"numpy.array",
"numpy.savetxt",
"numpy.asarray",
"numpy.log",
"numpy.stack",
"numpy.isfinite"
]
] |
ofir-frd/Machine-Learning-Bootcamp
|
[
"71233cf3764b528c39438d5d45b433f094456717"
] |
[
"gradient-boosting/main.py"
] |
[
"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.model_selection import KFold\r\nimport plotly.express as px\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\n\r\n\r\n# import data and preprocess it\r\ndef preprocessing(file_name: str):\r\n\r\n # data import\r\n fish_df = pd.read_csv(file_name)\r\n fish_df = pd.get_dummies(fish_df, columns=['Species'], prefix='Species')\r\n\r\n return fish_df\r\n\r\n\r\n# train-test split by a percentage.\r\n# input: dataframe, label column name, split ration, and random state\r\n# returns: x_train, x_test, y_train, y_test\r\ndef split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):\r\n\r\n x_train = user_df.sample(frac=split_ratio, random_state=random_value)\r\n x_test = user_df.drop(x_train.index)\r\n\r\n return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(\r\n x_train[label_name]), pd.DataFrame(x_test[label_name])\r\n\r\n\r\n# Create as arrays of trees in a given size and depth\r\ndef create_random_forest(forest_size: int, max_depth: int, random_state_local: int):\r\n\r\n random_forest = []\r\n for i in range(0, forest_size, 1):\r\n\r\n random_forest.append(DecisionTreeRegressor(criterion='friedman_mse', max_depth=max_depth,\r\n random_state=random_state_local))\r\n\r\n return random_forest\r\n\r\n\r\n# train trees in a forest by fitting each tree to the previous tree's error\r\n# input: forest of trees, initial training guess, x and y databases, alpha coefficient.\r\n# returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error)\r\ndef train_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,\r\n alpha: float = 0.1):\r\n\r\n # initial average weight and residuals to be used in the 1st tree\r\n predictions = np.ones(len(y_df))*initial_average_weight\r\n residuals = 
np.array(y_df['Weight'])-predictions\r\n residuals_matrix = [residuals]\r\n\r\n # calculates the first mse value\r\n mse_list = [(np.square(residuals)).sum()/len(predictions)]\r\n\r\n for tree in random_forest:\r\n\r\n # train the current stump\r\n tree.fit(x_df, residuals)\r\n\r\n # predict results based on its training error\r\n residuals = tree.predict(x_df)\r\n\r\n # record residuals and calculate mse\r\n residuals_matrix.append(residuals)\r\n mse_list.append((np.square(residuals)).sum()/len(predictions))\r\n\r\n # update predictions and calculate new residuals\r\n predictions = predictions + alpha * residuals\r\n residuals = np.array(y_df['Weight']) - predictions\r\n\r\n return random_forest, predictions, residuals_matrix, mse_list\r\n\r\n\r\n# predict test database by the trained random forest\r\n# input: forest of trees, initial training guess, x and y databases.\r\n# returns: mse_list of the forest (mean square error)\r\ndef test_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,\r\n alpha: float = 0.1):\r\n\r\n predictions = np.ones(len(y_df))*initial_average_weight\r\n mse_list = [(np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions)]\r\n\r\n for tree in random_forest:\r\n\r\n predictions = predictions + alpha * tree.predict(x_df)\r\n mse_list.append((np.square(np.array(y_df['Weight']) - predictions)).sum()//len(predictions))\r\n\r\n return predictions, mse_list\r\n\r\n\r\ndef main():\r\n\r\n # data import and preprocessing\r\n fish_df = preprocessing(\"Fish.csv\")\r\n\r\n # splitting of the data\r\n x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42)\r\n\r\n # setting up a random forest:\r\n #forest_size_list = [4, 5, 6, 7, 8] # variable calibrated by KFold train-validate\r\n forest_size = 20\r\n # max_depth_list = [1, 2, 3, 4, 5] # variable calibrated by KFold train-validate\r\n max_depth = 3\r\n random_state_local = 42\r\n random_forest = 
create_random_forest(forest_size, max_depth, random_state_local)\r\n\r\n #%% Train\r\n #alpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] # variable calibrated by KFold train-validate\r\n alpha = 0.5 # gradiant coefficient\r\n \r\n kf = KFold(n_splits=2, shuffle=True, random_state=42)\r\n for train_index, test_index in kf.split(x_train, y_train):\r\n\r\n X_train, X_validate = x_train.iloc[train_index], x_train.iloc[test_index]\r\n Y_train, Y_validate = y_train.iloc[train_index], y_train.iloc[test_index]\r\n\r\n # first guess\r\n initial_average_weight = np.average(Y_train['Weight'].tolist())\r\n\r\n # train forest\r\n random_forest, predictions_train, r_matrix, mse_list_train = train_forest(random_forest, initial_average_weight,\r\n X_train, Y_train, alpha)\r\n\r\n # validate\r\n predictions_validate, mse_list_validate = test_forest(random_forest, initial_average_weight, X_validate,\r\n Y_validate, alpha)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])\r\n results['Train'] = mse_list_train\r\n results['Validation'] = mse_list_validate\r\n fig = px.scatter(results, x='tree_intervals', y=['Train', 'Validation'], size='tree_intervals')\r\n fig.update_layout(xaxis_title=\"Amount of Intervals (num.)\", yaxis_title=\"mean square error\")\r\n fig.show()\r\n\r\n #%% Test\r\n predictions_test, mse_list_test = test_forest(random_forest, initial_average_weight, x_test, y_test, alpha)\r\n\r\n # %% plot success rate vs tree intervals\r\n fig = make_subplots(rows=1, cols=3, subplot_titles=('Train', 'Validation', 'Test'),\r\n x_title='Amount of Intervals (num.)', y_title='mean square error')\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])\r\n results['Train'] = mse_list_train\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Train'], name='Train'), row=1, col=1)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])\r\n 
results['Validation'] = mse_list_validate\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Validation'], name='Validation'), row=1, col=2)\r\n\r\n results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])\r\n results['Test'] = mse_list_test\r\n fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Test'], name='Test'), row=1, col=3)\r\n\r\n fig.update_layout(title_text=\"Random Forest Gradient Boosting\")\r\n fig.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] |
[
[
"numpy.square",
"numpy.array",
"pandas.DataFrame",
"numpy.arange",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.model_selection.KFold",
"pandas.read_csv",
"pandas.get_dummies"
]
] |
DimitriPapadopoulos/python-caterva
|
[
"f2162c2cdaea8a818ad948afee1555db3747e3a3"
] |
[
"tests/test_copy.py"
] |
[
"#######################################################################\n# Copyright (C) 2019-present, Blosc Development team <blosc@blosc.org>\n# All rights reserved.\n#\n# This source code is licensed under a BSD-style license (found in the\n# LICENSE file in the root directory of this source tree)\n#######################################################################\n\nimport caterva as cat\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.parametrize(\"shape, chunks1, blocks1, chunks2, blocks2, itemsize\",\n [\n ([521], [212], [33], [121], [18], 8),\n ([20, 134, 13], [10, 43, 10], [3, 13, 5], [10, 43, 10], [3, 6, 5], 4),\n ([12, 13, 14, 15, 16], [6, 6, 6, 6, 6], [2, 2, 2, 2, 2],\n [7, 7, 7, 7, 7], [3, 3, 5, 3, 3], 8)\n ])\ndef test_copy(shape, chunks1, blocks1, chunks2, blocks2, itemsize):\n size = int(np.prod(shape))\n buffer = bytes(size * itemsize)\n a = cat.from_buffer(buffer, shape, itemsize, chunks=chunks1, blocks=blocks1,\n complevel=2)\n b = a.copy(chunks=chunks2, blocks=blocks2,\n itemsize=itemsize, complevel=5, filters=[cat.Filter.BITSHUFFLE])\n buffer2 = b.to_buffer()\n assert buffer == buffer2\n\n\n@pytest.mark.parametrize(\"shape, chunks1, blocks1, chunks2, blocks2, dtype\",\n [\n ([521], [212], [33], [121], [18], \"i8\"),\n ([20, 134, 13], [10, 43, 10], [3, 13, 5], [10, 43, 10], [3, 6, 5], \"f4\"),\n ([12, 13, 14, 15, 16], [6, 6, 6, 6, 6], [2, 2, 2, 2, 2],\n [7, 7, 7, 7, 7], [3, 3, 5, 3, 3], \"f8\")\n ])\ndef test_copy_numpy(shape, chunks1, blocks1, chunks2, blocks2, dtype):\n size = int(np.prod(shape))\n nparray = np.arange(size, dtype=dtype).reshape(shape)\n a = cat.asarray(nparray, chunks=chunks1, blocks=blocks1)\n b = a.copy(chunks=chunks2, blocks=blocks2, complevel=5, filters=[cat.Filter.BITSHUFFLE])\n if chunks2:\n b = b[...]\n nparray2 = np.asarray(b).view(dtype)\n np.testing.assert_almost_equal(nparray, nparray2)\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.prod",
"numpy.arange",
"numpy.asarray"
]
] |
DAIRLab/drake-pytorch
|
[
"3c7e33d58f1ad26008bd89f3e0fe1951b5175d3b"
] |
[
"rigid_chain_performance_test.py"
] |
[
"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import List, Tuple, Callable, Type, Union, Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module, ModuleList, Parameter, ParameterList\nimport drake_pytorch\nimport numpy as np\n\nfrom pydrake.all import (Parser, AddMultibodyPlantSceneGraph, DiagramBuilder,\n FindResourceOrThrow, MultibodyForces_, Expression,\n RotationalInertia_, SpatialInertia_, UnitInertia_,\n JacobianWrtVariable, MakeVectorVariable, Variable)\n\nimport pdb\nimport timeit\n\n\n\n\n\nname = 'chain'\nbuilder = DiagramBuilder()\nplant, _ = AddMultibodyPlantSceneGraph(builder, 0.0)\nParser(plant).AddModelFromFile('./three_links.urdf', name)\nplant.Finalize()\n\n\nsym_plant = plant.ToSymbolic()\ncontext = sym_plant.CreateDefaultContext()\n\nmodel_index = sym_plant.GetModelInstanceByName(name)\n\nbody_indices = sym_plant.GetBodyIndices(model_index)\nbody_frame_ids = [sym_plant.GetBodyFrameIdOrThrow(body_index) for body_index in body_indices]\nbodies = [sym_plant.get_body(body_index) for body_index in body_indices]\n\njoint_indices = sym_plant.GetJointIndices(model_index)\njoints = [sym_plant.get_joint(joint_index) for joint_index in joint_indices]\n\n\nworld_frame = sym_plant.world_frame()\n\nfree_root = sym_plant.GetUniqueFreeBaseBodyOrThrow(model_index)\n\n\nq = MakeVectorVariable(sym_plant.num_positions(), 'q')\nv = MakeVectorVariable(sym_plant.num_velocities(), 'v')\n\nsym_plant.SetPositionsAndVelocities(context, model_index, np.hstack([q,v]))\n\nprint('creating inertial parameters')\nbody_inertia_variables = []\nfor body in bodies:\n body_name = body.name()\n\n # construct inertial symbolic parameters\n # mass of body\n m_B = Variable(f'm_{body_name}')\n\n # origin Bo to c.o.m. Bo in body axes B\n P_BoBcm_B = MakeVectorVariable(3, f'com_{body_name}')\n\n # unit inertia (Ixx Iyy Izz Ixy Ixz Iyz) about c.o.m. 
Bcm in body axes\n I_BBcm_B = MakeVectorVariable(6, f'I_{body_name}')\n\n\n # set symbolic quantities\n body.SetMass(context, m_B)\n\n # construct SpatialInertia from sym quantities\n body_spatial_inertia = SpatialInertia_[Expression](m_B,\n P_BoBcm_B, UnitInertia_[Expression](*I_BBcm_B))\n\n body.SetSpatialInertiaInBodyFrame(context, body_spatial_inertia)\n body_inertia_variables.append(\n np.hstack((m_B, P_BoBcm_B, I_BBcm_B)))\n\n\nbody_inertia_variable_matrix = np.vstack(body_inertia_variables)\nprint('Calculating mass matrix')\nM = sym_plant.CalcMassMatrixViaInverseDynamics(context)\n\nprint('Generating pytorch mass matrix function')\n[func_M, string_M] = drake_pytorch.sym_to_pytorch(M, q, body_inertia_variable_matrix, simplify_computation = drake_pytorch.Simplifier.QUICKTRIG)\n\nprint('Printing generated code\\n\\n')\nprint(string_M)\nprint('\\n\\n')\n\n\nbivm = torch.rand((len(bodies), 10))\nbivm.requires_grad = True\n\n\nq = torch.tensor([1.,2,3,4,5,6,7,8,9])\nq[0:4] /= q[0:4].norm()\n#pdb.set_trace()\nprint('Estimating computational cost')\nN = 2 ** 10\nBATCH = 2 ** 6\n\nT_sym = timeit.timeit(lambda: func_M(q, bivm).sum().backward(retain_graph=True), number=N)\nT_sym_batch = timeit.timeit(lambda: func_M(q.unsqueeze(0).repeat(BATCH, 1), bivm.unsqueeze(0).repeat(BATCH, 1, 1)).sum().backward(retain_graph=True), number=N // BATCH)\n\n\nprint(f'Serial computation cost per matrix: {T_sym / N} [s]')\nprint(f'Batched (batch_size={BATCH}) computation cost per matrix: {T_sym_batch / N} [s]')\n\n"
] |
[
[
"numpy.hstack",
"torch.tensor",
"numpy.vstack"
]
] |
MatthewAlexanderFisher/GaussED
|
[
"492473991d47d4acc4155e34389b47cebf0eb664"
] |
[
"src/gaussed/basis/laplace.py"
] |
[
"import math\nfrom functools import lru_cache\nimport torch\n\nfrom gaussed.basis.base import Basis\nfrom gaussed.utils.summation_tensor_gen import sum_tensor_gen\n\n\ndef diff_basis_func_gen(order):\n \"\"\"Generates t\n\n Args:\n order ([type]): [description]\n\n Raises:\n TypeError: [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n if order % 4 == 0:\n\n def func(x, i):\n return torch.sin(i * x)\n\n elif order % 4 == 1:\n\n def func(x, i):\n return torch.cos(i * x)\n\n elif order % 4 == 2:\n\n def func(x, i):\n return -torch.sin(i * x)\n\n elif order % 4 == 3:\n\n def func(x, i):\n return -torch.cos(i * x)\n\n else:\n raise TypeError(\"order should be of type int\")\n\n return func\n\n\nclass Laplace(Basis):\n def __init__(self, dim, a=None, b=math.pi, c=0, d=None, dims=None):\n \"\"\"\n General form of basis function is of the form\n\n a(x,i) * sin(i(bx + c)) or a(x,i) * cos(i(bx + c))\n :param dim:\n \"\"\"\n\n super().__init__()\n\n self.bases = [self] # multiple bases\n\n self.dim = dim # dimension of the basis functions generated\n self.input_dim = dim # dimension of the expected x input\n\n if dims is None:\n self.dims = [\n i for i in range(self.dim)\n ] # list of the dimensions the laplace basis takes as input\n self.full_dims = True\n else:\n self.dims = sorted(dims)\n if set(dims) == set([i for i in range(self.dim)]):\n self.full_dims = True\n else:\n self.full_dims = False\n\n if a is None:\n\n def a(x, i):\n return torch.Tensor([1])\n\n self.a = a\n else:\n self.a = a\n\n self.b = torch.ones(self.dim) * b\n self.c = torch.ones(self.dim) * c\n self.norm = (2 * self.b / math.pi).pow(1 / 2)\n\n self.integrable = True\n self.fully_integrated = False\n self.integral_dims = []\n self.differentiable = True\n self.diff_orders = torch.zeros(self.dim)\n\n def integral_const(m, i):\n return torch.Tensor([1])\n\n self.integral_const = integral_const\n\n self.index_func = None\n self.integrated_index_func = None\n\n def func(x, i):\n return 
torch.sin(i * x)\n\n self.func = func\n self.integrated_func = None\n\n def basis_func(x, i):\n x_t = x.unsqueeze(1)\n return self.func(self.b * x_t + self.c, i)\n\n self.basis_func = basis_func\n self.integrated_basis_func = None\n\n self.func_vector = [self.func for i in range(self.dim)]\n self.integrated_func_vector = []\n\n self.laplace_list = (\n []\n ) # this keeps track of laplace that are built from this object\n\n @lru_cache(maxsize=32)\n def eval(self, x, m):\n \"\"\"\n Computes the tensor with Phi_ij = phi_j(x_i). m is the number of basis functions.\n\n :param x: Tensor of size n x d\n :param m: Tensor or integer value\n :return: Tensor of size n x m\n \"\"\"\n if x is not None:\n n, dim = x.shape\n\n if self.full_dims is False:\n x_r = x.T[self.dims].T\n else:\n x_r = x\n\n j_mat = sum_tensor_gen(self.dim, m)\n\n if (\n self.fully_integrated\n ): # TODO: include normalisation constant when integrated\n # integral_transform const includes m for line_integral\n return self.integral_const(m, j_mat).unsqueeze(0)\n elif self.integral_dims:\n j_mat_int = (j_mat.T[self.integral_dims]).T\n j_mat_other = (\n j_mat.T[\n list(set([i for i in range(self.dim)]) - set(self.integral_dims))\n ]\n ).T\n\n return (\n self.integral_const(m, j_mat_int)\n * self.a(x, j_mat_other)\n * torch.prod(self.basis_func(x_r, j_mat_other), dim=2).reshape(\n n, m ** self.dim\n )\n )\n\n else:\n return self.a(x, j_mat) * torch.prod(\n self.norm * self.basis_func(x_r, j_mat), dim=2\n ).reshape(n, m ** self.dim)\n\n def set_new_basis(self, a, b, c, diff_orders, integral_dims, basis_func=None):\n\n self.a = a\n self.b = b\n self.c = c\n self.norm = (2 * self.b / math.pi).pow(1 / 2)\n\n self.diff_orders = diff_orders\n self.integral_dims = integral_dims\n\n non_integral_dims = list(set([i for i in range(self.dim)]) - set(integral_dims))\n\n diff_orders_integral, diff_orders_other = (\n diff_orders[integral_dims],\n diff_orders[non_integral_dims],\n )\n b_integral, b_other = 
b[integral_dims], b[non_integral_dims]\n c_integral, c_other = c[integral_dims], c[non_integral_dims]\n\n if (\n basis_func is None and non_integral_dims\n ): # if not all dimensions have been integrated\n\n def index_func(i):\n return (i * b_other).pow(diff_orders_other)\n\n self.index_func = index_func\n\n if torch.equal(diff_orders_other, torch.zeros(self.dim)):\n pass # if all derivative orders are the same\n elif torch.equal(\n diff_orders_other, (torch.ones(self.dim) * diff_orders_other[0])\n ):\n\n self.func = diff_basis_func_gen(diff_orders_other[0])\n self.func_vector = [self.func for i in range(len(non_integral_dims))]\n\n # the following is faster than using the more general torch.stack(...) function below\n def basis_func(x, i):\n x_t = x.unsqueeze(1)\n return index_func(i) * self.func(b_other * x_t + c_other, i)\n\n self.basis_func = basis_func\n else:\n new_func_vector = []\n\n for i in range(len(non_integral_dims)):\n new_func_vector.append(diff_basis_func_gen(diff_orders_other[i]))\n\n self.func_vector = new_func_vector\n\n def basis_func(x, i):\n return (\n index_func(i)\n * torch.stack(\n [\n f(b_other[k] * x.T[k] + c_other[k], i.T[k].unsqueeze(1))\n for k, f in enumerate(self.func_vector)\n ]\n ).T\n )\n\n self.basis_func = basis_func\n\n elif basis_func is not None:\n self.basis_func = basis_func\n else: # if we have integrated all dimension, basis function just returns 1\n\n self.func_vector = []\n self.fully_integrated = True\n\n def basis_func(x, i):\n return torch.Tensor([1])\n\n self.basis_func = basis_func\n\n if (\n integral_dims\n ): # if integral_dims is not the empty list (i.e. 
we have an integral_transform)\n\n def integrated_index_func(i):\n return (i * b_integral).pow(diff_orders_integral)\n\n self.integrated_index_func = integrated_index_func\n\n if torch.equal(\n diff_orders_integral, (torch.ones(self.dim) * diff_orders_integral[0])\n ):\n\n self.integrated_func = diff_basis_func_gen(diff_orders_integral[0])\n self.integrated_func_vector = [\n self.integrated_func for i in range(len(integral_dims))\n ]\n\n def integrated_basis_func(x, i):\n x_t = x.unsqueeze(1)\n return integrated_index_func(i) * self.integrated_func(\n b_integral * x_t + c_integral, i\n )\n\n self.integrated_basis_func = integrated_basis_func\n\n else:\n new_integrated_func_vector = []\n\n for i in range(len(integral_dims)):\n new_integrated_func_vector.append(\n diff_basis_func_gen(diff_orders_integral[i])\n )\n\n self.integrated_func_vector = new_integrated_func_vector\n\n def integrated_basis_func(x, i):\n return (\n integrated_index_func(i)\n * torch.stack(\n [\n f(\n b_integral[k] * x.T[k] + c_integral[k],\n i.T[k].unsqueeze(1),\n )\n for k, f in enumerate(self.integrated_func_vector)\n ]\n ).T\n )\n\n self.integrated_basis_func = integrated_basis_func\n\n def set_differentiable(self, laplace):\n if laplace.differentiable:\n self.differentiable = True\n else:\n self.differentiable = False\n\n def set_integrable(self, laplace):\n if laplace.integrable:\n self.integrable = True\n else:\n self.integrable = False\n\n def set_domain(self, domain):\n relative_domain = domain[self.dims]\n new_b = math.pi / (relative_domain.T[1] - relative_domain.T[0])\n new_c = -relative_domain.T[0] * new_b\n self.b = new_b\n self.c = new_c\n self.norm = (2 * self.b / math.pi).pow(1 / 2)\n\n @staticmethod\n def affine(laplace, linear):\n \"\"\"\n Function valued affine transform of basis function :- basis_func(x, i) * linear(x) + translation(x)\n :param laplace:\n :param linear: linear function\n :param translation: translation function\n \"\"\"\n\n def new_a(x, i):\n return 
laplace.a(x, i) * linear(x)\n\n new_laplace = Laplace(laplace.dim)\n new_laplace.set_new_basis(\n new_a,\n laplace.b,\n laplace.c,\n laplace.diff_orders,\n laplace.integral_dims,\n laplace.normalisation,\n )\n\n new_laplace.integrable = False\n new_laplace.differentiable = False\n\n return new_laplace\n\n @staticmethod\n def simple_affine(laplace, A=1, B=0):\n \"\"\"\n Overrides eval with simple affine transform of the form A * f(x) + B\n :param laplace:\n :param A: Tensor of dimension 1\n :param B: Tensor of dimension 1\n \"\"\"\n\n def new_a(x, i):\n return A * laplace.a(x, i)\n\n new_laplace = Laplace(laplace.dim)\n new_laplace.set_new_basis(\n new_a, laplace.b, laplace.c, laplace.diff_orders, laplace.integral_dims\n )\n new_laplace.set_dims(laplace.dims)\n new_laplace.set_differentiable(laplace)\n new_laplace.set_integrable(laplace)\n\n return new_laplace\n\n @staticmethod\n def input_affine(\n laplace, b, c\n ): # TODO: we are not performing affine transform - just overwriting b and c\n\n new_laplace = Laplace(laplace.dim)\n new_laplace.set_new_basis(\n laplace.a, b, c, laplace.diff_orders, laplace.integral_dims\n )\n\n new_laplace.integrable = laplace.integrable\n new_laplace.differentiable = laplace.differentiable\n\n return new_laplace\n\n @staticmethod\n def input_warp(laplace, func):\n \"\"\"\n Composes function to input of basis function :- basis_func(objective(x),i)\n :param laplace:\n :param func: function to be composed\n \"\"\"\n\n if laplace.full_dims is True:\n\n def new_a(x, i):\n return laplace.a(func(x), i)\n\n def new_basis_func(x, i):\n return laplace.basis_func(func(x), i)\n\n else:\n\n def new_a(x, i):\n return laplace.a(func(x).T[[laplace.dims]].T, i)\n\n def new_basis_func(x, i):\n return laplace.basis_func(func(x).T[[laplace.dims]].T, i)\n\n new_laplace = Laplace(laplace.dim)\n\n new_laplace.set_new_basis(\n new_a,\n laplace.b,\n laplace.c,\n laplace.diff_orders,\n laplace.integral_dims,\n new_basis_func,\n )\n 
new_laplace.set_dims(laplace.dims)\n new_laplace.set_full_dims(laplace.full_dims)\n\n new_laplace.integrable = False\n new_laplace.differentiable = False\n\n return new_laplace\n\n @staticmethod\n def differentiate(laplace, diff_dims, orders):\n \"\"\"\n Overrides basis_func with differentiated basis\n :param laplace:\n :param diff_dims: diff_dims\n :param orders: derivative orders\n \"\"\"\n\n diff_orders = laplace.diff_orders\n\n relative_diff_dims, relative_diff_orders = laplace.get_relative_dims(\n diff_dims, diff_orders=orders\n )\n for j in range(len(relative_diff_dims)):\n diff_orders[relative_diff_dims[j]] = (\n diff_orders[relative_diff_dims[j]] + relative_diff_orders[j]\n )\n\n if laplace.differentiable:\n new_laplace = Laplace(laplace.dim)\n new_laplace.set_new_basis(\n laplace.a, laplace.b, laplace.c, diff_orders, laplace.integral_dims\n )\n new_laplace.set_differentiable(\n laplace\n ) # this is unnecessary since laplace is automatically differentiable\n new_laplace.set_integrable(laplace)\n new_laplace.set_dims(laplace.dims)\n else:\n raise NotImplementedError(\"basis object not differentiable\")\n\n return new_laplace\n\n @staticmethod\n def integrate(laplace, integral_dims, limits, method=None):\n\n # TODO: FIX NORMALISATION FOR INTEGRALS\n integral_dims, relative_limits = laplace.get_relative_dims(integral_dims)\n limits = torch.stack(relative_limits)\n integral_dims = (\n laplace.integral_dims + integral_dims\n ) # check for repeated integral_transform dims?\n integral_dims.sort()\n\n if laplace.integrable:\n\n new_laplace = Laplace(laplace.dim)\n new_laplace.input_dim = laplace.dim - len(integral_dims)\n\n new_diff_orders = laplace.diff_orders\n new_diff_orders[integral_dims] = new_diff_orders[integral_dims] - 1\n\n def integral_const(m, i):\n return torch.prod(\n new_laplace.integrated_basis_func(limits.T, i)[1]\n - new_laplace.integrated_basis_func(limits.T, i)[0],\n dim=1,\n )\n\n new_laplace.integral_const = integral_const\n\n 
new_laplace.set_new_basis(\n laplace.a, laplace.b, laplace.c, new_diff_orders, integral_dims\n )\n new_laplace.set_differentiable(laplace)\n new_laplace.set_integrable(laplace)\n new_laplace.set_dims(laplace.dims)\n\n else:\n raise NotImplementedError\n # TODO: include method in argument to perform numerical integration for non-integrable bases\n\n return new_laplace\n\n @staticmethod\n def line_integral(laplace, r, method, dr=None):\n def laplace_eval(x, m):\n return laplace.eval(r(x), m)\n\n new_laplace = Laplace(laplace.dim)\n new_laplace.set_new_basis(\n laplace.a,\n laplace.b,\n laplace.c,\n laplace.diff_orders,\n laplace.integral_dims,\n laplace.basis_func,\n )\n\n if dr is None:\n\n def integral_const(m, i):\n return method.line_integral_basis(laplace_eval, m)\n\n else:\n raise NotImplementedError # TODO: implement line_integral with dr/dx (or r')\n\n new_laplace.integral_const = integral_const\n new_laplace.fully_integrated = True\n new_laplace.set_dims(laplace.dims)\n\n return new_laplace\n\n def set_dims(self, dims):\n self.dims = sorted(dims)\n\n def set_full_dims(self, full_dims):\n self.full_dims = full_dims\n\n def get_relative_dims(self, new_dims, diff_orders=None, integral_limits=None):\n relative_dims = []\n\n if diff_orders is not None:\n relative_diff_orders = []\n if integral_limits is not None:\n relative_integral_limits = []\n\n for i in range(self.dim):\n if self.dims[i] in new_dims:\n relative_dims.append(i)\n index = new_dims.index(self.dims[i])\n if diff_orders is not None:\n relative_diff_orders.append(diff_orders[index])\n if integral_limits is not None:\n relative_integral_limits.append(integral_limits[index])\n\n if diff_orders is not None:\n return relative_dims, relative_diff_orders\n elif integral_limits is not None:\n return relative_dims, relative_integral_limits\n else:\n return relative_dims\n"
] |
[
[
"torch.zeros",
"torch.cos",
"torch.stack",
"torch.sin",
"torch.ones",
"torch.Tensor"
]
] |
tomwhite/gimmemotifs
|
[
"984399eaef3f05fd04c0b45c62efe9d287aaccf8"
] |
[
"gimmemotifs/preprocessing.py"
] |
[
"# Copyright (c) 2009-2020 Simon van Heeringen <simon.vanheeringen@gmail.com>\n#\n# This module is free software. You can redistribute it and/or modify it under\n# the terms of the MIT License, see the file COPYING included with this\n# distribution.\n\n\"\"\" Data preprocessing to create GimmeMotifs input. \"\"\"\n# Python imports\nimport os\nimport sys\nimport logging\nimport multiprocessing\nfrom tempfile import NamedTemporaryFile\n\n# External imports\nimport genomepy\nimport numpy as np\nimport pysam\nfrom fluff.fluffio import load_heatmap_data\nimport pandas as pd\nfrom pybedtools import BedTool\nfrom sklearn.preprocessing import scale\nimport qnorm\nfrom tqdm.auto import tqdm\n\n# gimme imports\nfrom gimmemotifs.utils import determine_file_type\n\nlogger = logging.getLogger(\"gimme.preprocessing\")\n\n\ndef coverage_table(\n peakfile,\n datafiles,\n window,\n log_transform=True,\n normalization=\"none\",\n top=0,\n topmethod=\"var\",\n rmdup=True,\n rmrepeats=True,\n ncpus=12,\n):\n for x in datafiles:\n if not os.path.isfile(x):\n print(\"ERROR: Data file '{0}' does not exist\".format(x))\n sys.exit(1)\n for x in datafiles:\n if \".bam\" in x and not os.path.isfile(\"{0}.bai\".format(x)):\n print(\n \"Data file '{0}' does not have an index file.\"\n \" Creating an index file for {0}.\".format(x)\n )\n pysam.index(x)\n\n logger.info(\"Loading data\")\n data = {}\n try:\n # Load data in parallel\n pool = multiprocessing.Pool(processes=ncpus)\n jobs = []\n for datafile in datafiles:\n jobs.append(\n pool.apply_async(\n load_heatmap_data,\n args=(\n peakfile,\n datafile,\n 1,\n window // 2,\n window // 2,\n rmdup,\n False,\n rmrepeats,\n None,\n False,\n None,\n ),\n )\n )\n for job in tqdm(jobs):\n track, regions, profile, guard = job.get()\n data[os.path.splitext(track)[0]] = profile[:, 0]\n except Exception as e:\n sys.stderr.write(\"Error loading data in parallel, trying serial\\n\")\n sys.stderr.write(\"Error: {}\\n\".format(e))\n for datafile in 
tqdm(datafiles):\n track, regions, profile, guard = load_heatmap_data(\n peakfile,\n datafile,\n 1,\n window // 2,\n window // 2,\n rmdup,\n False,\n rmrepeats,\n None,\n False,\n None,\n )\n data[os.path.splitext(track)[0]] = profile[:, 0]\n\n # Create DataFrame with regions as index\n regions = [\"{}:{}-{}\".format(*region[:3]) for region in regions]\n df = pd.DataFrame(data, index=regions)\n\n if log_transform:\n logger.info(\"Log transform\")\n df = np.log1p(df)\n if normalization == \"scale\":\n logger.info(\"Normalization by scaling\")\n df[:] = scale(df, axis=0)\n if normalization == \"quantile\":\n logger.info(\"Normalization by quantile normalization\")\n # stay between 1-4 ncpus, after 4 highly diminishing returns\n df = qnorm.quantile_normalize(df, ncpus=sorted(1, ncpus, 4)[1])\n else:\n logger.info(\"No normalization\")\n\n if top > 0:\n if topmethod == \"var\":\n idx = df.var(1).sort_values().tail(top).index\n elif topmethod == \"std\":\n idx = df.std(1).sort_values().tail(top).index\n elif topmethod == \"mean\":\n idx = df.mean(1).sort_values().tail(top).index\n elif topmethod == \"random\":\n idx = df.sample(top).index\n else:\n raise ValueError(\n \"unknown method {} for selecting regions\".format(topmethod)\n )\n df = df.loc[idx]\n return df\n\n\ndef read_peak_file_to_df(fname):\n \"\"\"\n Read a MACS2 summits.bed or narrowPeak file and return a DataFrame.\n\n Parameters\n ----------\n fname : str\n Filename.\n\n Returns\n -------\n pandas.DataFrame\n DataFrame with summits.\n \"\"\"\n summit_header = [\"chrom\", \"start\", \"end\", \"name\", \"value\"]\n ftype = determine_file_type(fname)\n # Read MACS2 summit files\n if ftype == \"narrowpeak\":\n header = [\n \"chrom\",\n \"start\",\n \"end\",\n \"name\",\n \"value\",\n \"strand\",\n \"signalValue\",\n \"pval\",\n \"qval\",\n \"peak\",\n ]\n df = pd.read_table(fname, names=header, dtype={\"chrom\": \"str\"})\n df[\"chrom\"] = df[\"chrom\"].astype(str)\n\n # get the summit\n df[\"start\"] = 
df[\"start\"].astype(int) + df[\"peak\"].astype(int)\n df[\"end\"] = df[\"start\"] + 1\n\n # qva\n df[\"value\"] = df[\"qval\"]\n df = df[summit_header]\n elif ftype == \"bed\":\n df = pd.read_table(fname, names=summit_header, dtype={\"chrom\": \"str\"})\n if ((df[\"end\"] - df[\"start\"]) != 1).sum() != 0:\n raise ValueError(f\"{fname} does not contain summits.\")\n else:\n raise ValueError(\n f\"Can't determine file type of {fname}. \"\n \"Is the file a narrowPeak or summits.bed file?\"\n )\n\n df[\"experiment\"] = df.loc[0, \"name\"].replace(\"_peak_1\", \"\")\n df[\"log_value\"] = np.log1p(df[\"value\"])\n df[\"log_value_scaled\"] = scale(df[[\"log_value\"]])\n return df\n\n\ndef combine_peaks(peaks, genome, window, scale_value):\n \"\"\"\n Combine multiple MACS2 summit files and returns the summit\n with the maximum value.\n\n Parameters\n ----------\n peaks : list\n List with summit file names.\n\n genome : str\n Genome file name. Either a file with chromosome sizes or a genomepy\n genome name.\n\n window : int\n Window size. 
Summits will be extended to this size before merging.\n\n scale_value : bool\n Scale summit values before taking the maximum.\n\n Returns\n -------\n summits : pandas.DataFrame\n DataFrame with summits.\n \"\"\"\n try:\n g = genomepy.Genome(genome)\n genome = g.sizes_file\n except Exception:\n pass\n\n dfs = [read_peak_file_to_df(fname) for fname in peaks]\n df_all = pd.concat(dfs)\n\n check_col = \"log_value\"\n if scale_value:\n check_col = \"log_value_scaled\"\n\n # store summit location + associated value in col4\n df_all[\"col4\"] = (\n df_all[\"chrom\"].astype(str)\n + \";\"\n + df_all[\"start\"].astype(str)\n + \";\"\n + df_all[\"end\"].astype(str)\n + \";\"\n + df_all[check_col].astype(str)\n )\n\n tmp = NamedTemporaryFile(suffix=\".all_peaks.bed\", delete=False).name\n out = df_all[[\"chrom\", \"start\", \"end\", \"col4\"]].sort_values([\"chrom\", \"start\"])\n out.to_csv(tmp, sep=\"\\t\", index=False, header=False)\n\n b = BedTool(tmp)\n all_summits = []\n # loop over merged peaks based on window size and collapse on col4 (summit + value)\n for f in b.slop(b=window // 2, g=genome).merge(c=4, o=\"collapse\"):\n summits = [x.split(\";\") for x in f[3].split(\",\")]\n # only keep the highest summit\n all_summits.append(sorted(summits, key=lambda x: float(x[3]))[-1][:3])\n\n df = pd.DataFrame(all_summits, columns=[\"chrom\", \"start\", \"end\"])\n return df\n"
] |
[
[
"pandas.read_table",
"pandas.DataFrame",
"numpy.log1p",
"sklearn.preprocessing.scale",
"pandas.concat"
]
] |
Horsmann/LREC2018-DeepTC
|
[
"8451c20c2a36f4ab7b3c0ead352490a8e5f8db6a"
] |
[
"de.unidue.ltl.LREC2018_keras/src/main/resources/kerasCode/posTaggingLstmBatchOne.py"
] |
[
"from sys import argv\nimport numpy as np\nimport sys\nimport argparse\n\nnp.set_printoptions(threshold=np.nan)\n\ndef numpyizeVector(vec):\n\tvout=[]\n\tfile = open(vec, 'r')\n\tfor l in file.readlines():\n\t\tl = l.strip()\n\t\tv = [int(x) for x in l.split()]\n\t\tvout.append(v)\n\tfile.close()\n\treturn np.asarray(vout)\n\t\ndef loadEmbeddings(emb):\n\tmatrix = {}\t\n\tf = open(emb, 'r')\n\tembData = f.readlines()\n\tf.close()\n\tdim = len(embData[0].split())-1\n\tmatrix = np.zeros((len(embData)+1, dim))\t\n\tfor e in embData:\n\t\te = e.strip()\n\t\tif not e:\n\t\t\tcontinue\n\t\tidx = e.find(\" \")\n\t\tid = e[:idx]\n\t\tvector = e[idx+1:]\n\t\tmatrix[int(id)]=np.asarray(vector.split(\" \"), dtype='float32')\n\treturn matrix, dim\n\t\ndef getMaxLabel(trainLabel, testLabel):\n\tmaxLabel=-1\n\tfor e in [trainLabel, testLabel]:\n\t\tfor s in e:\n\t\t\tfor v in s:\n\t\t\t\tif v>maxLabel:\n\t\t\t\t\tmaxLabel=v\t\n\treturn maxLabel+1\n\t\ndef getMaxVocab(trainData, testData):\n\tvocabSize=-1\n\tfor e in [trainData, testData]:\n\t\tfor s in e:\n\t\t\tfor v in s:\n\t\t\t\tif v>vocabSize:\n\t\t\t\t\tvocabSize=v\t\n\treturn vocabSize\n\ndef runExperiment(seed, trainVec, trainOutcome, testVec, testOutcome, embedding, longest_sequence, predictionOut):\t\n\n\tnp.random.seed(seed)\n\n\tfrom keras.preprocessing import sequence\n\tfrom keras.models import Sequential\n\tfrom keras.layers import Dense, Activation, Embedding, TimeDistributed, Bidirectional, Dropout\n\tfrom keras.layers import LSTM\n\tfrom keras.utils import np_utils\n\tfrom keras.optimizers import SGD\n\t\n\n\ttrainVecNump = numpyizeVector(trainVec)\n\ttrainOutcome = numpyizeVector(trainOutcome)\n\t\n\ttestVecNump = numpyizeVector(testVec)\n\ttestOutcome = numpyizeVector(testOutcome)\n \n\tembeddings,dim = loadEmbeddings(embedding)\n\tEMBEDDING_DIM = dim\n\t\n\tx_train=trainVecNump\n\ty_train=trainOutcome\n\t\n\tx_test = testVecNump\n\ty_test = testOutcome\n\t\n\tmaxLabel=getMaxLabel(trainOutcome, 
testOutcome)\n\t\t\n\tvocabSize=getMaxVocab(trainVecNump, testVecNump)\n\t\t\t\t\n\t# training label to one-hot vectors\n\ty_train = np.array([np_utils.to_categorical(s, maxLabel) for s in y_train])\n\n\tprint(\"Building model\")\n\tmodel = Sequential()\n\tmodel.add(Embedding(output_dim=embeddings.shape[1], input_dim=embeddings.shape[0],\n weights=[embeddings],\n trainable=False)) \n\tmodel.add(Bidirectional(LSTM(100, return_sequences=True, activation=\"tanh\",kernel_initializer=\"glorot_uniform\")))\n\tmodel.add(TimeDistributed(Dense(maxLabel)))\n\tmodel.add(Activation('softmax'))\n\tsgdOptimizer=SGD(lr=0.1, momentum=0., decay=0., nesterov=False)\n\n\t# try using different optimizers and different optimizer configs\n\tmodel.compile(loss='categorical_crossentropy',\n optimizer=sgdOptimizer,\n metrics=['accuracy'])\n\n\t\n\tprint(\"Start training\")\n\tfor i in range(0, 20):\n\t\trandomize = np.arange(len(x_train))\n\t\tnp.random.shuffle(randomize)\n\t\tx_train = x_train[randomize]\n\t\ty_train = y_train[randomize]\n\t\tassert(len(x_train) == len(y_train))\n\t\tfor c, (x,y) in enumerate(zip(x_train, y_train)):\n\t\t\tx=np.asarray([x])\n\t\t\ty=np.asarray([y])\n\t\t\t\n\t\t\t#sys.stdout.write('\\r')\n\t\t\t#sys.stdout.write(\"%.1f %% of data provided\" % ((c/(len(x_train)-1))*100))\n\t\t\t#sys.stdout.flush()\n\t\t\t\n\t\t\ta = model.fit(x, y, batch_size=1, verbose=0)\n\t\tprint(\"\\nEpoche \" + str(i+1) + \" completed\")\n\t\t\n\tprediction = [model.predict_classes(np.asarray([x]), verbose=0) for x in x_test]\n\t\n\tpredictionFile = open(predictionOut, 'w')\n\tpredictionFile.write(\"#Gold\\tPrediction\\n\")\n\tfor i in range(0, len(prediction)):\n\t\tpredictionEntry = prediction[i][0]\n\t\tfor j in range(0, len(y_test[i])):\n\t\t\tif y_test[i][j]==0:\n\t\t\t\tcontinue #we reached the padded area - zero is reserved\n\t\t\tpredictionFile.write(str(y_test[i][j]) +\"\\t\" + str(predictionEntry[j]))\n\t\t\tif j+1 < 
len(y_test[i]):\n\t\t\t\tpredictionFile.write(\"\\n\")\n\t\tpredictionFile.write(\"\\n\")\n\tpredictionFile.close()\n\n\nif __name__ =='__main__':\n\tparser = argparse.ArgumentParser(description=\"LREC Keras\")\n\tparser.add_argument(\"--trainData\", nargs=1, required=True)\n\tparser.add_argument(\"--trainOutcome\", nargs=1, required=True)\n\tparser.add_argument(\"--testData\", nargs=1, required=True)\n\tparser.add_argument(\"--testOutcome\", nargs=1, required=True) \n\tparser.add_argument(\"--embedding\", nargs=1, required=True) \n\tparser.add_argument(\"--maxLen\", nargs=1, required=True)\n\tparser.add_argument(\"--predictionOut\", nargs=1, required=True)\n\tparser.add_argument(\"--seed\", nargs=1, required=True) \n \n \n\targs = parser.parse_args()\n \n\ttrainData = args.trainData[0]\n\ttrainOutcome = args.trainOutcome[0]\n\ttestData = args.testData[0]\n\ttestOutcome = args.testOutcome[0]\n\tembedding = args.embedding[0]\n\tmaxLen = args.maxLen[0]\n\tpredictionOut = args.predictionOut[0]\n\tseed = args.seed[0]\n\t\n\trunExperiment(int(seed), trainData, trainOutcome, testData, testOutcome, embedding, int(maxLen), predictionOut)\n"
] |
[
[
"numpy.random.seed",
"numpy.set_printoptions",
"numpy.asarray",
"numpy.random.shuffle"
]
] |
Vikas89/private-mxnet
|
[
"7797584450186d36e52c902c3b606f4b4676e0a3"
] |
[
"python/mxnet/image/image.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name\n# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements\n\"\"\"Read individual image files and perform augmentations.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport random\nimport logging\nimport json\nimport warnings\nimport numpy as np\n\ntry:\n import cv2\nexcept ImportError:\n cv2 = None\n\nfrom ..base import numeric_types\nfrom .. import ndarray as nd\nfrom ..ndarray import _internal\nfrom ..ndarray._internal import _cvimresize as imresize\nfrom ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder\nfrom .. import io\nfrom .. import recordio\n\n\ndef imread(filename, *args, **kwargs):\n \"\"\"Read and decode an image to an NDArray.\n\n Note: `imread` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n filename : str\n Name of the image file to be loaded.\n flag : {0, 1}, default 1\n 1 for three channel color output. 
0 for grayscale output.\n to_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> mx.img.imread(\"flower.jpg\")\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> mx.img.imread(\"flower.jpg\", flag=0)\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> mx.img.imread(\"flower.jpg\", to_rgb=0)\n <NDArray 224x224x3 @cpu(0)>\n \"\"\"\n return _internal._cvimread(filename, *args, **kwargs)\n\n\ndef imdecode(buf, *args, **kwargs):\n \"\"\"Decode an image to an NDArray.\n\n Note: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n \"\"\"\n if not isinstance(buf, nd.NDArray):\n buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)\n return _internal._cvimdecode(buf, *args, **kwargs)\n\n\ndef scale_down(src_size, size):\n \"\"\"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n \"\"\"\n w, h = size\n sw, sh = src_size\n if sh < h:\n w, h = float(w * sh) / h, sh\n if sw < w:\n w, h = sw, float(h * sw) / w\n return int(w), int(h)\n\n\ndef _get_interp_method(interp, sizes=()):\n \"\"\"Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. 
(used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n \"\"\"\n if interp == 9:\n if sizes:\n assert len(sizes) == 4\n oh, ow, nh, nw = sizes\n if nh > oh and nw > ow:\n return 2\n elif nh < oh and nw < ow:\n return 3\n else:\n return 1\n else:\n return 2\n if interp == 10:\n return random.randint(0, 4)\n if interp not in (0, 1, 2, 3, 4):\n raise ValueError('Unknown interp method %d' % interp)\n return interp\n\n\ndef resize_short(src, size, interp=2):\n \"\"\"Resizes shorter edge to size.\n\n Note: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. 
But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An 'NDArray' containing the resized image.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n \"\"\"\n h, w, _ = src.shape\n if h > w:\n new_h, new_w = size * h // w, size\n else:\n new_h, new_w = size, size * w // h\n return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))\n\n\ndef fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n \"\"\"Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n \"\"\"\n out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))\n if size is not None and (w, h) != size:\n sizes = (h, w, size[1], size[0])\n out = imresize(out, *size, interp=_get_interp_method(interp, sizes))\n return out\n\n\ndef random_crop(src, size, interp=2):\n \"\"\"Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread(\"flower.jpg\"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n <NDArray 100x100x1 @cpu(0)>\n >>> print rect\n (20, 21, 100, 100)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef center_crop(src, size, interp=2):\n \"\"\"Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. 
note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = int((w - new_w) / 2)\n y0 = int((h - new_h) / 2)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef color_normalize(src, mean, std=None):\n \"\"\"Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n \"\"\"\n if mean is not None:\n src -= mean\n if std is not None:\n src /= std\n return src\n\n\ndef random_size_crop(src, size, area, ratio, interp=2, **kwargs):\n \"\"\"Randomly crop src with size. 
Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n \"\"\"\n h, w, _ = src.shape\n src_area = h * w\n\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. Please use `area` instead.',\n DeprecationWarning)\n area = kwargs.pop('min_area')\n assert not kwargs, \"unexpected keyword arguments for `random_size_crop`.\"\n\n if isinstance(area, numeric_types):\n area = (area, 1.0)\n for _ in range(10):\n target_area = random.uniform(area[0], area[1]) * src_area\n new_ratio = random.uniform(*ratio)\n\n new_w = int(round(np.sqrt(target_area * new_ratio)))\n new_h = int(round(np.sqrt(target_area / new_ratio)))\n\n if random.random() < 0.5:\n new_h, new_w = new_w, new_h\n\n if new_w <= w and new_h <= h:\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n # fall back to center_crop\n return center_crop(src, size, interp)\n\n\nclass Augmenter(object):\n \"\"\"Image Augmenter base class\"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n for k, v in self._kwargs.items():\n if isinstance(v, nd.NDArray):\n v = v.asnumpy()\n if isinstance(v, np.ndarray):\n v = v.tolist()\n 
self._kwargs[k] = v\n\n def dumps(self):\n \"\"\"Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n \"\"\"\n return json.dumps([self.__class__.__name__.lower(), self._kwargs])\n\n def __call__(self, src):\n \"\"\"Abstract implementation body\"\"\"\n raise NotImplementedError(\"Must override implementation.\")\n\n\nclass SequentialAug(Augmenter):\n \"\"\"Composing a sequential augmenter list.\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in sequential order.\n \"\"\"\n def __init__(self, ts):\n super(SequentialAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n for aug in self.ts:\n src = aug(src)\n return src\n\n\nclass ResizeAug(Augmenter):\n \"\"\"Make resize shorter edge to size augmenter.\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return resize_short(src, self.size, self.interp)\n\n\nclass ForceResizeAug(Augmenter):\n \"\"\"Force resize to size regardless of aspect ratio\n\n Parameters\n ----------\n size : tuple of (int, int)\n The desired size as in (width, height)\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ForceResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])\n return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))\n\n\nclass RandomCropAug(Augmenter):\n \"\"\"Make random crop augmenter\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(RandomCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_crop(src, self.size, self.interp)[0]\n\n\nclass RandomSizedCropAug(Augmenter):\n \"\"\"Make random crop with random resizing and random aspect ratio jitter augmenter.\n\n Parameters\n ----------\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, area, ratio, interp=2, **kwargs):\n super(RandomSizedCropAug, self).__init__(size=size, area=area,\n ratio=ratio, interp=interp)\n self.size = size\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. 
Please use `area` instead.',\n DeprecationWarning)\n self.area = kwargs.pop('min_area')\n else:\n self.area = area\n self.ratio = ratio\n self.interp = interp\n assert not kwargs, \"unexpected keyword arguments for `RandomSizedCropAug`.\"\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]\n\n\nclass CenterCropAug(Augmenter):\n \"\"\"Make center crop augmenter.\n\n Parameters\n ----------\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(CenterCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return center_crop(src, self.size, self.interp)[0]\n\n\nclass RandomOrderAug(Augmenter):\n \"\"\"Apply list of augmenters in random order\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in random order\n \"\"\"\n def __init__(self, ts):\n super(RandomOrderAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n random.shuffle(self.ts)\n for t in self.ts:\n src = t(src)\n return src\n\n\nclass BrightnessJitterAug(Augmenter):\n \"\"\"Random brightness jitter augmentation.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness):\n super(BrightnessJitterAug, self).__init__(brightness=brightness)\n self.brightness = brightness\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.brightness, self.brightness)\n src *= alpha\n return src\n\n\nclass ContrastJitterAug(Augmenter):\n 
\"\"\"Random contrast jitter augmentation.\n\n Parameters\n ----------\n contrast : float\n The contrast jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, contrast):\n super(ContrastJitterAug, self).__init__(contrast=contrast)\n self.contrast = contrast\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.contrast, self.contrast)\n gray = src * self.coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n return src\n\n\nclass SaturationJitterAug(Augmenter):\n \"\"\"Random saturation jitter augmentation.\n\n Parameters\n ----------\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, saturation):\n super(SaturationJitterAug, self).__init__(saturation=saturation)\n self.saturation = saturation\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.saturation, self.saturation)\n gray = src * self.coef\n gray = nd.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n\n\nclass HueJitterAug(Augmenter):\n \"\"\"Random hue jitter augmentation.\n\n Parameters\n ----------\n hue : float\n The hue jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, hue):\n super(HueJitterAug, self).__init__(hue=hue)\n self.hue = hue\n self.tyiq = np.array([[0.299, 0.587, 0.114],\n [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n self.ityiq = np.array([[1.0, 0.956, 0.621],\n [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n\n def __call__(self, src):\n \"\"\"Augmenter body.\n Using approximate linear transfomation described in:\n https://beesbuzz.biz/code/hsv_color_transforms.php\n \"\"\"\n alpha = random.uniform(-self.hue, self.hue)\n u = np.cos(alpha * np.pi)\n w = np.sin(alpha * np.pi)\n bt = np.array([[1.0, 0.0, 0.0],\n [0.0, u, -w],\n [0.0, w, u]])\n t = 
np.dot(np.dot(self.ityiq, bt), self.tyiq).T\n src = nd.dot(src, nd.array(t))\n return src\n\n\nclass ColorJitterAug(RandomOrderAug):\n \"\"\"Apply random brightness, contrast and saturation jitter in random order.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n contrast : float\n The contrast jitter ratio range, [0, 1]\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness, contrast, saturation):\n ts = []\n if brightness > 0:\n ts.append(BrightnessJitterAug(brightness))\n if contrast > 0:\n ts.append(ContrastJitterAug(contrast))\n if saturation > 0:\n ts.append(SaturationJitterAug(saturation))\n super(ColorJitterAug, self).__init__(ts)\n\n\nclass LightingAug(Augmenter):\n \"\"\"Add PCA based noise.\n\n Parameters\n ----------\n alphastd : float\n Noise level\n eigval : 3x1 np.array\n Eigen values\n eigvec : 3x3 np.array\n Eigen vectors\n \"\"\"\n def __init__(self, alphastd, eigval, eigvec):\n super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = np.random.normal(0, self.alphastd, size=(3,))\n rgb = np.dot(self.eigvec * alpha, self.eigval)\n src += nd.array(rgb)\n return src\n\n\nclass ColorNormalizeAug(Augmenter):\n \"\"\"Mean and std normalization.\n\n Parameters\n ----------\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n \"\"\"\n def __init__(self, mean, std):\n super(ColorNormalizeAug, self).__init__(mean=mean, std=std)\n self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)\n self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return color_normalize(src, self.mean, self.std)\n\n\nclass RandomGrayAug(Augmenter):\n 
\"\"\"Randomly convert to gray image.\n\n Parameters\n ----------\n p : float\n Probability to convert to grayscale\n \"\"\"\n def __init__(self, p):\n super(RandomGrayAug, self).__init__(p=p)\n self.p = p\n self.mat = nd.array([[0.21, 0.21, 0.21],\n [0.72, 0.72, 0.72],\n [0.07, 0.07, 0.07]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.dot(src, self.mat)\n return src\n\n\nclass HorizontalFlipAug(Augmenter):\n \"\"\"Random horizontal flip.\n\n Parameters\n ----------\n p : float\n Probability to flip image horizontally\n \"\"\"\n def __init__(self, p):\n super(HorizontalFlipAug, self).__init__(p=p)\n self.p = p\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.flip(src, axis=1)\n return src\n\n\nclass CastAug(Augmenter):\n \"\"\"Cast to float32\"\"\"\n def __init__(self, typ='float32'):\n super(CastAug, self).__init__(type=typ)\n self.typ = typ\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n src = src.astype(self.typ)\n return src\n\n\ndef CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,\n mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,\n pca_noise=0, rand_gray=0, inter_method=2):\n \"\"\"Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : 
float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... 
aug.dumps()\n \"\"\"\n auglist = []\n\n if resize > 0:\n auglist.append(ResizeAug(resize, inter_method))\n\n crop_size = (data_shape[2], data_shape[1])\n if rand_resize:\n assert rand_crop\n auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))\n elif rand_crop:\n auglist.append(RandomCropAug(crop_size, inter_method))\n else:\n auglist.append(CenterCropAug(crop_size, inter_method))\n\n if rand_mirror:\n auglist.append(HorizontalFlipAug(0.5))\n\n auglist.append(CastAug())\n\n if brightness or contrast or saturation:\n auglist.append(ColorJitterAug(brightness, contrast, saturation))\n\n if hue:\n auglist.append(HueJitterAug(hue))\n\n if pca_noise > 0:\n eigval = np.array([55.46, 4.794, 1.148])\n eigvec = np.array([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n auglist.append(LightingAug(pca_noise, eigval, eigvec))\n\n if rand_gray > 0:\n auglist.append(RandomGrayAug(rand_gray))\n\n if mean is True:\n mean = nd.array([123.68, 116.28, 103.53])\n elif mean is not None:\n assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]\n\n if std is True:\n std = nd.array([58.395, 57.12, 57.375])\n elif std is not None:\n assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]\n\n if mean is not None or std is not None:\n auglist.append(ColorNormalizeAug(mean, std))\n\n return auglist\n\n\nclass ImageIter(io.DataIter):\n \"\"\"Image data iterator with a large number of augmentation choices.\n This iterator supports reading from both .rec files and raw image files.\n\n To load input images from .rec files, use `path_imgrec` parameter and to load from raw image\n files, use `path_imglist` and `path_root` parameters.\n\n To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.\n\n Parameters\n ----------\n batch_size : int\n Number of examples per batch.\n data_shape : tuple\n Data shape in (channels, height, width) 
format.\n For now, only RGB image with 3 channels is supported.\n label_width : int, optional\n Number of labels per example. The default label width is 1.\n path_imgrec : str\n Path to image record file (.rec).\n Created with tools/im2rec.py or bin/im2rec.\n path_imglist : str\n Path to image list (.lst).\n Created with tools/im2rec.py or with custom script.\n Format: Tab separated record of index, one or more labels and relative_path_from_root.\n imglist: list\n A list of images with the label(s).\n Each item is a list [imagelabel: float or list of float, imgpath].\n path_root : str\n Root folder of image files.\n path_imgidx : str\n Path to image index file. Needed for partition and shuffling when using .rec source.\n shuffle : bool\n Whether to shuffle all images at the start of each iteration or not.\n Can be slow for HDD.\n part_index : int\n Partition index.\n num_parts : int\n Total number of partitions.\n data_name : str\n Data name for provided symbols.\n label_name : str\n Label name for provided symbols.\n dtype : str\n Label data type. Default: float32. Other options: int32, int64, float64\n last_batch_handle : str, optional\n How to handle the last batch.\n This parameter can be 'pad'(default), 'discard' or 'roll_over'.\n If 'pad', the last batch will be padded with data starting from the begining\n If 'discard', the last batch will be discarded\n If 'roll_over', the remaining elements will be rolled over to the next iteration\n kwargs : ...\n More arguments for creating augmenter. 
See mx.image.CreateAugmenter.\n \"\"\"\n\n def __init__(self, batch_size, data_shape, label_width=1,\n path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,\n shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,\n data_name='data', label_name='softmax_label', dtype='float32',\n last_batch_handle='pad', **kwargs):\n super(ImageIter, self).__init__()\n assert path_imgrec or path_imglist or (isinstance(imglist, list))\n assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'\n num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)\n logging.info('Using %s threads for decoding...', str(num_threads))\n logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'\n ' larger number to use more threads.')\n class_name = self.__class__.__name__\n if path_imgrec:\n logging.info('%s: loading recordio %s...',\n class_name, path_imgrec)\n if path_imgidx:\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = list(self.imgrec.keys)\n else:\n self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = None\n else:\n self.imgrec = None\n\n if path_imglist:\n logging.info('%s: loading image list %s...', class_name, path_imglist)\n with open(path_imglist) as fin:\n imglist = {}\n imgkeys = []\n for line in iter(fin.readline, ''):\n line = line.strip().split('\\t')\n label = nd.array(line[1:-1], dtype=dtype)\n key = int(line[0])\n imglist[key] = (label, line[-1])\n imgkeys.append(key)\n self.imglist = imglist\n elif isinstance(imglist, list):\n logging.info('%s: loading image list...', class_name)\n result = {}\n imgkeys = []\n index = 1\n for img in imglist:\n key = str(index) # pylint: disable=redefined-variable-type\n index += 1\n if len(img) > 2:\n label = nd.array(img[:-1], dtype=dtype)\n elif isinstance(img[0], numeric_types):\n label = nd.array([img[0]], dtype=dtype)\n 
else:\n label = nd.array(img[0], dtype=dtype)\n result[key] = (label, img[-1])\n imgkeys.append(str(key))\n self.imglist = result\n else:\n self.imglist = None\n self.path_root = path_root\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size,) + data_shape)]\n if label_width > 1:\n self.provide_label = [(label_name, (batch_size, label_width))]\n else:\n self.provide_label = [(label_name, (batch_size,))]\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.label_width = label_width\n self.shuffle = shuffle\n if self.imgrec is None:\n self.seq = imgkeys\n elif shuffle or num_parts > 1:\n assert self.imgidx is not None\n self.seq = self.imgidx\n else:\n self.seq = None\n\n if num_parts > 1:\n assert part_index < num_parts\n N = len(self.seq)\n C = N // num_parts\n self.seq = self.seq[part_index * C:(part_index + 1) * C]\n if aug_list is None:\n self.auglist = CreateAugmenter(data_shape, **kwargs)\n else:\n self.auglist = aug_list\n self.cur = 0\n self._allow_read = True\n self.last_batch_handle = last_batch_handle\n self.num_image = len(self.seq) if self.seq is not None else None\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n self.reset()\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.last_batch_handle != 'roll_over' or \\\n self._cache_data is None:\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n if self._allow_read is False:\n self._allow_read = True\n\n def hard_reset(self):\n \"\"\"Resets the iterator and ignore roll over data\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n self._allow_read = True\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n\n def next_sample(self):\n \"\"\"Helper function for reading in next 
sample.\"\"\"\n if self._allow_read is False:\n raise StopIteration\n if self.seq is not None:\n if self.cur < self.num_image:\n idx = self.seq[self.cur]\n else:\n if self.last_batch_handle != 'discard':\n self.cur = 0\n raise StopIteration\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n if self.imglist is None:\n return header.label, img\n else:\n return self.imglist[idx][0], img\n else:\n label, fname = self.imglist[idx]\n return label, self.read_image(fname)\n else:\n s = self.imgrec.read()\n if s is None:\n if self.last_batch_handle != 'discard':\n self.imgrec.reset()\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img\n\n def _batchify(self, batch_data, batch_label, start=0):\n \"\"\"Helper function for batchifying data\"\"\"\n i = start\n batch_size = self.batch_size\n try:\n while i < batch_size:\n label, s = self.next_sample()\n data = self.imdecode(s)\n try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n data = self.augmentation_transform(data)\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n batch_data[i] = self.postprocess_data(data)\n batch_label[i] = label\n i += 1\n except StopIteration:\n if not i:\n raise StopIteration\n return i\n\n def next(self):\n \"\"\"Returns the next batch of data.\"\"\"\n batch_size = self.batch_size\n c, h, w = self.data_shape\n # if last batch data is rolled over\n if self._cache_data is not None:\n # check both the data and label have values\n assert self._cache_label is not None, \"_cache_label didn't have values\"\n assert self._cache_idx is not None, \"_cache_idx didn't have values\"\n batch_data = self._cache_data\n batch_label = self._cache_label\n i = self._cache_idx\n # clear the cache data\n else:\n batch_data = nd.empty((batch_size, c, h, w))\n batch_label = nd.empty(self.provide_label[0][1])\n i = 
self._batchify(batch_data, batch_label)\n # calculate the padding\n pad = batch_size - i\n # handle padding for the last batch\n if pad != 0:\n if self.last_batch_handle == 'discard':\n raise StopIteration\n # if the option is 'roll_over', throw StopIteration and cache the data\n elif self.last_batch_handle == 'roll_over' and \\\n self._cache_data is None:\n self._cache_data = batch_data\n self._cache_label = batch_label\n self._cache_idx = i\n raise StopIteration\n else:\n _ = self._batchify(batch_data, batch_label, i)\n if self.last_batch_handle == 'pad':\n self._allow_read = False\n else:\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n return io.DataBatch([batch_data], [batch_label], pad=pad)\n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError('data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError('This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.\"\"\"\n def locate():\n \"\"\"Locate the image file/index if decode fails.\"\"\"\n if self.seq is not None:\n idx = self.seq[(self.cur % self.num_image) - 1]\n else:\n idx = (self.cur % self.num_image) - 1\n if self.imglist is not None:\n _, fname = self.imglist[idx]\n msg = \"filename: {}\".format(fname)\n else:\n msg = \"index: {}\".format(idx)\n return \"Broken image \" + msg\n try:\n img = imdecode(s)\n except Exception as e:\n raise RuntimeError(\"{}, {}\".format(locate(), e))\n return img\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') 
# returns decoded raw bytes.\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = aug(data)\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n"
] |
[
[
"numpy.random.normal",
"numpy.array",
"numpy.dot",
"numpy.sin",
"numpy.cos",
"numpy.frombuffer",
"numpy.sqrt"
]
] |
imk1/MethylationQTLCode
|
[
"d193f518994074a29a39e081d470b1c67a95a527"
] |
[
"correlateSNPsMethylationPlusPlus.py"
] |
[
"def makeIntDict(intDictFileName):\n\t# Make a dictionary from pairs of ints in a file\n\tintDictFile = open(intDictFileName)\n\tintDict = {}\n\t\n\tfor line in intDictFile:\n\t\t# Iterate through the lines of the int dictionary file and enter each into the dictionary\n\t\tlineElements = line.strip().split(\"\\t\")\n\t\tintDict[int(lineElements[0])] = int(lineElements[1])\n\t\t\n\tintDictFile.close()\n\treturn intDict\n\t\n\t\ndef sufficientMinor(numFirst, numSecond, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads):\n\t# Compute if there are enough reads with the minor allele or methylation state\n\tif vecLen >= minReadsCutoffSingleMinor:\n\t\t# Need only 1 minor allele or methylation state for a significant p-value to be possible\n\t\tif (numFirst > 0) and (numSecond > 0):\n\t\t\t# There are enough reads for a significant p-value to be possible\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\telse:\n\t\tminMinorReads = readsToMinMinorReads[vecLen]\n\t\tif (numFirst >= minMinorReads) and (numSecond >= minMinorReads):\n\t\t\t# There are enough reads for a significant p-value to be possible\n\t\t\treturn True\n\treturn False\n\n\ndef outputCorrPlusPlus(SNP, methyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile):\n\t# Compute and output the correlation if the reads and MAF cutoffs are satisfied\n\tvecLen = len(methylVec)\n\tif vecLen >= minReadsCutoff:\n\t\t# The minimum reads cutoff is satisfied\n\t\tnumRefAlleles = SNPVec.count(0)\n\t\tnumAltAlleles = SNPVec.count(1)\n\t\tif sufficientMinor(numRefAlleles, numAltAlleles, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads) == True:\n\t\t\t# Both alleles have sufficiently high numbers of reads\n\t\t\tnumMethyl = methylVec.count(1)\n\t\t\tnumUnmethyl = methylVec.count(0)\n\t\t\tif sufficientMinor(numMethyl, numUnmethyl, vecLen, minReadsCutoffSingleMinor, readsToMinMinorReads) == True:\n\t\t\t\t# C is methylated and unmethylated a sufficient fraction of the 
time\n\t\t\t\t\n\t\t\t\tcorr = scipy.stats.pearsonr(SNPVec, methylVec)[0] # REQUIRES SCIPY 12+ (scipy 8 maybe o.k.)\n\t\t\t\tSNPMethylCorrsFile.write(SNP[0] + \"\\t\" + str(SNP[1]) + \"\\t\" + methyl[0] + \"\\t\" + str(methyl[1]) + \"\\t\" + str(corr) + \"\\n\")\n\n\ndef correlateSNPsMethylationPlusPlus(SNPMethylFileName, SNPMethylCorrsFileName, readsToMinMinorReads):\n\t# Compute the correlation between genotype and methylation\n\t# ASSUMES THAT SNPMethylFile IS SORTED BY METHYLATION CHROM., METHYLATION POSITION, SNP CHROM., SNP POSITION\n\t# SNPMethylCorrsFile will have the following information:\n\t# 1. SNP chromosome\n\t# 2. SNP position in chromosome\n\t# 3. Methylation chromosome\n\t# 4. Methylation position in chromosome\n\t# 5. Correlation between genotype and methylation\n\t# For each SNP, methylation location pair, there will be a line in SNPVecFile and a line in methylVecFile\n\t# These lines will contain the SNP/methylation chromosome, position, and vector used in computing the correlation\n\t# Reference allele will be recorded as 0; alternate allele will be recorded as 1\n\t# Methylated will be recorded as 1; unmethylated will be recorded as 0\n\t# Excludes SNP, methylation pairs that do not have at least the minimum number of reads with the minor allele and methylation status for their number of reads\n\tSNPMethylFile = gzip.open(SNPMethylFileName, 'rb')\n\tSNPMethylCorrsFile = open(SNPMethylCorrsFileName, 'wb')\n\tminReadsCutoff = min(readsToMinMinorReads.keys())\n\tminReadsCutoffSingleMinor = max(readsToMinMinorReads.keys()) + 1\n\tlastSNP = (\"\", 0)\n\tlastMethyl = (\"\", 0)\n\tSNPVec = []\n\tmethylVec = []\n\n\tfor line in SNPMethylFile:\n\t\t# Iterate through the lines of the SNP methylation file and compute the correlation for each SNP, C pair\n\t\tlineElements = line.strip().split(\"\\t\")\n\t\tcurrentSNP = (lineElements[1], int(lineElements[2]))\n\t\tcurrentMethyl = (lineElements[4], int(lineElements[5]))\n\t\tif (currentSNP != lastSNP) or 
(currentMethyl != lastMethyl):\n\t\t\t# At a new SNP or methylation location, so find the correlation for the previous one\n\t\t\toutputCorrPlusPlus(lastSNP, lastMethyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile)\n\t\t\tlastSNP = currentSNP\n\t\t\tlastMethyl = currentMethyl\n\t\t\tSNPVec = []\n\t\t\tmethylVec = []\n\t\t\n\t\tSNPVec.append(int(lineElements[3]))\n\t\tmethylVec.append(int(lineElements[6]))\n\n\toutputCorrPlusPlus(lastSNP, lastMethyl, SNPVec, methylVec, minReadsCutoff, minReadsCutoffSingleMinor, SNPMethylCorrsFile)\n\tSNPMethylFile.close()\n\tSNPMethylCorrsFile.close()\n\n\nif __name__==\"__main__\":\r\n\timport sys\n\timport scipy\n\tfrom scipy import stats\n\timport gzip\r\n\tSNPMethylFileName = sys.argv[1] # Should end with .gz\n\tSNPMethylCorrsFileName = sys.argv[2]\n\treadsToMinMinorReadsFileName = sys.argv[3]\r\n\n\treadsToMinMinorReads = makeIntDict(readsToMinMinorReadsFileName)\n\tcorrelateSNPsMethylationPlusPlus(SNPMethylFileName, SNPMethylCorrsFileName, readsToMinMinorReads)\n"
] |
[
[
"scipy.stats.pearsonr"
]
] |
Guymer/PyGuymer3
|
[
"c2e2788a8b65854fa1e84d6ba5017fb2544fc195"
] |
[
"pyguymer3/media/return_ISO_palette.py"
] |
[
"def return_ISO_palette(fname, kwArgCheck = None, usr_track = -1, errors = \"replace\"):\n # Import standard modules ...\n import html\n import os\n import shutil\n import subprocess\n\n # Import special modules ...\n try:\n import lxml\n import lxml.etree\n except:\n raise Exception(\"\\\"lxml\\\" is not installed; run \\\"pip install --user lxml\\\"\") from None\n try:\n import numpy\n except:\n raise Exception(\"\\\"numpy\\\" is not installed; run \\\"pip install --user numpy\\\"\") from None\n\n # Import sub-functions ...\n from .yuv2rgb import yuv2rgb\n\n # Check keyword arguments ...\n if kwArgCheck is not None:\n print(f\"WARNING: \\\"{__name__}\\\" has been called with an extra positional argument\")\n\n # Check input ...\n if usr_track == -1:\n raise Exception(\"no track was requested\") from None\n\n # Check that \"lsdvd\" is installed ...\n if shutil.which(\"lsdvd\") is None:\n raise Exception(\"\\\"lsdvd\\\" is not installed\") from None\n\n # Find track info ...\n # NOTE: \"lsdvd\" specifies the output encoding in the accompanying XML\n # header, however, this is a lie. By inspection of \"oxml.c\" in the\n # \"lsdvd\" source code it appears that the XML header is hard-coded and\n # that \"lsdvd\" does not perform any checks to make sure that the\n # output is either valid XML or valid UTF-8. 
Therefore, I must load it\n # as a byte sequence and manually convert it to a UTF-8 string whilst\n # replacing the invalid UTF-8 bytes (and remove the XML header).\n # NOTE: Don't merge standard out and standard error together as the result\n # will probably not be valid XML if standard error is not empty.\n stdout = subprocess.check_output(\n [\n \"lsdvd\",\n \"-x\",\n \"-Ox\",\n fname\n ],\n stderr = open(os.devnull, \"wt\")\n ).decode(\"utf-8\", errors = errors)\n tmp = stdout.index(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\")\n stdout = stdout[tmp + len(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\"):]\n\n # Fix the file name itself ...\n stdout = stdout.replace(\"<device>{:s}</device>\".format(fname), \"<device>{:s}</device>\".format(html.escape(fname)))\n\n # Fix common errors ...\n stdout = stdout.replace(\"<df>Pan&Scan</df>\", \"<df>Pan&Scan</df>\")\n stdout = stdout.replace(\"<df>P&S + Letter</df>\", \"<df>P&S + Letter</df>\")\n\n # Loop over all tracks ...\n for track in lxml.etree.fromstring(stdout).findall(\"track\"):\n # Skip if this track is not the chosen one ...\n if int(track.find(\"ix\").text) != int(usr_track):\n continue\n\n # Create empty list ...\n vals = []\n\n # Loop over all colours in the palette ...\n for color in track.find(\"palette\").findall(\"color\"):\n # Convert YUV to RGB ...\n yuv = numpy.zeros((1, 1, 3), dtype = numpy.uint8)\n yuv[0, 0, 0] = int(color.text[0:2], 16)\n yuv[0, 0, 1] = int(color.text[2:4], 16)\n yuv[0, 0, 2] = int(color.text[4:6], 16)\n rgb = yuv2rgb(yuv)\n vals.append(format(rgb[0, 0, 0], \"x\").rjust(2, '0') + format(rgb[0, 0, 1], \"x\").rjust(2, '0') + format(rgb[0, 0, 2], \"x\").rjust(2, '0'))\n\n # Return answer ...\n return \",\".join(vals)\n"
] |
[
[
"numpy.zeros"
]
] |
DavidKatz-il/filesystem_spec
|
[
"449faf44c35a501df28ffd723191aeef77021a7e"
] |
[
"fsspec/tests/test_spec.py"
] |
[
"import json\nimport pickle\n\nimport numpy as np\nimport pytest\n\nimport fsspec\nfrom fsspec.implementations.ftp import FTPFileSystem\nfrom fsspec.spec import AbstractFileSystem, AbstractBufferedFile\n\n\nclass DummyTestFS(AbstractFileSystem):\n protocol = \"mock\"\n _fs_contents = (\n {\"name\": \"top_level\", \"type\": \"directory\"},\n {\"name\": \"top_level/second_level\", \"type\": \"directory\"},\n {\"name\": \"top_level/second_level/date=2019-10-01\", \"type\": \"directory\"},\n {\n \"name\": \"top_level/second_level/date=2019-10-01/a.parquet\",\n \"type\": \"file\",\n \"size\": 100,\n },\n {\n \"name\": \"top_level/second_level/date=2019-10-01/b.parquet\",\n \"type\": \"file\",\n \"size\": 100,\n },\n {\"name\": \"top_level/second_level/date=2019-10-02\", \"type\": \"directory\"},\n {\n \"name\": \"top_level/second_level/date=2019-10-02/a.parquet\",\n \"type\": \"file\",\n \"size\": 100,\n },\n {\"name\": \"top_level/second_level/date=2019-10-04\", \"type\": \"directory\"},\n {\n \"name\": \"top_level/second_level/date=2019-10-04/a.parquet\",\n \"type\": \"file\",\n \"size\": 100,\n },\n {\"name\": \"misc\", \"type\": \"directory\"},\n {\"name\": \"misc/foo.txt\", \"type\": \"file\", \"size\": 100},\n {\"name\": \"glob_test/hat/^foo.txt\", \"type\": \"file\", \"size\": 100},\n {\"name\": \"glob_test/dollar/$foo.txt\", \"type\": \"file\", \"size\": 100},\n {\"name\": \"glob_test/lbrace/{foo.txt\", \"type\": \"file\", \"size\": 100},\n {\"name\": \"glob_test/rbrace/}foo.txt\", \"type\": \"file\", \"size\": 100},\n )\n\n def __getitem__(self, name):\n for item in self._fs_contents:\n if item[\"name\"] == name:\n return item\n raise IndexError(\"{name} not found!\".format(name=name))\n\n def ls(self, path, detail=True, **kwargs):\n path = self._strip_protocol(path)\n\n files = {\n file[\"name\"]: file\n for file in self._fs_contents\n if path == self._parent(file[\"name\"])\n }\n\n if detail:\n return [files[name] for name in sorted(files)]\n\n return 
list(sorted(files))\n\n\n@pytest.mark.parametrize(\n \"test_path, expected\",\n [\n (\n \"mock://top_level/second_level/date=2019-10-01/a.parquet\",\n [\"top_level/second_level/date=2019-10-01/a.parquet\"],\n ),\n (\n \"mock://top_level/second_level/date=2019-10-01/*\",\n [\n \"top_level/second_level/date=2019-10-01/a.parquet\",\n \"top_level/second_level/date=2019-10-01/b.parquet\",\n ],\n ),\n (\"mock://top_level/second_level/date=2019-10\", []),\n (\n \"mock://top_level/second_level/date=2019-10-0[1-4]\",\n [\n \"top_level/second_level/date=2019-10-01\",\n \"top_level/second_level/date=2019-10-02\",\n \"top_level/second_level/date=2019-10-04\",\n ],\n ),\n (\n \"mock://top_level/second_level/date=2019-10-0[1-4]/*\",\n [\n \"top_level/second_level/date=2019-10-01/a.parquet\",\n \"top_level/second_level/date=2019-10-01/b.parquet\",\n \"top_level/second_level/date=2019-10-02/a.parquet\",\n \"top_level/second_level/date=2019-10-04/a.parquet\",\n ],\n ),\n (\n \"mock://top_level/second_level/date=2019-10-0[1-4]/[a].*\",\n [\n \"top_level/second_level/date=2019-10-01/a.parquet\",\n \"top_level/second_level/date=2019-10-02/a.parquet\",\n \"top_level/second_level/date=2019-10-04/a.parquet\",\n ],\n ),\n (\"mock://glob_test/hat/^foo.*\", [\"glob_test/hat/^foo.txt\"]),\n (\"mock://glob_test/dollar/$foo.*\", [\"glob_test/dollar/$foo.txt\"]),\n (\"mock://glob_test/lbrace/{foo.*\", [\"glob_test/lbrace/{foo.txt\"]),\n (\"mock://glob_test/rbrace/}foo.*\", [\"glob_test/rbrace/}foo.txt\"]),\n ],\n)\ndef test_glob(test_path, expected):\n test_fs = DummyTestFS()\n res = test_fs.glob(test_path)\n res = sorted(res) # FIXME: py35 back-compat\n assert res == expected\n res = test_fs.glob(test_path, detail=True)\n assert isinstance(res, dict)\n assert sorted(res) == expected # FIXME: py35 back-compat\n for name, info in res.items():\n assert info == test_fs[name]\n\n\ndef test_find_details():\n test_fs = DummyTestFS()\n filenames = test_fs.find(\"/\")\n details = test_fs.find(\"/\", 
detail=True)\n for filename in filenames:\n assert details[filename] == test_fs.info(filename)\n\n\ndef test_cache():\n fs = DummyTestFS()\n fs2 = DummyTestFS()\n assert fs is fs2\n\n assert len(fs._cache) == 1\n del fs2\n assert len(fs._cache) == 1\n del fs\n assert len(DummyTestFS._cache) == 1\n\n DummyTestFS.clear_instance_cache()\n assert len(DummyTestFS._cache) == 0\n\n\ndef test_alias():\n with pytest.warns(FutureWarning, match=\"add_aliases\"):\n DummyTestFS(add_aliases=True)\n\n\ndef test_add_docs_warns():\n with pytest.warns(FutureWarning, match=\"add_docs\"):\n AbstractFileSystem(add_docs=True)\n\n\ndef test_cache_options():\n fs = DummyTestFS()\n f = AbstractBufferedFile(fs, \"misc/foo.txt\", cache_type=\"bytes\")\n assert f.cache.trim\n\n # TODO: dummy buffered file\n f = AbstractBufferedFile(\n fs, \"misc/foo.txt\", cache_type=\"bytes\", cache_options=dict(trim=False)\n )\n assert f.cache.trim is False\n\n f = fs.open(\"misc/foo.txt\", cache_type=\"bytes\", cache_options=dict(trim=False))\n assert f.cache.trim is False\n\n\ndef test_trim_kwarg_warns():\n fs = DummyTestFS()\n with pytest.warns(FutureWarning, match=\"cache_options\"):\n AbstractBufferedFile(fs, \"misc/foo.txt\", cache_type=\"bytes\", trim=False)\n\n\ndef test_eq():\n fs = DummyTestFS()\n result = fs == 1\n assert result is False\n\n\ndef test_pickle_multiple():\n a = DummyTestFS(1)\n b = DummyTestFS(2, bar=1)\n\n x = pickle.dumps(a)\n y = pickle.dumps(b)\n\n del a, b\n DummyTestFS.clear_instance_cache()\n\n result = pickle.loads(x)\n assert result.storage_args == (1,)\n assert result.storage_options == {}\n\n result = pickle.loads(y)\n assert result.storage_args == (2,)\n assert result.storage_options == dict(bar=1)\n\n\ndef test_json():\n a = DummyTestFS(1)\n b = DummyTestFS(2, bar=1)\n\n outa = a.to_json()\n outb = b.to_json()\n\n assert json.loads(outb) # is valid JSON\n assert a != b\n assert \"bar\" in outb\n\n assert DummyTestFS.from_json(outa) is a\n assert 
DummyTestFS.from_json(outb) is b\n\n\n@pytest.mark.parametrize(\n \"dt\",\n [\n np.int8,\n np.int16,\n np.int32,\n np.int64,\n np.uint8,\n np.uint16,\n np.uint32,\n np.uint64,\n np.float32,\n np.float64,\n ],\n)\ndef test_readinto_with_numpy(tmpdir, dt):\n store_path = str(tmpdir / \"test_arr.npy\")\n arr = np.arange(10, dtype=dt)\n arr.tofile(store_path)\n\n arr2 = np.empty_like(arr)\n with fsspec.open(store_path, \"rb\") as f:\n f.readinto(arr2)\n\n assert np.array_equal(arr, arr2)\n\n\n@pytest.mark.parametrize(\n \"dt\",\n [\n np.int8,\n np.int16,\n np.int32,\n np.int64,\n np.uint8,\n np.uint16,\n np.uint32,\n np.uint64,\n np.float32,\n np.float64,\n ],\n)\ndef test_readinto_with_multibyte(ftp_writable, tmpdir, dt):\n host, port, user, pw = ftp_writable\n ftp = FTPFileSystem(host=host, port=port, username=user, password=pw)\n\n with ftp.open(\"/out\", \"wb\") as fp:\n arr = np.arange(10, dtype=dt)\n fp.write(arr.tobytes())\n\n with ftp.open(\"/out\", \"rb\") as fp:\n arr2 = np.empty_like(arr)\n fp.readinto(arr2)\n\n assert np.array_equal(arr, arr2)\n"
] |
[
[
"numpy.arange",
"numpy.array_equal",
"numpy.empty_like"
]
] |
samcw/nanodet
|
[
"dc7c4f6021199d6988221b516d49af392a52d748"
] |
[
"nanodet/model/backbone/ghostnet.py"
] |
[
"\"\"\"\n2020.06.09-Changed for building GhostNet\nHuawei Technologies Co., Ltd. <foss@huawei.com>\nCreates a GhostNet Model as defined in:\nGhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang,\nQi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.\nhttps://arxiv.org/abs/1911.11907\nModified from https://github.com/d-li14/mobilenetv3.pytorch\nand https://github.com/rwightman/pytorch-image-models\n\"\"\"\nimport logging\nimport math\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..module.activation import act_layers\n\n\ndef get_url(width_mult=1.0):\n if width_mult == 1.0:\n return \"https://raw.githubusercontent.com/huawei-noah/CV-Backbones/master/ghostnet_pytorch/models/state_dict_73.98.pth\" # noqa E501\n else:\n logging.info(\"GhostNet only has 1.0 pretrain model. \")\n return None\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\ndef hard_sigmoid(x, inplace: bool = False):\n if inplace:\n return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)\n else:\n return F.relu6(x + 3.0) / 6.0\n\n\nclass SqueezeExcite(nn.Module):\n def __init__(\n self,\n in_chs,\n se_ratio=0.25,\n reduced_base_chs=None,\n activation=\"ReLU\",\n gate_fn=hard_sigmoid,\n divisor=4,\n **_\n ):\n super(SqueezeExcite, self).__init__()\n self.gate_fn = gate_fn\n reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)\n 
self.act1 = act_layers(activation)\n self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)\n\n def forward(self, x):\n x_se = self.avg_pool(x)\n x_se = self.conv_reduce(x_se)\n x_se = self.act1(x_se)\n x_se = self.conv_expand(x_se)\n x = x * self.gate_fn(x_se)\n return x\n\n# EA layer\nclass EcaLayer(nn.Module):\n \"\"\"Constructs a ECA module.\n Args:\n channel: Number of channels of the input feature map\n k_size: Adaptive selection of kernel size\n \"\"\"\n def __init__(self, channel, k_size=3):\n super(EcaLayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # x: input features with shape [b, c, h, w]\n b, c, h, w = x.size()\n\n # feature descriptor on the global spatial information\n y = self.avg_pool(x)\n\n # Two different branches of ECA module\n y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)\n\n # Multi-scale information fusion\n y = self.sigmoid(y)\n\n return x * y.expand_as(x)\n\n\nclass ConvBnAct(nn.Module):\n def __init__(self, in_chs, out_chs, kernel_size, stride=1, activation=\"ReLU\"):\n super(ConvBnAct, self).__init__()\n self.conv = nn.Conv2d(\n in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False\n )\n self.bn1 = nn.BatchNorm2d(out_chs)\n self.act1 = act_layers(activation)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn1(x)\n x = self.act1(x)\n return x\n\n\nclass GhostModule(nn.Module):\n def __init__(\n self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, activation=\"ReLU\"\n ):\n super(GhostModule, self).__init__()\n self.oup = oup\n init_channels = math.ceil(oup / ratio)\n new_channels = init_channels * (ratio - 1)\n\n self.primary_conv = nn.Sequential(\n nn.Conv2d(\n inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False\n ),\n nn.BatchNorm2d(init_channels),\n act_layers(activation) if activation 
else nn.Sequential(),\n )\n\n self.cheap_operation = nn.Sequential(\n nn.Conv2d(\n init_channels,\n new_channels,\n dw_size,\n 1,\n dw_size // 2,\n groups=init_channels,\n bias=False,\n ),\n nn.BatchNorm2d(new_channels),\n act_layers(activation) if activation else nn.Sequential(),\n )\n\n def forward(self, x):\n x1 = self.primary_conv(x)\n x2 = self.cheap_operation(x1)\n out = torch.cat([x1, x2], dim=1)\n return out\n\n\nclass GhostBottleneck(nn.Module):\n \"\"\"Ghost bottleneck w/ optional SE\"\"\"\n\n def __init__(\n self,\n in_chs,\n mid_chs,\n out_chs,\n dw_kernel_size=3,\n stride=1,\n activation=\"ReLU\",\n se_ratio=0.0,\n ):\n super(GhostBottleneck, self).__init__()\n has_se = se_ratio is not None and se_ratio > 0.0\n self.stride = stride\n\n # Point-wise expansion\n self.ghost1 = GhostModule(in_chs, mid_chs, activation=activation)\n\n # Depth-wise convolution\n if self.stride > 1:\n self.conv_dw = nn.Conv2d(\n mid_chs,\n mid_chs,\n dw_kernel_size,\n stride=stride,\n padding=(dw_kernel_size - 1) // 2,\n groups=mid_chs,\n bias=False,\n )\n self.bn_dw = nn.BatchNorm2d(mid_chs)\n\n # Squeeze-and-excitation\n if has_se:\n self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)\n else:\n self.se = None\n\n # Point-wise linear projection\n self.ghost2 = GhostModule(mid_chs, out_chs, activation=None)\n\n # shortcut\n if in_chs == out_chs and self.stride == 1:\n self.shortcut = nn.Sequential()\n else:\n self.shortcut = nn.Sequential(\n nn.Conv2d(\n in_chs,\n in_chs,\n dw_kernel_size,\n stride=stride,\n padding=(dw_kernel_size - 1) // 2,\n groups=in_chs,\n bias=False,\n ),\n nn.BatchNorm2d(in_chs),\n nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_chs),\n )\n\n def forward(self, x):\n residual = x\n\n # 1st ghost bottleneck\n x = self.ghost1(x)\n\n # Depth-wise convolution\n if self.stride > 1:\n x = self.conv_dw(x)\n x = self.bn_dw(x)\n\n # Squeeze-and-excitation\n\n # 2nd ghost bottleneck\n x = self.ghost2(x)\n\n x += 
self.shortcut(residual)\n return x\n\nclass GhostBottleneckSandGlass(nn.Module):\n \"\"\" Ghost bottleneck w/ optional SE\"\"\"\n\n def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,\n stride=1, activation=\"ReLU\", se_ratio=0.0):\n super(GhostBottleneckSandGlass, self).__init__()\n self.stride = stride\n\n # Depth-wise for more space detail\n self.dw1 = nn.Sequential(\n nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False),\n nn.BatchNorm2d(in_chs),\n nn.ReLU6()\n )\n\n # Point-wise expansion\n self.ghost1 = GhostModule(in_chs, mid_chs)\n\n # Eca-Layer\n self.eca = EcaLayer(out_chs)\n\n # Point-wise linear projection\n self.ghost2 = GhostModule(mid_chs, out_chs)\n\n # Depth-wise for more space detail\n self.dw2 = nn.Sequential(\n nn.Conv2d(out_chs, out_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=out_chs, bias=False),\n nn.BatchNorm2d(out_chs)\n )\n\n # shortcut\n if in_chs == out_chs and self.stride == 1:\n self.shortcut = nn.Sequential()\n else:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride,\n padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False),\n nn.BatchNorm2d(in_chs),\n nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_chs),\n )\n\n def forward(self, x):\n residual = x\n\n x = self.dw1(x)\n\n # 1st ghost bottleneck\n x = self.ghost1(x)\n\n # 2nd ghost bottleneck\n x = self.ghost2(x)\n\n x = self.dw2(x)\n\n # eca\n x = self.eca(x)\n\n x += self.shortcut(residual)\n return x\n\n\nclass GhostNet(nn.Module):\n def __init__(\n self,\n width_mult=1.0,\n out_stages=(4, 6, 9),\n activation=\"ReLU\",\n pretrain=True,\n act=None,\n ):\n super(GhostNet, self).__init__()\n assert set(out_stages).issubset(i for i in range(10))\n self.width_mult = width_mult\n self.out_stages = out_stages\n # setting of inverted residual blocks\n self.cfgs = [\n # k, t, c, SE, s\n # stage1\n 
[[3, 16, 16, 0, 1]], # 0\n # stage2\n [[3, 48, 24, 0, 2]], # 1\n [[3, 72, 24, 0, 1]], # 2 1/4\n # stage3\n [[5, 72, 40, 0.25, 2]], # 3\n [[5, 120, 40, 0.25, 1]], # 4 1/8\n # stage4\n [[3, 240, 80, 0, 2]], # 5\n [\n [3, 200, 80, 0, 1],\n [3, 184, 80, 0, 1],\n [3, 184, 80, 0, 1],\n [3, 480, 112, 0.25, 1],\n [3, 672, 112, 0.25, 1],\n ], # 6 1/16\n # stage5\n [[5, 672, 160, 0.25, 2]], # 7\n [\n [5, 960, 160, 0, 1],\n [5, 960, 160, 0.25, 1],\n [5, 960, 160, 0, 1],\n [5, 960, 160, 0.25, 1],\n ], # 8\n ]\n # ------conv+bn+act----------# 9 1/32\n\n self.activation = activation\n if act is not None:\n warnings.warn(\n \"Warning! act argument has been deprecated, \" \"use activation instead!\"\n )\n self.activation = act\n\n # building first layer\n output_channel = _make_divisible(16 * width_mult, 4)\n self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(output_channel)\n self.act1 = act_layers(self.activation)\n input_channel = output_channel\n\n # building inverted residual blocks\n stages = []\n block = GhostBottleneck\n # block = GhostBottleneckSandGlass\n for cfg in self.cfgs:\n layers = []\n for k, exp_size, c, se_ratio, s in cfg:\n output_channel = _make_divisible(c * width_mult, 4)\n hidden_channel = _make_divisible(exp_size * width_mult, 4)\n layers.append(\n block(\n input_channel,\n hidden_channel,\n output_channel,\n k,\n s,\n activation=self.activation,\n se_ratio=se_ratio,\n )\n )\n input_channel = output_channel\n stages.append(nn.Sequential(*layers))\n\n output_channel = _make_divisible(exp_size * width_mult, 4)\n stages.append(\n nn.Sequential(\n ConvBnAct(input_channel, output_channel, 1, activation=self.activation)\n )\n ) # 9\n\n self.blocks = nn.Sequential(*stages)\n\n self._initialize_weights(pretrain)\n\n def forward(self, x):\n x = self.conv_stem(x)\n x = self.bn1(x)\n x = self.act1(x)\n output = []\n for i in range(10):\n x = self.blocks[i](x)\n if i in self.out_stages:\n output.append(x)\n return 
tuple(output)\n\n def _initialize_weights(self, pretrain=True):\n print(\"init weights...\")\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if \"conv_stem\" in name:\n nn.init.normal_(m.weight, 0, 0.01)\n else:\n nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0001)\n nn.init.constant_(m.running_mean, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0001)\n nn.init.constant_(m.running_mean, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n if pretrain:\n url = get_url(self.width_mult)\n if url is not None:\n state_dict = torch.hub.load_state_dict_from_url(url, progress=True)\n self.load_state_dict(state_dict, strict=False)\n"
] |
[
[
"torch.cat",
"torch.nn.functional.relu6",
"torch.nn.Sigmoid",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.hub.load_state_dict_from_url",
"torch.nn.AdaptiveAvgPool2d"
]
] |
wangck20/GeDML
|
[
"1f76ac2094d7b88be7fd4eb6145e5586e547b9ca"
] |
[
"src/gedml/launcher/managers/base_manager.py"
] |
[
"import torch\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport torch.distributed as dist \nimport logging\nimport pandas as pd \nimport traceback\n\nfrom ...core import models\nfrom ..misc import utils\n\nclass BaseManager:\n \"\"\"\n Manager all modules and computation devices. Support three kinds of computation:\n\n 1. DataParallel (single machine)\n 2. DistributedDataParallel (single machine)\n 3. DistributedDataParallel (multi machines)\n \"\"\"\n def __init__(\n self,\n trainer,\n tester,\n recorder,\n objects_dict,\n device=None,\n schedulers=None,\n gradclipper=None,\n samplers=None,\n collatefns=None,\n is_resume=False,\n is_distributed=False,\n device_wrapper_type=\"DP\",\n dist_port=23456,\n world_size=None,\n phase=\"train\",\n primary_metric=[\"test\", \"recall_at_1\"],\n to_device_list=[\"models\", \"collectors\"],\n to_wrap_list=[\"models\"],\n patience=10,\n ):\n self.trainer = trainer\n self.tester = tester\n self.recorder = recorder\n self.objects_dict = objects_dict\n self.device = device\n self.schedulers = schedulers\n self.gradclipper = gradclipper\n self.samplers = samplers\n self.collatefns = collatefns\n self.epochs = 0\n self.is_resume = is_resume\n self.is_distributed = is_distributed\n self.device_wrapper_type = device_wrapper_type\n self.dist_port = dist_port\n self.world_size = world_size\n self.phase = phase\n self.primary_metric = primary_metric\n self.to_device_list = to_device_list\n self.to_wrap_list = to_wrap_list\n self.patience = patience\n\n self.best_metric = -1\n self.patience_counts = 0\n self.is_best = False\n\n self.assert_phase()\n self.assert_device()\n self.assert_required_member()\n self.assert_resume_folder_exist()\n\n self.initiate_objects_dict()\n self.initiate_members()\n \n @property\n def _required_member(self):\n return [\n \"metrics\",\n \"collectors\",\n \"selectors\",\n \"models\",\n \"losses\",\n \"evaluators\",\n \"optimizers\",\n \"transforms\",\n \"datasets\",\n ]\n \n def 
assert_phase(self):\n assert self.phase in [\"train\", \"evaluate\"]\n\n def assert_device(self):\n assert self.device_wrapper_type in [\"DP\", \"DDP\"]\n if self.is_distributed:\n assert self.device_wrapper_type == \"DDP\"\n\n def assert_required_member(self):\n object_dict_keys = list(self.objects_dict.keys())\n assert all(\n [item in object_dict_keys\n for item in self._required_member]\n )\n \n def assert_resume_folder_exist(self):\n if self.is_resume:\n assert not self.recorder.delete_old_folder\n\n def initiate_objects_dict(self):\n for k, v in self.objects_dict.items():\n setattr(self, k, v)\n del self.objects_dict\n \n def initiate_members(self):\n self.initiate_device()\n self.initiate_models()\n self.initiate_collectors()\n self.initiate_selectors()\n self.initiate_losses()\n self.initiate_schedulers()\n # for distributed training\n if self.is_distributed:\n self.initiate_distributed_trainers()\n self.initiate_distributed_testers()\n self.initiate_addition_items()\n \n def initiate_addition_items(self):\n pass\n\n def initiate_device(self):\n if self.device_wrapper_type == \"DDP\" and not self.is_distributed:\n torch.distributed.init_process_group(\n backend='nccl',\n init_method='tcp://localhost:{}'.format(self.dist_port),\n rank=0,\n world_size=1\n )\n \n if self.is_distributed:\n self.world_size = (\n dist.get_world_size()\n if self.world_size is None\n else self.world_size\n )\n\n self.main_device_id, self.device_ids = None, None\n self.multi_gpu = False\n if self.device is None:\n self.main_device_id = 0\n self.device_ids = [0]\n elif isinstance(self.device, int):\n self.main_device_id = self.device\n self.device_ids = [self.device]\n elif isinstance(self.device, list):\n self.main_device_id = self.device[0]\n self.device_ids = self.device\n self.multi_gpu = (\n True if len(self.device_ids) > 1\n else False\n )\n else:\n raise TypeError(\n \"Device type error!\"\n )\n # initiate self.device\n self.device = torch.device(\n 
\"cuda:{}\".format(self.main_device_id)\n if torch.cuda.is_available()\n else \"cpu\"\n )\n \n def initiate_models(self):\n # to device\n is_to_device = \"models\" in self.to_device_list\n is_to_wrap = \"models\" in self.to_wrap_list\n if is_to_device:\n self._members_to_device(\"models\", to_warp=is_to_wrap)\n \n def initiate_collectors(self):\n # to device\n is_to_device = \"collectors\" in self.to_device_list\n is_to_wrap = \"collectors\" in self.to_wrap_list\n if is_to_device:\n self._members_to_device(\"collectors\", to_warp=is_to_wrap)\n \n def initiate_selectors(self):\n # to device\n is_to_device = \"selectors\" in self.to_device_list\n is_to_wrap = \"selectors\" in self.to_wrap_list\n if is_to_device:\n self._members_to_device(\"selectors\", to_warp=is_to_wrap)\n\n def initiate_losses(self):\n # to device\n is_to_device = \"losses\" in self.to_device_list\n is_to_wrap = \"losses\" in self.to_wrap_list\n if is_to_device:\n self._members_to_device(\"losses\", to_warp=is_to_wrap)\n \n def initiate_distributed_trainers(self):\n total_batch_size = self.trainer.batch_size\n assert (total_batch_size % self.world_size) == 0\n sub_batch_size = int(total_batch_size // self.world_size)\n self.trainer.set_distributed(True)\n self.trainer.set_batch_size(sub_batch_size)\n \n def initiate_distributed_testers(self):\n self.tester.set_distributed(True)\n \n def initiate_schedulers(self):\n if self.schedulers is None:\n self.schedulers = {}\n \n def _members_to_device(self, module_name: str, to_warp=True):\n members = getattr(self, module_name)\n # to device\n if not self.is_distributed:\n # single-device\n if self.multi_gpu:\n for k, v in members.items():\n members[k] = members[k].to(self.device)\n if to_warp:\n if self.device_wrapper_type == \"DP\":\n members[k] = torch.nn.DataParallel(\n v,\n device_ids=self.device_ids\n )\n else:\n try:\n members[k] = DDP(\n v,\n device_ids=self.device_ids,\n find_unused_parameters=True\n )\n except:\n trace = traceback.format_exc()\n 
logging.warning(\"{}\".format(trace))\n else:\n for k, v in members.items():\n members[k] = v.to(self.device)\n else:\n # multi-device\n for k, v in members.items():\n members[k] = members[k].to(self.device)\n try:\n members[k] = DDP(\n members[k], \n device_ids=self.device_ids,\n find_unused_parameters=True\n )\n except:\n trace = traceback.format_exc()\n logging.warning(\"{}\".format(trace))\n \n \"\"\"\n Run\n \"\"\"\n \n def run(self, phase=\"train\", start_epoch=0, total_epochs=61, is_test=True, is_save=True, interval=1, warm_up=2, warm_up_list=None):\n self.phase = phase\n self.assert_phase()\n self.prepare()\n self.maybe_resume()\n if self.phase == \"train\":\n for i in range(start_epoch, total_epochs):\n self.epochs = i\n if i < warm_up:\n logging.info(\"Warm up with {}\".format(warm_up_list))\n self.trainer.set_activated_optims(warm_up_list)\n else:\n self.trainer.set_activated_optims()\n self.train(epochs=self.epochs)\n self.release_memory()\n if is_test:\n if (i % interval) == 0:\n self.test()\n self.display_metrics()\n self.save_metrics()\n self.release_memory()\n if is_save:\n self.save_models()\n # early stop\n if self.patience_counts >= self.patience:\n logging.info(\"Training terminated!\")\n break\n elif self.phase == \"evaluate\":\n self.test()\n self.display_metrics()\n \n def prepare(self):\n # prepare trainer\n utils.func_params_mediator(\n [self],\n self.trainer.prepare\n )\n # prepare tester\n utils.func_params_mediator(\n [\n {\"recorders\": self.recorder},\n self,\n ],\n self.tester.prepare\n )\n \n def maybe_resume(self):\n if self.is_resume:\n logging.info(\"Resume objects...\")\n self.recorder.load_models(\n obj=self.trainer,\n device=self.device\n )\n \n def meta_test(self):\n self.epochs = -1\n self.test()\n self.save_metrics()\n self.display_metrics()\n \n def save_metrics(self):\n for k, v in self.metrics.items():\n data, _ = self.recorder.get_data({k:v})\n self.recorder.update(data, self.epochs)\n \n def display_metrics(self):\n # 
best metric check\n cur_metric = self.metrics[self.primary_metric[0]][self.primary_metric[1]]\n if cur_metric > self.best_metric:\n self.best_metric = cur_metric\n self.is_best = True\n logging.info(\"NEW BEST METRIC!!!\")\n self.patience_counts = 0\n else:\n self.is_best = False\n self.patience_counts += 1\n self.metrics[self.primary_metric[0]][\"BEST_\" + self.primary_metric[1]] = self.best_metric\n\n # display\n for k, v in self.metrics.items():\n logging.info(\"{} Metrics ---\".format(k.upper()))\n print(pd.DataFrame([v]))\n \n def save_models(self):\n self.recorder.save_models(self.trainer, step=self.epochs, best=self.is_best)\n \n def train(self, epochs=None):\n self.trainer.train(epochs=epochs)\n \n def test(self):\n self.metrics = self.tester.test()\n \n def release_memory(self):\n torch.cuda.empty_cache()\n"
] |
[
[
"torch.distributed.get_world_size",
"pandas.DataFrame",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] |
RuiNian7319/Woodberry_Distillation
|
[
"4ee8ab9de8e313bca48d9a7af9393abcad85ece4"
] |
[
"Benchmark_Plots/Optimal_input_plots/plotting.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Load Data\ntransfer_function = np.loadtxt('transfer_function.csv', delimiter=',')\nmatlab_state_space = np.loadtxt('state_space.csv', delimiter=',')\npython_state_space = np.loadtxt('python_state_space.csv')\n\n\"\"\"\nMath Plotting Library settings\n\"\"\"\n\nfonts = {\"family\": \"serif\",\n \"weight\": \"normal\",\n \"size\": \"12\"}\n\nplt.rc('font', **fonts)\nplt.rc('text', usetex=True)\n\n# Distillate Trajectory\n\nplt.rcParams[\"figure.figsize\"] = (10, 5)\n\nplt.subplot(1, 2, 1)\nplt.title(r\"$X_D$ Trajectory\")\nplt.ylabel(r\"\\%MeOH in Distillate, \\textit{$X_D$} (\\%)\")\nplt.xlabel(r\"Time, \\textit{T} (steps)\")\n\nplt.plot(transfer_function[:, 0] * 100, label='Transfer Function')\nplt.plot(matlab_state_space[:, 0] * 100, label='State Space (MATLAB)')\nplt.plot(python_state_space[6:, 0] * 100, label='State Space (Python)')\n\nplt.xlim([0, 150])\n\nplt.legend(loc=4, prop={'size': 10}, frameon=False)\n\n# Bottoms Trajectory\n\nplt.subplot(1, 2, 2)\nplt.title(r\"$X_B$ Trajectory\")\nplt.ylabel(r\"\\%MeOH in Bottoms, \\textit{$X_B$} (\\%)\")\nplt.xlabel(r\"Time, \\textit{T} (steps)\")\n\nplt.plot(transfer_function[:, 1] * 100, label='Transfer Function')\nplt.plot(matlab_state_space[:, 1] * 100, label='State Space (MATLAB)')\nplt.plot(python_state_space[6:, 1] * 100, label='State Space (Python)')\n\nplt.xlim([0, 150])\n\nplt.legend(loc=4, prop={'size': 10}, frameon=False)\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
] |
Tobi-Alonso/ResNet50-PYNQ
|
[
"7c203c2b249479c5384afe152dde2bb06576339b"
] |
[
"host/synth_bench_power.py"
] |
[
"# Copyright (c) 2019, Xilinx\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport pynq\nfrom pynq import pmbus\nfrom pynq import Device\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport argparse\n\n# Set up data acquisition using PYNQ's PMBus API\ndef setup_power_recording():\n rails = pmbus.get_xrt_sysfs_rails()\n\n #We create a recorder monitoring the three rails that have power measurement on Alveo. 
\n #Total board power is obtained by summing together the PCI Express and Auxilliary 12V rails. \n #While some current is also drawn over the PCIe 5V rail this is negligible compared to the 12V rails and isn't recorded. \n #We also measure the VCC_INT power which is the primary supply to the FPGA.\n recorder = pmbus.DataRecorder(rails[\"12v_aux\"].power,\n rails[\"12v_pex\"].power,\n rails[\"vccint\"].power)\n\n return recorder\n\n\n# ## Synthetic Throughput Test\n# We execute inference of a configurable-size batch of images, without data movement. We measure the latency, throughput, and power\ndef benchmark_synthetic(bs, nreps):\n ibuf = pynq.allocate((bs,3,224,224), dtype=np.int8, target=ol.bank0)\n obuf = pynq.allocate((bs,5), dtype=np.uint32, target=ol.bank0)\n\n # Start power monitoring\n pwr_rec = setup_power_recording()\n pwr_rec.record(0.1)\n\n total_duration = time.monotonic()\n for i in range(nreps):\n accelerator.call(ibuf, obuf, fcbuf, bs)\n total_duration = time.monotonic() - total_duration\n\n # Stop the power monitoring\n pwr_rec.stop()\n\n latency = total_duration/nreps\n fps = int((nreps/total_duration)*bs)\n\n # Aggregate board/fpga power into a Pandas dataframe\n f = pwr_rec.frame\n powers = pd.DataFrame(index=f.index)\n powers['board_power'] = f['12v_aux_power'] + f['12v_pex_power']\n powers['fpga_power'] = f['vccint_power']\n\n return fps, latency, powers\n\nif __name__== \"__main__\":\n\n parser = argparse.ArgumentParser(description='ResNet50 inference with FINN and PYNQ on Alveo')\n parser.add_argument('--xclbin', type=str, default='resnet50.xclbin', help='Accelerator image file (xclbin)')\n parser.add_argument('--fcweights', type=str, default='fcweights.csv', help='FC weights file (CSV)')\n parser.add_argument('--shell', type=str, default='xilinx_u250_xdma_201830_2', help='Name of compatible shell')\n parser.add_argument('--bs', type=int, default=1, help='Batch size (images processed per accelerator invocation)')\n 
parser.add_argument('--reps',type=int, default=100, help='Number of batches to run')\n args = parser.parse_args()\n\n # discover a compatible shell if there are multiple\n devices = Device.devices\n if len(devices) > 1:\n for i in range(len(devices)):\n print(\"{}) {}\".format(i, devices[i].name))\n if devices[i].name == args.shell:\n print(\"Compatible shell found, using device\",i)\n Device.active_device = devices[i]\n break\n\n ol=pynq.Overlay(args.xclbin)\n accelerator=ol.resnet50_1\n\n #allocate a buffer for FC weights, targeting the Alveo DDR Bank 0\n fcbuf = pynq.allocate((1000,2048), dtype=np.int8, target=ol.bank0)\n\n # Load the weight from a CSV file and push them to the accelerator buffer:\n fcweights = np.genfromtxt(args.fcweights, delimiter=',', dtype=np.int8)\n #csv reader erroneously adds one extra element to the end, so remove, then reshape\n fcweights = fcweights[:-1].reshape(1000,2048)\n fcbuf[:] = fcweights\n\n #Move the data to the Alveo DDR\n fcbuf.sync_to_device()\n\n fps, latency, power = benchmark_synthetic(args.bs,args.reps)\n\n print(\"Throughput:\",fps,\"FPS\")\n print(\"Latency:\",round(latency*1000,2),\"ms\")\n print(\"FPGA Power:\",round(power.mean()['fpga_power'],2),\"Watts\")\n print(\"Board Power:\",round(power.mean()['board_power'],2),\"Watts\")\n\n"
] |
[
[
"pandas.DataFrame",
"numpy.genfromtxt"
]
] |
BehaviorPredictionTestingPlatform/VerifAI
|
[
"db05f3573c2e7d98c03029c1b4efca93e6b08edb"
] |
[
"examples/openai_gym/cartpole/cartpole_simulation.py"
] |
[
"from verifai.simulators.openai_gym.baselines_task import *\nfrom verifai.simulators.openai_gym.client_gym import *\nfrom dotmap import DotMap\nimport numpy as np\n\n# 0 for control only, 1 for training only and >=2 for both\nsample_type = 0\nclass cartpole_standing(control_task):\n def __init__(self, baselines_params=None):\n if baselines_params is None:\n baselines_params = DotMap()\n baselines_params.alg = 'ppo2'\n baselines_params.env_id = 'CartPole-v1'\n baseline_params.num_timesteps = 1e5\n else:\n if 'env_id' not in baseline_params or baseline_params.env_id !='CartPole-v1':\n baseline_params.env_id = 'CartPole-v1'\n if 'alg' not in baseline_params:\n baseline_params.alg = 'ppo2'\n super().__init__(baselines_params=baselines_params)\n if sample_type >= 1:\n self.run_task = self.run_task_retrain\n self.algs = ['ppo2', 'deepq', 'acer', 'a2c', 'trpo_mpi', 'acktr']\n\n\n def use_sample(self, sample):\n if sample_type == 0 or sample_type >=2:\n init_condition = sample.init_conditions\n x_init, theta_init, length, masscart,masspole = init_condition.x_init[0], \\\n init_condition.theta_init[0], \\\n init_condition.length[0], \\\n init_condition.masscart[0], \\\n init_condition.masspole[0]\n self.init_state = np.array([x_init, 0.0, theta_init, 0.0])\n self.env.env.length = length\n self.env.env.masscart = masscart\n self.env.env.masspole = masspole\n if sample_type >=1:\n training_condition = sample.training_conditions\n num_timesteps, alg = int(training_condition.num_timesteps[0]), \\\n training_condition.alg\n self.num_timesteps = num_timesteps\n self.alg = self.algs[alg]\n self.train()\n if self.model is None:\n self.train()\n\n def trajectory_definition(self):\n traj = np.array(self.trajectory)\n traj_x, _, traj_theta, _ = traj.T\n xinrange = [(j*self.env.env.tau, self.env.env.x_threshold + 0.1 - np.abs(v))\n for j,v in enumerate(traj_x)]\n thetainrange = [(j*self.env.env.tau, self.env.env.theta_threshold_radians + 0.01 - np.abs(v))\n for j, v in 
enumerate(traj_theta)]\n sim_results= {'xinrange':xinrange, 'thetainrange':thetainrange}\n return sim_results\n\n\nPORT = 8888\nBUFSIZE = 4096\nN_SIM_STEPS = 500\nsimulation_data = DotMap()\nsimulation_data.port = PORT\nsimulation_data.bufsize = BUFSIZE\nbaseline_params = DotMap()\nbaseline_params.num_timesteps = 1e5\nsimulation_data.task = cartpole_standing(baselines_params=baseline_params)\n\nclient_task = ClientGym(simulation_data)\nwhile True:\n if not client_task.run_client():\n print(\"End of all cartpole simulations\")\n break\n"
] |
[
[
"numpy.array",
"numpy.abs"
]
] |
gregtucker/facetmorphology
|
[
"0bad54943b825f2742ebc3ee4e82a7d2f47ed2d7"
] |
[
"ModelRunScripts/SensitivityAnalysisDandV/run_v_w.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 5 09:10:56 2018\n\n@author: gtucker\n\"\"\"\n\nimport numpy as np\nimport datetime\nfrom grainhill import GrainFacetSimulator\nfrom grainhill import SlopeMeasurer\nimport landlab\nfrom landlab.io.native_landlab import save_grid\nimport os\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory ' + directory)\n\n\nparams = {\n 'grid_size' : (111, 81),\n 'report_interval' : 5.0, \n 'output_interval' : 1.0e99, \n 'disturbance_rate' : 1.0e-4,\n 'weathering_rate' : 0.0,\n 'dissolution_rate': 0.0,\n 'friction_coef' : 1.0,\n 'fault_x' : -0.01, \n 'cell_width' : 0.5, \n 'grav_accel' : 9.8,\n }\n\n\n# Open a file to record output:\nd = datetime.datetime.today()\ntoday_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)\nresults_file = open('results_v_vs_w' + today_str + '.csv', 'w')\nresults_file.write('Landlab version,' + landlab.__version__ + ',\\n')\n\n\n# Print header in file\nresults_file.write('Uplift interval (yr),Weathering rate '\n + 'parameter (1/yr),Gradient (m/m),'\n + 'Slope angle (deg)\\n')\n\n\n# Sweep through a range of dissolution rate parameters\nfor uplift_interval_exp in np.arange(2, 5.2, 0.2):\n for weath_exp in np.arange(-5, -1.8, 0.2):\n\n weath_rate = 10.0**weath_exp\n uplift_interval = 10.0**uplift_interval_exp\n params['uplift_interval'] = uplift_interval\n params['weathering_rate'] = weath_rate\n\n # Set run duration long enough for uplift of 150 rows\n params['run_duration'] = 100 * uplift_interval\n params['plot_interval'] = 10 * uplift_interval\n\n print('Uplift interval: ' + str(params['uplift_interval']) + ' 1/y')\n print('Weathering rate: ' + str(params['weathering_rate']) + ' 1/y')\n\n opname = ('tau' + str(int(round(10 * uplift_interval_exp))) + 'w' + str(int(round(10 * weath_exp))))\n create_folder(opname)\n params['plot_file_name'] = opname + '/' + 
opname\n\n gfs = GrainFacetSimulator(**params)\n gfs.run()\n\n sm = SlopeMeasurer(gfs)\n sm.pick_rock_surface()\n (m, b) = sm.fit_straight_line_to_surface()\n angle = np.degrees(np.arctan(m))\n\n results_file.write(str(uplift_interval) + ',' + str(weath_rate) + ','\n + str(m) + ',' + str(angle) + '\\n')\n results_file.flush()\n\n save_grid(gfs.grid, opname + '/' + opname + '.grid', clobber=True)\n\nresults_file.close()\n"
] |
[
[
"numpy.arctan",
"numpy.arange"
]
] |
JiaxiangBU/link-prediction
|
[
"8fd569dae07cc4fc2972e2fb97cce0fb00875111"
] |
[
"gae/initializations.py"
] |
[
"import tensorflow as tf\nimport numpy as np\n\ndef weight_variable_glorot(input_dim, output_dim, dtype=tf.float32, name=\"\"):\n \"\"\"Create a weight variable with Glorot & Bengio (AISTATS 2010)\n initialization.\n \"\"\"\n init_range = np.sqrt(6.0 / (input_dim + output_dim))\n initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,\n maxval=init_range, dtype=dtype)\n return tf.Variable(initial, name=name)\n"
] |
[
[
"tensorflow.random_uniform",
"tensorflow.Variable",
"numpy.sqrt"
]
] |
decisionforce/pgdrive
|
[
"19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee",
"19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee"
] |
[
"pgdrive/tests/test_env/local_test_pgdrive_rgb_depth.py",
"pgdrive/policy/idm_policy.py"
] |
[
"from pgdrive.envs.pgdrive_env import PGDriveEnv\nfrom pgdrive.constants import TerminationState\nimport numpy as np\n\ninfo_keys = [\n \"cost\", \"velocity\", \"steering\", \"acceleration\", \"step_reward\", TerminationState.CRASH_VEHICLE,\n TerminationState.OUT_OF_ROAD, TerminationState.SUCCESS\n]\n\n\ndef _act(env, action):\n assert env.action_space.contains(action)\n obs, reward, done, info = env.step(action)\n assert env.observation_space.contains(obs)\n assert np.isscalar(reward)\n assert isinstance(info, dict)\n for k in info_keys:\n assert k in info\n\n\ndef test_pgdrive_env_rgb():\n env = PGDriveEnv(dict(offscreen_render=True))\n try:\n obs = env.reset()\n assert env.observation_space.contains(obs)\n _act(env, env.action_space.sample())\n for x in [-1, 0, 1]:\n env.reset()\n for y in [-1, 0, 1]:\n _act(env, [x, y])\n finally:\n env.close()\n\n\nif __name__ == '__main__':\n test_pgdrive_env_rgb()\n",
"import logging\n\nimport numpy as np\n\nfrom pgdrive.component.vehicle_module.PID_controller import PIDController\nfrom pgdrive.policy.base_policy import BasePolicy\nfrom pgdrive.utils.math_utils import not_zero, wrap_to_pi, point_distance\nfrom pgdrive.utils.scene_utils import is_same_lane_index, is_following_lane_index\n\n\nclass FrontBackObjects:\n def __init__(self, front_ret, back_ret, front_dist, back_dist):\n self.front_objs = front_ret\n self.back_objs = back_ret\n self.front_dist = front_dist\n self.back_dist = back_dist\n\n def left_lane_exist(self):\n return True if self.front_dist[0] is not None else False\n\n def right_lane_exist(self):\n return True if self.front_dist[-1] is not None else False\n\n def has_front_object(self):\n return True if self.front_objs[1] is not None else False\n\n def has_back_object(self):\n return True if self.back_objs[1] is not None else False\n\n def has_left_front_object(self):\n return True if self.front_objs[0] is not None else False\n\n def has_left_back_object(self):\n return True if self.back_objs[0] is not None else False\n\n def has_right_front_object(self):\n return True if self.front_objs[-1] is not None else False\n\n def has_right_back_object(self):\n return True if self.back_objs[-1] is not None else False\n\n def front_object(self):\n return self.front_objs[1]\n\n def left_front_object(self):\n return self.front_objs[0]\n\n def right_front_object(self):\n return self.front_objs[-1]\n\n def back_object(self):\n return self.back_objs[1]\n\n def left_back_object(self):\n return self.back_objs[0]\n\n def right_back_object(self):\n return self.back_objs[-1]\n\n def left_front_min_distance(self):\n assert self.left_lane_exist(), \"left lane doesn't exist\"\n return self.front_dist[0]\n\n def right_front_min_distance(self):\n assert self.right_lane_exist(), \"right lane doesn't exist\"\n return self.front_dist[-1]\n\n def front_min_distance(self):\n return self.front_dist[1]\n\n def left_back_min_distance(self):\n 
assert self.left_lane_exist(), \"left lane doesn't exist\"\n return self.back_dist[0]\n\n def right_back_min_distance(self):\n assert self.right_lane_exist(), \"right lane doesn't exist\"\n return self.back_dist[-1]\n\n def back_min_distance(self):\n return self.back_dist[1]\n\n @classmethod\n def get_find_front_back_objs(cls, objs, lane, position, max_distance, ref_lanes=None):\n \"\"\"\n Find objects in front of/behind the lane and its left lanes/right lanes, return objs, dist.\n If ref_lanes is None, return filter results of this lane\n \"\"\"\n if ref_lanes is not None:\n assert lane in ref_lanes\n idx = lane.index[-1]\n left_lane = ref_lanes[idx - 1] if idx > 0 and ref_lanes is not None else None\n right_lane = ref_lanes[idx + 1] if ref_lanes and idx + 1 < len(ref_lanes) is not None else None\n lanes = [left_lane, lane, right_lane]\n\n min_front_long = [max_distance if lane is not None else None for lane in lanes]\n min_back_long = [max_distance if lane is not None else None for lane in lanes]\n\n front_ret = [None, None, None]\n back_ret = [None, None, None]\n\n find_front_in_current_lane = [False, False, False]\n find_back_in_current_lane = [False, False, False]\n\n current_long = [lane.local_coordinates(position)[0] if lane is not None else None for lane in lanes]\n left_long = [lane.length - current_long[idx] if lane is not None else None for idx, lane in enumerate(lanes)]\n\n for i, lane in enumerate(lanes):\n if lane is None:\n continue\n for obj in objs:\n if obj.lane is lane:\n long = lane.local_coordinates(obj.position)[0] - current_long[i]\n if min_front_long[i] > long > 0:\n min_front_long[i] = long\n front_ret[i] = obj\n find_front_in_current_lane[i] = True\n if long < 0 and abs(long) < min_back_long[i]:\n min_back_long[i] = abs(long)\n back_ret[i] = obj\n find_back_in_current_lane[i] = True\n\n elif not find_front_in_current_lane[i] and lane.is_previous_lane_of(obj.lane):\n long = obj.lane.local_coordinates(obj.position)[0] + left_long[i]\n if 
min_front_long[i] > long > 0:\n min_front_long[i] = long\n front_ret[i] = obj\n elif not find_back_in_current_lane[i] and obj.lane.is_previous_lane_of(lane):\n long = obj.lane.length - obj.lane.local_coordinates(obj.position)[0] + current_long[i]\n if min_back_long[i] > long:\n min_back_long[i] = long\n back_ret[i] = obj\n\n return cls(front_ret, back_ret, min_front_long, min_back_long)\n\n\nclass IDMPolicy(BasePolicy):\n \"\"\"\n We implement this policy based on the HighwayEnv code base.\n \"\"\"\n TAU_ACC = 0.6 # [s]\n TAU_HEADING = 0.3 # [s]\n TAU_LATERAL = 0.8 # [s]\n\n TAU_PURSUIT = 0.5 * TAU_HEADING # [s]\n KP_A = 1 / TAU_ACC\n KP_HEADING = 1 / TAU_HEADING\n KP_LATERAL = 1 / TAU_LATERAL # [1/s]\n MAX_STEERING_ANGLE = np.pi / 3 # [rad]\n DELTA_SPEED = 5 # [m/s]\n\n DISTANCE_WANTED = 10.0\n \"\"\"Desired jam distance to the front vehicle.\"\"\"\n\n TIME_WANTED = 1.5 # [s]\n \"\"\"Desired time gap to the front v\"\"\"\n\n DELTA = 10.0 # []\n \"\"\"Exponent of the velocity term.\"\"\"\n\n DELTA_RANGE = [3.5, 4.5]\n \"\"\"Range of delta when chosen randomly.\"\"\"\n\n # Lateral policy parameters\n LANE_CHANGE_FREQ = 50 # [step]\n LANE_CHANGE_SPEED_INCREASE = 10\n SAFE_LANE_CHANGE_DISTANCE = 15\n MAX_LONG_DIST = 30\n MAX_SPEED = 100\n\n # Normal speed\n NORMAL_SPEED = 30\n\n # Creep Speed\n CREEP_SPEED = 5\n\n # acc factor\n ACC_FACTOR = 1.0\n DEACC_FACTOR = -5\n\n def __init__(self, control_object, random_seed):\n super(IDMPolicy, self).__init__(control_object=control_object, random_seed=random_seed)\n self.target_speed = self.NORMAL_SPEED\n self.routing_target_lane = None\n self.available_routing_index_range = None\n self.overtake_timer = self.np_random.randint(0, self.LANE_CHANGE_FREQ)\n\n self.heading_pid = PIDController(1.7, 0.01, 3.5)\n self.lateral_pid = PIDController(0.3, .002, 0.05)\n\n def act(self, *args, **kwargs):\n # concat lane\n sucess = self.move_to_next_road()\n all_objects = 
self.control_object.lidar.get_surrounding_objects(self.control_object)\n try:\n if sucess:\n # perform lane change due to routing\n acc_front_obj, acc_front_dist, steering_target_lane = self.lane_change_policy(all_objects)\n else:\n # can not find routing target lane\n surrounding_objects = FrontBackObjects.get_find_front_back_objs(\n all_objects,\n self.routing_target_lane,\n self.control_object.position,\n max_distance=self.MAX_LONG_DIST\n )\n acc_front_obj = surrounding_objects.front_object()\n acc_front_dist = surrounding_objects.front_min_distance()\n steering_target_lane = self.routing_target_lane\n except:\n # error fallback\n acc_front_obj = None\n acc_front_dist = 5\n steering_target_lane = self.routing_target_lane\n logging.warning(\"IDM bug! fall back\")\n print(\"IDM bug! fall back\")\n\n # control by PID and IDM\n steering = self.steering_control(steering_target_lane)\n acc = self.acceleration(acc_front_obj, acc_front_dist)\n return [steering, acc]\n\n def move_to_next_road(self):\n # routing target lane is in current ref lanes\n current_lanes = self.control_object.navigation.current_ref_lanes\n if self.routing_target_lane is None:\n self.routing_target_lane = self.control_object.lane\n return True if self.routing_target_lane in current_lanes else False\n if self.routing_target_lane not in current_lanes:\n for lane in current_lanes:\n if self.routing_target_lane.is_previous_lane_of(lane):\n # two lanes connect\n self.routing_target_lane = lane\n return True\n # lane change for lane num change\n return False\n elif self.control_object.lane in current_lanes and self.routing_target_lane is not self.control_object.lane:\n # lateral routing lane change\n self.routing_target_lane = self.control_object.lane\n self.overtake_timer = self.np_random.randint(0, int(self.LANE_CHANGE_FREQ / 2))\n return True\n else:\n return True\n\n def steering_control(self, target_lane) -> float:\n # heading control following a lateral distance control\n ego_vehicle = 
self.control_object\n long, lat = target_lane.local_coordinates(ego_vehicle.position)\n lane_heading = target_lane.heading_at(long + 1)\n v_heading = ego_vehicle.heading_theta\n steering = self.heading_pid.get_result(wrap_to_pi(lane_heading - v_heading))\n steering += self.lateral_pid.get_result(-lat)\n return float(steering)\n\n def acceleration(self, front_obj, dist_to_front) -> float:\n ego_vehicle = self.control_object\n ego_target_speed = not_zero(self.target_speed, 0)\n acceleration = self.ACC_FACTOR * (1 - np.power(max(ego_vehicle.speed, 0) / ego_target_speed, self.DELTA))\n if front_obj:\n d = dist_to_front\n speed_diff = self.desired_gap(ego_vehicle, front_obj) / not_zero(d)\n acceleration -= self.ACC_FACTOR * (speed_diff**2)\n return acceleration\n\n def desired_gap(self, ego_vehicle, front_obj, projected: bool = True) -> float:\n d0 = self.DISTANCE_WANTED\n tau = self.TIME_WANTED\n ab = -self.ACC_FACTOR * self.DEACC_FACTOR\n dv = np.dot(ego_vehicle.velocity - front_obj.velocity, ego_vehicle.heading) if projected \\\n else ego_vehicle.speed - front_obj.speed\n d_star = d0 + ego_vehicle.speed * tau + ego_vehicle.speed * dv / (2 * np.sqrt(ab))\n return d_star\n\n def reset(self):\n self.heading_pid.reset()\n self.lateral_pid.reset()\n self.target_speed = self.NORMAL_SPEED\n self.routing_target_lane = None\n self.available_routing_index_range = None\n self.overtake_timer = self.np_random.randint(0, self.LANE_CHANGE_FREQ)\n\n def lane_change_policy(self, all_objects):\n current_lanes = self.control_object.navigation.current_ref_lanes\n surrounding_objects = FrontBackObjects.get_find_front_back_objs(\n all_objects, self.routing_target_lane, self.control_object.position, self.MAX_LONG_DIST, current_lanes\n )\n self.available_routing_index_range = [i for i in range(len(current_lanes))]\n next_lanes = self.control_object.navigation.next_ref_lanes\n lane_num_diff = len(current_lanes) - len(next_lanes) if next_lanes is not None else 0\n\n # must perform lane change 
due to routing lane num change\n if lane_num_diff > 0:\n # lane num decreasing happened in left road or right road\n if current_lanes[0].is_previous_lane_of(next_lanes[0]):\n index_range = [i for i in range(len(next_lanes))]\n else:\n index_range = [i for i in range(lane_num_diff, len(current_lanes))]\n self.available_routing_index_range = index_range\n if self.routing_target_lane.index[-1] not in index_range:\n # not on suitable lane do lane change !!!\n if self.routing_target_lane.index[-1] > index_range[-1]:\n # change to left\n if surrounding_objects.left_back_min_distance(\n ) < self.SAFE_LANE_CHANGE_DISTANCE or surrounding_objects.left_front_min_distance() < 5:\n # creep to wait\n self.target_speed = self.CREEP_SPEED\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(\n ), self.routing_target_lane\n else:\n # it is time to change lane!\n self.target_speed = self.NORMAL_SPEED\n return surrounding_objects.left_front_object(), surrounding_objects.left_front_min_distance(), \\\n current_lanes[self.routing_target_lane.index[-1] - 1]\n else:\n # change to right\n if surrounding_objects.right_back_min_distance(\n ) < self.SAFE_LANE_CHANGE_DISTANCE or surrounding_objects.right_front_min_distance() < 5:\n # unsafe, creep and wait\n self.target_speed = self.CREEP_SPEED\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(\n ), self.routing_target_lane,\n else:\n # change lane\n self.target_speed = self.NORMAL_SPEED\n return surrounding_objects.right_front_object(), surrounding_objects.right_front_min_distance(), \\\n current_lanes[self.routing_target_lane.index[-1] + 1]\n\n # lane follow or active change lane/overtake for high driving speed\n if abs(self.control_object.speed - self.NORMAL_SPEED) > 3 and surrounding_objects.has_front_object(\n ) and abs(surrounding_objects.front_object().speed -\n self.NORMAL_SPEED) > 3 and self.overtake_timer > self.LANE_CHANGE_FREQ:\n # may lane change\n right_front_speed 
= surrounding_objects.right_front_object().speed if surrounding_objects.has_right_front_object() else self.MAX_SPEED \\\n if surrounding_objects.right_lane_exist() and surrounding_objects.right_front_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE and surrounding_objects.right_back_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE else None\n front_speed = surrounding_objects.front_object().speed if surrounding_objects.has_front_object(\n ) else self.MAX_SPEED\n left_front_speed = surrounding_objects.left_front_object().speed if surrounding_objects.has_left_front_object() else self.MAX_SPEED \\\n if surrounding_objects.left_lane_exist() and surrounding_objects.left_front_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE and surrounding_objects.left_back_min_distance() > self.SAFE_LANE_CHANGE_DISTANCE else None\n if left_front_speed is not None and left_front_speed - front_speed > self.LANE_CHANGE_SPEED_INCREASE:\n # left overtake has a high priority\n expect_lane_idx = current_lanes.index(self.routing_target_lane) - 1\n if expect_lane_idx in self.available_routing_index_range:\n return surrounding_objects.left_front_object(), surrounding_objects.left_front_min_distance(), \\\n current_lanes[expect_lane_idx]\n if right_front_speed is not None and right_front_speed - front_speed > self.LANE_CHANGE_SPEED_INCREASE:\n expect_lane_idx = current_lanes.index(self.routing_target_lane) + 1\n if expect_lane_idx in self.available_routing_index_range:\n return surrounding_objects.right_front_object(), surrounding_objects.right_front_min_distance(), \\\n current_lanes[expect_lane_idx]\n\n # fall back to lane follow\n self.target_speed = self.NORMAL_SPEED\n self.overtake_timer += 1\n return surrounding_objects.front_object(), surrounding_objects.front_min_distance(), self.routing_target_lane\n"
] |
[
[
"numpy.isscalar"
],
[
"numpy.dot",
"numpy.sqrt"
]
] |
nobodykid/sinkhorngan-positive
|
[
"811f697da4fe02599fc7f0e1bdf77c89d183aba4"
] |
[
"old/dataloader/anomaly/mnist.py"
] |
[
"from __future__ import print_function\n\nimport codecs\nimport gzip\nimport os\nimport os.path\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision.datasets.utils import download_url, makedir_exist_ok\n\n\nclass MNIST(data.Dataset):\n \"\"\"`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``processed/training.pt``\n and ``processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n urls = [\n 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',\n ]\n training_file = 'training.pt'\n test_file = 'test.pt'\n classes = ['0 - normal', '1 - abnormal']\n\n @property\n def train_labels(self):\n warnings.warn(\"train_labels has been renamed targets\")\n return self.targets\n\n @property\n def test_labels(self):\n warnings.warn(\"test_labels has been renamed targets\")\n return self.targets\n\n @property\n def train_data(self):\n warnings.warn(\"train_data has been renamed data\")\n return self.data\n\n @property\n def test_data(self):\n warnings.warn(\"test_data has been renamed data\")\n return self.data\n\n def __init__(self, root, train=True, transform=None, target_transform=None, download=False, anomaly_class=None):\n 
self.test_percentage = 0.2\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.train = train # training set or test set\n\n if anomaly_class is None:\n raise RuntimeError('Please fill the anomaly_class argument' +\n 'anomaly_class=<listOfClass or integer>')\n\n if (isinstance(anomaly_class, list)):\n self.anomaly_class = anomaly_class\n if (isinstance(anomaly_class, int)):\n self.anomaly_class = [anomaly_class]\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.' +\n ' You can use download=True to download it')\n\n if self.train:\n data_file = self.training_file\n else:\n data_file = self.test_file\n self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img, target = self.data[index], int(self.targets[index])\n\n # doing this so that it is consistent with all other dataloader\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.data)\n\n @property\n def raw_folder(self):\n return os.path.join(self.root, self.__class__.__name__, 'raw')\n\n @property\n def processed_folder(self):\n return os.path.join(self.root, self.__class__.__name__,\n 'processed_with_anomaly_' + \"_\".join([str(i) for i in self.anomaly_class]))\n\n @property\n def class_to_idx(self):\n return {_class: i for i, _class in enumerate(self.classes)}\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.processed_folder, self.training_file)) and \\\n os.path.exists(os.path.join(self.processed_folder, self.test_file))\n\n @staticmethod\n def 
extract_gzip(gzip_path, remove_finished=False):\n print('Extracting {}'.format(gzip_path))\n with open(gzip_path.replace('.gz', ''), 'wb') as out_f, \\\n gzip.GzipFile(gzip_path) as zip_f:\n out_f.write(zip_f.read())\n if remove_finished:\n os.unlink(gzip_path)\n\n def download(self):\n \"\"\"Download the MNIST data if it doesn't exist in processed_folder already.\"\"\"\n\n if self._check_exists():\n return\n\n makedir_exist_ok(self.raw_folder)\n makedir_exist_ok(self.processed_folder)\n\n # download files\n for url in self.urls:\n filename = url.rpartition('/')[2]\n file_path = os.path.join(self.raw_folder, filename)\n download_url(url, root=self.raw_folder, filename=filename, md5=None)\n self.extract_gzip(gzip_path=file_path, remove_finished=False)\n\n # process and save as torch files\n print('Processing...')\n\n normal_data = []\n normal_targets = []\n abnormal_data = []\n abnormal_targets = []\n tmp_data = np.append(read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),\n read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')), 0)\n tmp_targets = np.append(read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')),\n read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')), 0)\n\n for d, t in zip(tmp_data, tmp_targets):\n if (t in self.anomaly_class):\n abnormal_data.append(d)\n abnormal_targets.append(t)\n else:\n normal_data.append(d)\n normal_targets.append(t)\n\n # convert all label to 0 - normal and 1 - abnormal\n normal_data = np.asarray(normal_data)\n normal_targets = np.zeros_like(normal_targets)\n\n abnormal_data = np.asarray(abnormal_data)\n abnormal_targets = np.ones_like(abnormal_targets)\n\n # Create new anomaly dataset based on the following data structure:\n # - anomaly dataset\n # . -> train\n # . -> normal\n # . -> test\n # . -> normal\n # . 
-> abnormal\n\n test_idx = int(normal_targets.shape[0] * self.test_percentage)\n\n training_data = normal_data[test_idx:, ]\n training_targets = normal_targets[test_idx:, ]\n\n test_data = np.append(normal_data[:test_idx, ], abnormal_data, 0)\n test_targets = np.append(normal_targets[:test_idx, ], abnormal_targets, 0)\n\n training_set = (\n torch.from_numpy(training_data).view(*training_data.shape),\n torch.from_numpy(training_targets).view(*training_targets.shape).long()\n )\n test_set = (\n torch.from_numpy(test_data).view(*test_data.shape),\n torch.from_numpy(test_targets).view(*test_targets.shape).long()\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n tmp = 'train' if self.train is True else 'test'\n fmt_str += ' Split: {}\\n'.format(tmp)\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\nclass FashionMNIST(MNIST):\n \"\"\"`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``processed/training.pt``\n and ``processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n urls = [\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',\n ]\n classes = classes = ['0 - normal', '1 - abnormal']\n\n\nclass KMNIST(MNIST):\n \"\"\"`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where ``processed/training.pt``\n and ``processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n urls = [\n 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-images-idx3-ubyte.gz',\n 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz',\n 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz',\n 'http://codh.rois.ac.jp/kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz',\n ]\n classes = ['0 - normal', '1 - abnormal']\n\n\ndef get_int(b):\n return int(codecs.encode(b, 'hex'), 16)\n\n\ndef read_label_file(path):\n with open(path, 'rb') as f:\n data = f.read()\n assert get_int(data[:4]) == 2049\n length = get_int(data[4:8])\n parsed = np.frombuffer(data, dtype=np.uint8, offset=8)\n return torch.from_numpy(parsed).view(length).long()\n\n\ndef read_image_file(path):\n with open(path, 'rb') as f:\n data = f.read()\n assert get_int(data[:4]) == 2051\n length = get_int(data[4:8])\n num_rows = get_int(data[8:12])\n num_cols = get_int(data[12:16])\n parsed = np.frombuffer(data, dtype=np.uint8, offset=16)\n return torch.from_numpy(parsed).view(length, num_rows, num_cols)\n"
] |
[
[
"numpy.append",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.asarray",
"torch.save",
"torch.from_numpy",
"numpy.frombuffer"
]
] |
tangshulien/Deep-Learning-Steering
|
[
"848c8e04fa39b668fd59f3223cb34275eace5abd"
] |
[
"hevctoh5a.py"
] |
[
"# YPL & JLL, 2021.3.19\n# Code: /home/jinn/openpilot/tools/lib/hevctoh5a.py \n# Input: /home/jinn/data1/8bfda98c9c9e4291|2020-05-11--03-00-57--61/fcamera.hevc\n# Output: /home/jinn/data1/8bfda98c9c9e4291|2020-05-11--03-00-57--61/camera.h5\nimport os\nimport cv2\nimport h5py\nimport matplotlib.pyplot as plt\nfrom tools.lib.logreader import LogReader\nfrom tools.lib.framereader import FrameReader\n\ndirs=os.listdir('/home/jinn/data1')\ndirs=['/home/jinn/data1/'+i +'/' for i in dirs]\nprint(dirs)\n\npath_all=[]\nfor di1 in dirs:\n dir1=os.listdir(di1)\n path=[di1 + d for d in dir1]\n for dd in path:\n path_all.append(dd)\n\ndef mkcamera(path):\n for f in path:\n fr=FrameReader(f)\n fcount = fr.frame_count - 1192 # -1192 produces very small .h5 for debugging\n print(fcount)\n ca=f.replace('fcamera.hevc','camera.h5')\n if not os.path.isfile(ca): \n with h5py.File(ca,'w') as f2:\n f2.create_dataset('X',(fcount,160,320,3))\n for i in range(fcount): \n img=fr.get(i,pix_fmt='rgb24')[0]\n img=img[:650,:,:]\n img=cv2.resize(img,(320,160)) \n f2['X'][i]=img\n plt.imshow(img) # see resized img\n plt.show()\n print(i) \n f3 = h5py.File(ca, 'r') # read .h5, 'w': write\n print(f3.keys())\n dset = f3['X']\n print(dset.shape)\n print(dset.dtype)\n print(dset[0])\n \nif __name__ == '__main__':\n print(path_all)\n mkcamera(path_all)\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow"
]
] |
LucasAntognoni/SCC0270
|
[
"31065234d658b6944f75c845d669fdb85bc9cf16"
] |
[
"Assignment 3/rbf.py"
] |
[
"import numpy as np \nimport sys\n\nclass RBF():\n def __init__(self, Input, Output, Ptypes, Nclasses):\n\n self.input = Input\n self.hidden = Ptypes * Nclasses\n self.output = Output\n self.ptypes = Ptypes\n self.nclasses = Nclasses\n\n self.protos = 0\n self.weights = 0\n self.spread = 0\n \n def createPrototypes(self, data):\n\n groups = np.random.randint(0, data.shape[0], size = (self.hidden))\n \n prototypes = np.zeros((self.hidden, data.shape[1]))\n \n i = 0\n \n for element in groups: \n prototypes[i] = data[element, :]\n i += 1\n \n self.protos = prototypes\n\n def sigma(self):\n \n temp = 0\n \n for i in range(self.hidden):\n for j in range(self.hidden):\n distance = np.square(np.linalg.norm(self.protos[i] - self.protos[j]))\n \n if distance > temp:\n temp = distance\n \n self.spread = temp/np.sqrt(self.hidden)\n\n def train(self, data, classes):\n\n self.createPrototypes(data)\n self.sigma()\n hidden_out = np.zeros(shape=(0,self.hidden))\n \n for data in data:\n output=[]\n \n for proto in self.protos:\n distance = np.square(np.linalg.norm(data - proto))\n neuron_output = np.exp(-(distance)/(np.square(self.spread)))\n output.append(neuron_output)\n hidden_out = np.vstack([hidden_out,np.array(output)])\n \n self.weights = np.dot(np.linalg.pinv(hidden_out), classes)\n\n def test(self, data, classes):\n \n right = 0\n \n for i in range(len(data)):\n \n d = data[i]\n output = []\n \n for proto in self.protos:\n distance = np.square(np.linalg.norm(d-proto))\n neuron_output = np.exp(-(distance)/np.square(self.spread))\n output.append(neuron_output)\n \n network_output = np.dot(np.array(output),self.weights)\n \n print (\"Expected: \", classes[i].argmax(axis=0) +1)\n print (\"Result: \", network_output.argmax(axis=0) + 1)\n print ()\n\n if network_output.argmax(axis=0) + 1 == classes[i].argmax(axis=0) +1:\n right += 1\n \n print (\"Accuracy(%): \", (right * 100) / len(data))\n\ndef read_iris(percentage):\n \n dataset = np.loadtxt('iris.data', delimiter=',', 
skiprows=0)\n\n np.random.shuffle(dataset)\n \n q = int(dataset.shape[0] * percentage) + 2\n \n X_training = dataset[0:q, 0:4]\n Y_training = dataset[0:q, 4]\n \n X_test = dataset[q:150, 0:4]\n Y_test = dataset[q:150, 4]\n \n return X_training, Y_training, X_test, Y_test\n\ndef process_iris_data(data):\n \n p_data = np.zeros((data.shape[0], data.shape[1]))\n\n max_col1 = np.amax(data[:,0])\n max_col2 = np.amax(data[:,1])\n max_col3 = np.amax(data[:,2])\n max_col4 = np.amax(data[:,3])\n\n for n in range(len(data)):\n \n p_data[n, 0] = data[n,0] / max_col1\n p_data[n, 1] = data[n,1] / max_col2\n p_data[n, 2] = data[n,2] / max_col3\n p_data[n, 3] = data[n,3] / max_col4\n\n return p_data\n\ndef process_iris_labels(labels, operation):\n \n if operation == 0:\n \n p_labels = np.zeros((labels.shape[0], 3))\n\n for n in range(len(labels)):\n p_labels[n, int(labels[n])] = 1 \n\n return p_labels\n else:\n p_labels = np.argmax(labels, axis=1)\n return p_labels\n\n\nif __name__ == '__main__':\n \n # input params\n # percentage \n \n parameters = (sys.argv)\n print(parameters)\n\n x1, y1, x2, y2 = read_iris(float(parameters[1]))\n xp = process_iris_data(x1)\n yp = process_iris_labels(y1,0)\n\n nn = RBF(xp.shape[1], y1.shape[0], xp.shape[1], 3)\n\n nn.train(xp, yp) \n\n xp = process_iris_data(x2)\n yp = process_iris_labels(y2,0)\n nn.test(xp, yp)"
] |
[
[
"numpy.square",
"numpy.array",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.linalg.pinv",
"numpy.random.shuffle",
"numpy.loadtxt",
"numpy.random.randint",
"numpy.amax",
"numpy.argmax",
"numpy.sqrt"
]
] |
scale-lab/BitTrain
|
[
"3a15f96cc32222e3d6fceb00a622521e31745d4c"
] |
[
"expr/utils/sparsity_stats.py"
] |
[
"import torch\nimport torchvision\nimport collections\nimport numpy as np\n\n'''\n_sparsity_ratios is a dictionary to save the sparsity ratios of each layer,\nKey - the layer name\nValue - list of sparsity ratios for the executed forward passes\n'''\n_sparsity_ratios_per_layer = collections.defaultdict(list)\n_sparsity_ratios_per_layer_type = collections.defaultdict(list)\n_total_memory_per_layer_type = collections.defaultdict(list)\n_bitmap_memory_footprint = collections.defaultdict(list)\n_dense_memory_footprint = collections.defaultdict(list)\n_activations_stats_for_hist = []\n\n_layers_types = [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Dropout,\n torch.nn.Linear, torch.nn.MaxPool2d, torch.nn.AdaptiveAvgPool2d]\n\n_layers_names = {torch.nn.Conv2d:\"Conv\", \n torch.nn.BatchNorm2d: \"BatchNorm\", \n torch.nn.Dropout: \"Dropout\",\n torch.nn.Linear: \"Linear\",\n torch.nn.MaxPool2d: \"MaxPool\",\n torch.nn.AdaptiveAvgPool2d: \"AvgPool\"}\n\nclass Hook():\n def __init__(self, module, name, pre=False):\n self.name = name\n self.type = _layers_names[type(module)]\n if pre==False:\n self.hook = module.register_forward_hook(self.hook_fn)\n else:\n self.hook = module.register_forward_pre_hook(self.hook_pre_fn)\n \n def hook_fn(self, module, input, output):\n assert len(input) == 1\n self.input = input[0].detach()\n self.output = output.detach()\n\n def hook_pre_fn(self, module, input):\n assert len(input) == 1\n self.input = input[0].detach()\n\n def close(self):\n self.hook.remove()\n\n'''\n_place_hooks: places hooks at the given layer types\n'''\ndef _place_hooks(model, layers_types):\n hooks = []\n for name, module in model.named_modules():\n if type(module) in layers_types:\n hooks.append(Hook(module, name, pre=True))\n else:\n print(\"Skipped\", name, type(module))\n return hooks\n\n'''\n_update_sparsity_ratios_dict: updates the sparsity ratios dictinary \n according to new values at hooks\n'''\ndef _update_sparsity_ratios_dict(hooks):\n for hook in hooks:\n 
activation = hook.input\n _activations_stats_for_hist.extend(list(activation.view(-1)))\n total_elements = torch.numel(activation)\n non_zero_elements = torch.count_nonzero(torch.abs(activation) > 0.001)\n sparsity_ratio = 1 - (non_zero_elements/total_elements)\n _sparsity_ratios_per_layer[hook.name].append(sparsity_ratio.item())\n _sparsity_ratios_per_layer_type[hook.type].append(sparsity_ratio.item())\n _total_memory_per_layer_type[hook.type].append(total_elements)\n _bitmap_memory_footprint[hook.name].append(total_elements*1 + non_zero_elements*4)\n _dense_memory_footprint[hook.name].append(np.prod(list(activation.shape))*4)\n\n'''\n_compute_sparsity_ratios_at_hooks: loop on dataset and \n calculate the sparsity at each layer\n'''\ndef _compute_sparsity_ratios_at_hooks(model, hooks, dataloader, device):\n for inputs, _ in dataloader:\n # Perform the forward path to save activations\n print(inputs.shape)\n inputs = inputs.to(device)\n model(inputs)\n\n # Update the sparsity matrix and clear the activations\n _update_sparsity_ratios_dict(hooks)\n break\n'''\n_replace_relu_inplace_to_relu: used as a workaround because hooks work wrong \n with inplace operations, replace each inplace ReLU \n with similar one with implace = False\n'''\ndef _replace_relu_inplace_to_relu(model, relu_type):\n for child_name, child in model.named_children():\n if isinstance(child, relu_type):\n setattr(model, child_name, relu_type(inplace=False))\n else:\n _replace_relu_inplace_to_relu(child, relu_type)\n\ndef calc_zero_activations_percentages(model, dataloader, \\\n name, device, verbose=False):\n\n # Workaround:: Replace the RELU inplace to normal because \n # the hooks work wrong with ReLU inplace\n relu_types = [torch.nn.ReLU6, torch.nn.ReLU]\n\n for layer in relu_types:\n _replace_relu_inplace_to_relu(model, layer)\n \n print(model)\n # Place the hooks at the required layer type\n hooks = _place_hooks(model, _layers_types)\n\n # Compute sparsity ratios\n 
_compute_sparsity_ratios_at_hooks(model, hooks, dataloader, device)\n\n # Reemove hooks\n for hook in hooks:\n hook.close()\n \n # Print average sparsity ratios\n avg_sparsity_per_layer = []\n avg_saving_to_dense_per_layer = []\n \n for layer_name in _sparsity_ratios_per_layer:\n avg_sparsity = np.mean(_sparsity_ratios_per_layer[layer_name])\n avg_saving_to_dense = 1 - np.mean(_bitmap_memory_footprint[layer_name])/ \\\n np.mean(_dense_memory_footprint[layer_name])\n \n if avg_sparsity > 0.15:\n avg_saving_to_dense_per_layer.append(avg_saving_to_dense)\n avg_sparsity_per_layer.append(100*avg_sparsity)\n else:\n avg_saving_to_dense_per_layer.append(0)\n avg_sparsity_per_layer.append(0)\n\n if verbose:\n print('Layer {} - input sparsity is {:.2f} %, saved {:.2f}% than dense \\\n and {:.2f}% than COO'.format(layer_name, 100*avg_sparsity, \\\n 100*avg_saving_to_dense))\n \n total_avg = np.mean(avg_sparsity_per_layer)\n if verbose:\n print('All - average zero activations percentage is {:.2f} %'.format(total_avg))\n print(\"Average Saving compared to dense is {:.2f}\".format(100*np.mean(avg_saving_to_dense_per_layer)))\n \n avg_sparsity_per_layer_type = []\n total_memory = []\n layer_types = []\n for layer_type in _sparsity_ratios_per_layer_type:\n avg_sparsity = np.mean(_sparsity_ratios_per_layer_type[layer_type])\n if verbose:\n print('Layer {} - input sparsity is {:.4f} %'.format(layer_type, 100*avg_sparsity))\n avg_sparsity_per_layer_type.append(100*avg_sparsity)\n layer_types.append(layer_type)\n total_memory.append(np.sum(_total_memory_per_layer_type[layer_type]))\n \n total_memory_percentage = []\n for idx, value in enumerate(total_memory):\n total_memory_percentage.append(value/np.sum(total_memory))\n\n return avg_sparsity_per_layer, avg_sparsity_per_layer_type, total_memory_percentage, \\\n layer_types, _activations_stats_for_hist"
] |
[
[
"torch.abs",
"numpy.sum",
"torch.numel",
"numpy.mean"
]
] |
ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree
|
[
"89e626b5591f543139d9cdab0dec9a6d8db53c6d"
] |
[
"src/facial_landmarks_detection.py"
] |
[
"import logging as log\nimport cv2\nimport sys\nimport numpy as np\n\n\nclass LandmarksDetectionModel:\n '''\n Class for the Face Landmarks Detection Model.\n\n Load and configure inference plugins for the specified target devices,\n and performs either synchronous or asynchronous modes for the\n specified infer requests.\n '''\n\n def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):\n '''\n Set instance variables.\n '''\n self.plugin = None\n self.network = None\n self.exec_network = None\n self.infer_request_handle = None\n\n self.input_blob = None\n self.input_shape = None\n self.output_blob = None\n self.output_shape = None\n\n self.model_name = model_name\n self.device = device\n self.extensions = extensions\n self.async_infer = async_infer\n\n def load_model(self, plugin):\n '''\n This method is for loading the model (in IR format) to the device specified by the user.\n Default device is CPU.\n '''\n\n # Get model\n model_structure = self.model_name + '.xml'\n model_weights = self.model_name + '.bin'\n\n # Initialize the plugin - load the inference engine API\n # Plugin is the one already created for the Face Detection model\n self.plugin = plugin\n\n # Add a CPU extension, if applicable\n if self.extensions and 'CPU' in self.device:\n self.plugin.add_extension(self.extensions, self.device)\n\n # Read the IR as IENetwork\n try:\n self.network = self.plugin.read_network(model=model_structure, weights=model_weights)\n except:\n raise ValueError(\"Could not initialise the network. 
Have you entered the correct model path?\")\n\n # Check if model and CPU plugin are supported\n if self.device == 'CPU':\n self.check_model()\n\n # Load the IENetwork into the plugin\n self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)\n\n # Get the input and output layers\n self.input_blob = next(iter(self.network.inputs))\n self.input_shape = self.network.inputs[self.input_blob].shape\n self.output_blob = next(iter(self.network.outputs))\n self.output_shape = self.network.outputs[self.output_blob].shape\n return\n\n def predict(self, image):\n '''\n This method is meant for running predictions on the input image.\n '''\n if np.all(np.array(image.shape)):\n # Create input image to feed into the network\n net_input = {self.input_blob: self.preprocess_input(image)}\n\n # Start inference. Infer mode (async/sync) is input by user\n if self.async_infer:\n self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)\n # Wait for the result of the inference\n if self.exec_network.requests[0].wait(-1) == 0:\n # Get result of the inference request\n outputs = self.infer_request_handle.outputs[self.output_blob]\n eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)\n\n else:\n self.infer_request_handle = self.exec_network.infer(inputs=net_input)\n # Get result of the inference request\n outputs = self.infer_request_handle[self.output_blob]\n eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)\n\n else:\n eyes_coords = []\n crop_left = []\n crop_right = []\n\n return eyes_coords, crop_left, crop_right\n\n def check_model(self):\n '''\n This method check whether the model (along with the plugin) is support on the CPU device.\n If anything is missing (such as a CPU extension), let the user know and exit the programm.\n '''\n\n supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')\n unsupported_layers = [l for l in 
self.network.layers.keys() if l not in supported_layers]\n\n if len(unsupported_layers) != 0:\n log.error(\"Unsupported layers found: {}\".format(unsupported_layers))\n if self.extensions:\n log.error(\"The extensions specified do not support some layers. Please specify a new extension.\")\n else:\n log.error(\n \"Please try to specify an extension library path by using the --extensions command line argument.\")\n sys.exit(1)\n return\n\n def preprocess_input(self, image):\n '''\n Method to process inputs before feeding them into the model for inference.\n '''\n image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))\n image = image.transpose((2, 0, 1))\n image = image.reshape(1, *image.shape)\n return image\n\n def preprocess_output(self, outputs, image):\n '''\n Method to process outputs before feeding them into the next model for\n inference or for the last step of the app.\n '''\n\n w = image.shape[1]\n h = image.shape[0]\n outputs = outputs[0]\n\n xl, yl = int(outputs[0][0][0] * w), int(outputs[1][0][0] * h)\n xr, yr = int(outputs[2][0][0] * w), int(outputs[3][0][0] * h)\n\n eyes_coords = [xl, yl, xr, yr]\n\n # Using the fact that eyes take 1/5 of your face width\n # define bounding boxes around the eyes according to this\n square_size = int(w / 10)\n left_eye_box = [xl - square_size, yl - square_size, xl + square_size, yl + square_size]\n right_eye_box = [xr - square_size, yr - square_size, xr + square_size, yr + square_size]\n\n crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]\n crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]\n\n return eyes_coords, crop_left, crop_right\n"
] |
[
[
"numpy.array"
]
] |
allenai/advisor
|
[
"6849755042c6dab1488f64cf21bde2322add3cc1"
] |
[
"poisoneddoors_plugin/poisoneddoors_offpolicy.py"
] |
[
"import typing\nfrom typing import Dict, Union, Tuple, Iterator, Any\nfrom typing import Optional\n\nimport numpy as np\nimport torch\nfrom gym.utils import seeding\n\nfrom advisor_losses import AlphaScheduler, AdvisorWeightedStage\nfrom allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (\n AbstractOffPolicyLoss,\n)\nfrom allenact.algorithms.onpolicy_sync.policy import ActorCriticModel\nfrom allenact.base_abstractions.misc import Memory\n\n_DATASET_CACHE: Dict[str, Any] = {}\n\n\nclass PoisonedDoorsOffPolicyExpertCELoss(AbstractOffPolicyLoss[ActorCriticModel]):\n def __init__(self, total_episodes_in_epoch: Optional[int] = None):\n super().__init__()\n self.total_episodes_in_epoch = total_episodes_in_epoch\n\n def loss(\n self,\n model: ActorCriticModel,\n batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],\n memory: Memory,\n *args,\n **kwargs\n ) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:\n\n rollout_len, nrollouts, _, = batch[\"poisoned_door_state\"].shape\n\n observations = {}\n for k in [\"poisoned_door_state\"]:\n if k in batch:\n observations[k] = batch[k].view(\n rollout_len, nrollouts, *batch[k].shape[2:]\n )\n\n ac_out, memory = model.forward(\n observations=observations,\n memory=memory,\n prev_actions=None,\n masks=batch[\"masks\"],\n )\n\n expert_ce_loss = -ac_out.distributions.log_prob(\n batch[\"expert_action\"].view(rollout_len, nrollouts, 1)\n ).mean()\n\n info = {\"expert_ce\": expert_ce_loss.item()}\n\n if self.total_episodes_in_epoch is not None:\n if \"completed_episode_count\" not in memory:\n memory[\"completed_episode_count\"] = 0\n memory[\"completed_episode_count\"] += (\n int(np.prod(batch[\"masks\"].shape)) - batch[\"masks\"].sum().item()\n )\n info[\"epoch_progress\"] = (\n memory[\"completed_episode_count\"] / self.total_episodes_in_epoch\n )\n\n return expert_ce_loss, info, memory, rollout_len * nrollouts\n\n\nclass 
PoisonedDoorsOffPolicyAdvisorLoss(AbstractOffPolicyLoss[ActorCriticModel]):\n def __init__(\n self,\n total_episodes_in_epoch: Optional[int] = None,\n fixed_alpha: Optional[float] = 1,\n fixed_bound: Optional[float] = 0.0,\n alpha_scheduler: AlphaScheduler = None,\n smooth_expert_weight_decay: Optional[float] = None,\n *args,\n **kwargs\n ):\n super().__init__()\n\n self.advisor_loss = AdvisorWeightedStage(\n rl_loss=None,\n fixed_alpha=fixed_alpha,\n fixed_bound=fixed_bound,\n alpha_scheduler=alpha_scheduler,\n smooth_expert_weight_decay=smooth_expert_weight_decay,\n *args,\n **kwargs\n )\n self.total_episodes_in_epoch = total_episodes_in_epoch\n\n def loss(\n self,\n step_count: int,\n model: ActorCriticModel,\n batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],\n memory: Memory,\n **kwargs\n ) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:\n\n rollout_len, nrollouts, _ = batch[\"poisoned_door_state\"].shape\n\n observations = {\"poisoned_door_state\": batch[\"poisoned_door_state\"]}\n\n ac_out, memory = model.forward(\n observations=observations,\n memory=memory,\n prev_actions=None,\n masks=batch[\"masks\"].view(rollout_len, nrollouts, -1),\n )\n\n total_loss, losses_dict = self.advisor_loss.loss(\n step_count=step_count,\n batch={\n \"observations\": {\n \"expert_action\": torch.cat(\n (\n batch[\"expert_action\"].view(rollout_len, nrollouts, 1),\n torch.ones(rollout_len, nrollouts, 1, dtype=torch.int64).to(\n batch[\"expert_action\"].device\n ),\n ),\n dim=-1,\n )\n }\n },\n actor_critic_output=ac_out,\n )\n\n info = {\"offpolicy_\" + key: val for key, val in losses_dict.items()}\n\n if self.total_episodes_in_epoch is not None:\n if \"completed_episode_count\" not in memory:\n memory[\"completed_episode_count\"] = 0\n memory[\"completed_episode_count\"] += (\n int(np.prod(batch[\"masks\"].shape)) - batch[\"masks\"].sum().item()\n )\n info[\"epoch_progress\"] = (\n memory[\"completed_episode_count\"] / self.total_episodes_in_epoch\n 
)\n\n return total_loss, info, memory, rollout_len * nrollouts\n\n\nclass PoisonedDoorsExpertTrajectoryIterator(Iterator):\n def __init__(\n self, num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,\n ):\n super(PoisonedDoorsExpertTrajectoryIterator, self).__init__()\n self.np_seeded_random_gen, _ = typing.cast(\n Tuple[np.random.RandomState, Any], seeding.np_random(0)\n )\n\n self.ndoors = num_doors\n self.nrollouts = nrollouts\n self.rollout_len = rollout_len\n self.dataset_size = dataset_size\n\n self.initial_observations = np.zeros(\n (rollout_len, nrollouts, 1), dtype=np.int64\n )\n\n self.mask = np.zeros((rollout_len, nrollouts, 1), dtype=np.float32)\n\n self.expert_actions = np.random.randint(\n 4, 3 + num_doors, size=(self.dataset_size, 1)\n )\n\n self.current_ind = 0\n\n def __next__(self) -> Dict[str, torch.Tensor]:\n start = self.current_ind\n end = self.current_ind + self.nrollouts * self.rollout_len\n if end > self.dataset_size:\n raise StopIteration()\n self.current_ind = end\n\n return {\n \"masks\": torch.from_numpy(self.mask),\n \"poisoned_door_state\": torch.from_numpy(self.initial_observations),\n \"expert_action\": torch.from_numpy(\n self.expert_actions[start:end].reshape(\n (self.rollout_len, self.nrollouts)\n )\n ),\n }\n\n\ndef create_poisoneddoors_offpolicy_data_iterator(\n num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,\n) -> PoisonedDoorsExpertTrajectoryIterator:\n\n return PoisonedDoorsExpertTrajectoryIterator(\n num_doors=num_doors,\n nrollouts=nrollouts,\n rollout_len=rollout_len,\n dataset_size=dataset_size,\n )\n"
] |
[
[
"numpy.zeros",
"torch.ones",
"torch.from_numpy",
"numpy.prod",
"numpy.random.randint"
]
] |
hos/pyfem1d
|
[
"8ad3528670dd4ab936206ed775b2bdad29a5c7a6"
] |
[
"pyfem1d/umat_defaults.py"
] |
[
"import numpy as np\nfrom pyfem1d.umat import Umat\n\n\nclass LinearElastic(Umat):\n parameter_values = [100]\n parameter_names = ['elastic_mod']\n\n def stress_tangent(self, dt, n, eps):\n #Get the material variables\n E = self.parameter_values[0]\n #Calculate the stress and consistent modulus\n sigl = E * eps #linear elasticity\n aal = E\n return sigl, aal\n\n\nclass NonlinearElastic(Umat):\n # parameter_values = [100, 0.03]\n parameter_values = [100, 0.0001]\n parameter_names = ['elastic_mod', 'sat_rate']\n\n def stress_tangent(self, dt, n, eps):\n E = self.parameter_values[0]\n eta = self.parameter_values[1]\n\n #Calculate the stress and consistent modulus\n sigl = E * eta * (1 - np.exp(-1 * eps / eta)) #nonlin elasticity\n aal = E * np.exp(-1 * eps / eta)\n return sigl, aal\n\n\nclass Maxwell(Umat):\n\n parameter_values = [100, 1500]\n parameter_names = ['elastic_mod', 'viscosity']\n\n def __init__(self):\n self.h1 = []\n self.hn = []\n\n def initial_cond(self, asdf):\n self.h1 = np.zeros((asdf, 1), dtype=np.float64)\n self.hn = np.zeros((asdf, 1), dtype=np.float64)\n self.nelem = asdf\n\n def update(self):\n temp = self.h1[:]\n self.h1 = self.hn[:]\n self.hn = temp[:]\n\n def stress_tangent(self, dt, n, eps):\n #Get the material variables\n E = self.parameter_values[0]\n eta = self.parameter_values[1]\n alphan = self.hn[n]\n #Calculate the stress and consistent modulus\n alpha = (alphan + eps * dt * E / eta) / (1 + dt * E / eta)\n sigl = E * (eps - alpha)\n aal = E / (1 + dt * E / eta)\n\n self.h1[n] = alpha\n\n return sigl, aal\n\n\nclass StandardLinearSolid(Umat):\n parameter_values = [1000, 4000, 20000]\n parameter_names = ['E0', 'E1', 'viscosity']\n\n def __init__(self):\n self.h1 = []\n self.hn = []\n\n def initial_cond(self, asdf):\n self.h1 = np.zeros((asdf, 1), dtype=np.float64)\n self.hn = np.zeros((asdf, 1), dtype=np.float64)\n self.nelem = asdf\n\n def update(self):\n global h1, hn\n temp = self.h1[:]\n self.h1 = self.hn[:]\n self.hn = temp[:]\n\n 
def stress_tangent(self, dt, n, eps):\n #Get the material variables\n E0 = self.parameter_values[0]\n E1 = self.parameter_values[1]\n eta = self.parameter_values[2]\n alphan = self.hn[n]\n\n #Calculate the stress and consistent modulus\n alpha = (alphan + eps * dt * E1 / eta) / (1 + dt * E1 / eta)\n\n sigl = E0 * eps + E1 * (eps - alpha)\n aal = E0 + E1 / (1 + dt * E1 / eta) #\n #Update history\n self.h1[n] = alpha\n\n return sigl, aal\n\n\nclass StandardLinearSolid2(Umat):\n\n parameter_values = [1000, 4000, 20000]\n parameter_names = ['E0', 'E1', 'viscosity']\n\n #step size for the five-point stencil\n fs = 0.001\n tol = 1e-10\n offset = 0.1\n\n def __init__(self):\n self.h1 = []\n self.hn = []\n\n def initial_cond(self, asdf):\n self.h1 = np.zeros((asdf, 1), dtype=np.float64)\n self.hn = np.zeros((asdf, 1), dtype=np.float64)\n self.nelem = asdf\n\n def update(self, ):\n temp = self.h1[:]\n self.h1 = self.hn[:]\n self.hn = temp[:]\n\n # Constitutive equations\n\n def sigel1(self, strain): #elastic stress\n result = strain * self.parameter_values[1]\n return result\n\n def sigel0(self, strain): #elastic stress\n result = strain * self.parameter_values[0]\n return result\n\n def epsvisdot(self, stress): # viscous strain derivative\n result = stress / self.parameter_values[2]\n return result\n\n def sig(self, eps, alpha): #total stress\n result = self.sigel0(eps) + self.sigel1(eps - alpha)\n return result\n\n # Secand iteration\n\n def nextstep(self, hist, dt, eps):\n dummy = [hist - self.offset, hist]\n #using the secant method\n #print ' ++++++++++++++++'\n while True:\n temp2 = dummy[1] - self.residual(dummy[1], hist, dt, eps) * (\n dummy[1] - dummy[0]) / (self.residual(dummy[1], hist, dt, eps)\n - self.residual(dummy[0], hist, dt,\n eps))\n dummy[0] = dummy[1]\n dummy[1] = temp2\n err = abs(dummy[0] - dummy[1])\n\n #print ' >>>>> Secant_err: %10.5e' %(err)\n if err < self.tol: break\n return dummy[1]\n\n # RESIDUAL\n def residual(self, next_, now, dt, eps):\n 
result = next_ - now - dt * self.epsvisdot(self.sigel1(eps - next_))\n return result\n\n # Five point stencil\n\n #def fivepoint(f,*p):\n #return (-1*f(p[0]+2*fs,p[1:])+8*f(p[0]+fs,p[1:])-8*f(p[0]-fs,p[1:])+f(p[0]-2*fs,p[1:]))/(12*fs)\n\n def stress_tangent(self, dt, n, eps):\n alphan = self.hn[n]\n\n #CALCULATE THE NEXT VISCOUS STRAIN\n alpha = self.nextstep(alphan, dt, eps)\n\n #calculate the stress and the consistent modulus\n sigl = self.sig(eps, alpha)\n #aal = fivepoint(sig,eps,alpha)\n aal = (-1 * self.sig(eps + 2 * self.fs, alpha) + 8 * self.sig(\n eps + self.fs, alpha) - 8 * self.sig(eps - self.fs, alpha) +\n self.sig(eps - 2 * self.fs, alpha)) / (12 * self.fs)\n #Update history\n self.h1[n] = alpha\n\n return sigl, aal\n"
] |
[
[
"numpy.exp",
"numpy.zeros"
]
] |
yanzihan1/IONE-Aligning-Users-across-Social-Networks-Using-Network-Embedding
|
[
"7dab01a823c9bd1933f0d0d40170bf8e1091ca27"
] |
[
"IONE_tf/IONE_tf_retrain.py"
] |
[
"from __future__ import print_function\nimport numpy as np\nimport random\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom sklearn.linear_model import LogisticRegression\nfrom graph import *\nfrom src.openne.classify import Classifier, read_node_label\nfrom src.openne import double_up\nimport time\n\n\ndef parse_args():\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n parser.add_argument('--input', default='../../data/IONE/T.edge', #twiiter.edge\n help='Input graph file')\n parser.add_argument('--output',default='twitter.txt', #twitter的embedding\n help='Output representation file')\n parser.add_argument('--train_anchor_path', default='../../data/IONE/train',\n help='Output representation file')\n\n #==========================================================================================\n parser.add_argument('--number-walks', default=10, type=int,\n help='Number of random walks to start at each node')\n parser.add_argument('--directed', action='store_true',\n help='Treat graph as directed.')\n parser.add_argument('--walk-length', default=80, type=int,\n help='Length of the random walk started at each node')\n parser.add_argument('--workers', default=8, type=int,\n help='Number of parallel processes.')\n parser.add_argument('--representation-size', default=100, type=int,\n help='Number of latent dimensions to learn for each node.')\n parser.add_argument('--window-size', default=10, type=int,\n help='Window size of skipgram model.')\n parser.add_argument('--epochs', default=5, type=int,\n help='The training epochs of LINE and GCN')\n parser.add_argument('--p', default=1.0, type=float)\n parser.add_argument('--q', default=1.0, type=float)\n parser.add_argument('--method', default='line', help='The learning method')\n parser.add_argument('--graph-format', default='edgelist',\n help='Input graph format')\n parser.add_argument('--negative-ratio', default=5, type=int,\n help='the negative 
ratio of LINE')\n parser.add_argument('--weighted', action='store_true',\n help='Treat graph as weighted')\n parser.add_argument('--clf-ratio', default=0.5, type=float,\n help='The ratio of training data in the classification')\n parser.add_argument('--order', default=3, type=int,\n help='Choose the order of LINE, 1 means first order, 2 means second order, 3 means first order + second order')\n parser.add_argument('--no-auto-save', action='store_true',\n help='no save the best embeddings when training LINE')\n parser.add_argument('--dropout', default=0.5, type=float,\n help='Dropout rate (1 - keep probability)')\n parser.add_argument('--representation_size', default=100, type=int,\n help='Number of latent dimensions to learn for each node.')\n parser.add_argument('--weight-decay', type=float, default=5e-4,\n help='Weight for L2 loss on embedding matrix')\n parser.add_argument('--hidden', default=16, type=int,\n help='Number of units in hidden layer 1')\n parser.add_argument('--lr', default=0.01, type=float,\n help='learning rate')\n parser.add_argument('--network', default='F',\n help='social network')\n parser.add_argument('--label_file', default='',\n help='social network label')\n parser.add_argument('--encoder-list', default='[1000, 128]', type=str,\n help='a list of numbers of the neuron at each encoder layer, the last number is the '\n 'dimension of the output node representation')\n args = parser.parse_args()\n\n if not args.output:\n print(\"No output filename. 
Exit.\")\n exit(1)\n return args\n\ndef main(args,train_output_file):\n t1 = time.time()\n g = Graph()\n if args.graph_format == 'adjlist':\n g.read_adjlist(filename=args.input)\n elif args.graph_format == 'edgelist':\n g.read_edgelist(filename=args.input,\n directed=args.directed)\n\n if args.label_file and not args.no_auto_save:\n model = double_up.IONE(train_output_file,g, epoch=args.epochs, rep_size=args.representation_size, order=args.order,label_file=args.label_file, clf_ratio=args.clf_ratio,train_file=args.train_anchor_path)\n else:\n model = double_up.IONE(train_output_file,g, epoch=args.epochs,rep_size=args.representation_size, order=args.order,train_file=args.train_anchor_path)\n t2 = time.time()\n model.save_embeddings(args.output)\n if args.label_file and args.method != 'gcn':\n vectors = model.vectors\n X, Y = read_node_label(args.label_file)\n print(\"Training classifier using {:.2f}% nodes...\".format(\n args.clf_ratio*100))\n clf = Classifier(vectors=vectors, clf=LogisticRegression())\n clf.split_train_evaluate(X, Y, args.clf_ratio, seed=0)\n\ndef retrain(train_output_file):\n random.seed(123)\n np.random.seed(123)\n main(parse_args(),train_output_file)\n\n"
] |
[
[
"numpy.random.seed",
"sklearn.linear_model.LogisticRegression"
]
] |
DADADA-X/pytorch-template
|
[
"b205ad16f792fee79d553a95ecb5584b18dac946"
] |
[
"train.py"
] |
[
"import argparse\nimport collections\nimport torch\nimport numpy as np\nimport data_loader.data_loaders as module_data\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nfrom parse_config import ConfigParser\nfrom trainer import Trainer\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n\n\ndef main(config):\n logger = config.get_logger('train') # TODO 这里有一个verbosity的参数,设置的是logging级别,有012三个值,对应 warning, info, debug(default)\n\n # setup data_loader instances\n data_loader = config.init_obj('data_loader', module_data)\n valid_data_loader = data_loader.split_validation()\n\n # build model architecture, then print to console\n model = config.init_obj('arch', module_arch)\n logger.info(model) # 设置日志级别,会打印model,有名字,结构,参数个数等信息\n\n # get function handles of loss and metrics\n criterion = getattr(module_loss, config['loss'])\n metrics = [getattr(module_metric, met) for met in config['metrics']]\n\n # build optimizer, learning rate scheduler. 
delete every lines containing lr_scheduler for disabling scheduler\n trainable_params = filter(lambda p: p.requires_grad, model.parameters()) # 这个tensor在前向传播过程中,就会生成一个计算图,便于进行backpropagation\n optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n\n lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)\n\n trainer = Trainer(model, criterion, metrics, optimizer,\n config=config,\n data_loader=data_loader,\n valid_data_loader=valid_data_loader,\n lr_scheduler=lr_scheduler)\n\n trainer.train()\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template') # 实例化对象\n args.add_argument('-c', '--config', default=None, type=str, # 添加参数\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n # custom cli options to modify configuration from default values given in json file.\n # 自定义命令行选项,用来修正json文件中的默认配置\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target') # flag 标识\n # TODO 这里是可以改的,会在后续添加命令行参数。打开config.json,在这里添加要修改的参数,不要在原文件中改 TODO #\n options = [\n CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),\n CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size')\n ]\n config = ConfigParser.from_args(args, options) # options最后还是会以add_argument的形式加到args里面去\n main(config)\n"
] |
[
[
"torch.manual_seed",
"numpy.random.seed"
]
] |
anubhakabra/Calling_Out_Bluff
|
[
"0aae46449958cd26313a14bc3fcabd39e44e40f9"
] |
[
"Model4-BERT/util.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by Jiawei Liu on 2018/06/18\nimport os\nimport re\n\nimport json\nfrom openpyxl import load_workbook\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.python.estimator.estimator import Estimator\nimport yaml\nfrom spacy.lang.en.stop_words import STOP_WORDS\nimport spacy\n\nfrom bert import tokenization, modeling\nfrom bert.extract_features import model_fn_builder, convert_lst_to_features, PoolingStrategy\nfrom bert_serving.client import BertClient\n\n#from feature_extractor import FeatureExtractor\n# 加载打分系统配置, 主要是系列模型文件的路径\nwith open(\"config/sys_conf.yaml\", encoding=\"utf-8\") as conf_reader:\n sys_conf = yaml.load(conf_reader.read())\n\n# do_train: Whether to run training.\n# do_eval: Whether to run eval on the dev set.\n# do_predict: Whether to run the model in inference mode on the test set.\n# train_batch_size: Total batch size for training.\n# eval_batch_size: Total batch size for eval.\n# predict_batch_size: Total batch size for predict.\n# learning_rate: The initial learning rate for Adam.\n# num_train_epochs: Total number of training epochs to perform.\n# warmup_proportion: Proportion of training to perform linear learning rate warmup for.\n# save_checkpoints_step: How often to save the model checkpoint.\n# iterations_per_loop: How many steps to make in each estimator call.\n# prompt_id: the id of the prompt\n# train_set_prob: the Proportion examples from dataset chosen to be the train set\nwith open(\"config/train_conf.json\", \"r\") as cr:\n train_conf = json.load(cr)\n\nwith open(\"config/doc_conf.json\", \"r\") as cr:\n doc_conf = json.load(cr)\n\nspacynlp = spacy.load(\"en_core_web_sm\")\n\n# 目前所有数据集的统计数据, key代表了prompt的id, 目前1-8是asap数据集的id, 9代表雅思,\n# 之后积累数据后,会把雅思的每个prompt都用一个id表示,id当初后面的三元组(最低分,最高分,该prompt下的范文总数)\ndataset_score_range = {\n 1: (2, 12, 1783),\n 2: (1, 6, 1800),\n 3: (0, 3, 1726),\n 4: (0, 3, 1772),\n 5: (0, 4, 1805),\n 6: (0, 4, 
1800),\n 7: (0, 30, 1569),\n 8: (0, 60, 723),\n 9: (0, 9, 429)\n}\n\n\nclass ScoreResource:\n \"\"\" 整个score系统需要\n\n Attributes:\n advanced_vocabulary_name:\n\n \"\"\"\n\n def __init__(self):\n self.advanced_vocabulary_name = [\"5.5\", \"5.5-6.5\", \"6.5-7.5\"]\n self.advanced_words = self.__load_ad_words(sys_conf[\"advanced_word_path\"])\n\n def __load_ad_words(self, ad_word_path):\n \"\"\" 加载高级词汇,5.5-7分的词汇等,之后会进行一定修改\n\n Args:\n ad_word_path: 存放高级词汇文件的路径\n\n Returns:\n ad_word_dict: 字典对象,key: 单词,value:单词所属的分数段\n\n \"\"\"\n assert os.path.exists(ad_word_path), \"advance words file path is not exists, please check score_conf.yaml\"\n ad_word_dict = dict()\n for dirpath, dirnames, filenames in os.walk(ad_word_path):\n for filename in filenames:\n if filename[-4:] == 'xlsx' and filename[:-5] in self.advanced_vocabulary_name:\n try:\n filepath = os.path.join(dirpath, filename)\n word_file = load_workbook(filepath)\n sheet = word_file[word_file.sheetnames[0]]\n rows = sheet.rows\n for row in rows:\n word = [col.value for col in row][0]\n if not ad_word_dict.__contains__(word):\n ad_word_dict[word] = filename[:-5]\n else:\n if float(filename[:-5].split('-')[-1]) > float(ad_word_dict[word].split('-')[-1]):\n ad_word_dict[word] = filename[:-5]\n except:\n raise ValueError(\"loading words .xlsx file error.\")\n return ad_word_dict\n\n\nsr = ScoreResource()\n\n\nclass Document:\n \"\"\" 输入给score系统的打分的对象,会根据gec传入的结果计算一篇文章的handmarked的特征\n\n Attributes:\n __title: 作文题目, 用spacy包装的对象\n __essay: 作文正文, list对象,其中每个元素都是对应sentence的spacy封装的对象\n __gec_output: gec的结果\n\n \"\"\"\n\n def __init__(self, gec_output):\n \"\"\"\n\n Args:\n gec_output: 由json.loads起来的对象,fupugec-server项目中传过来的对象。\n\n \"\"\"\n# self.__title = spacynlp(gec_output[\"title\"].lower())\n self.__essay = [spacynlp(gec_output)]\n# self.__essay = [spacynlp(gec_output[\"sentence_\" + str(index)][\"orig_sent\"].lower()) for index in range(int(gec_output[\"sent_nums\"]))]\n self.__gec_output = gec_output\n self.features = 
self.__features()\n self.__ad_vocab = self.__advanced_vocab()\n# self.doc_result = self.__doc_result()\n\n def __doc_info(self):\n \"\"\" 文章总体的一些指标,\n\n Returns: list对象, 包括[总词数,总字符数,平均词长,词汇数,词长方差,介词数,句数,平均句长,句长方差]\n\n \"\"\"\n process_sent = lambda sent: [(token.text, token.lemma_) for token in sent if\n not (token.is_punct or token.is_space)]\n doc_token_and_lemma = []\n doc_sent_word_leng = []\n # word level\n self.__doc_num_short_sents = 0\n self.__doc_num_long_sents = 0\n for sent in self.__essay:\n temp_process_sent = process_sent(sent)\n if len(temp_process_sent) <= doc_conf[\"num_short_sentence_word\"]:\n self.__doc_num_short_sents += 1\n if len(temp_process_sent) >= doc_conf[\"num_long_sentence_word\"]:\n self.__doc_num_long_sents += 1\n doc_token_and_lemma.extend(temp_process_sent)\n doc_sent_word_leng.append(len(temp_process_sent))\n\n self.__doc_num_words = len(doc_token_and_lemma) # 文章单词的数目\n doc_words_char_leng = [len(item[0]) for item in doc_token_and_lemma] # 文章单词的平均长度\n doc_num_characters = sum(doc_words_char_leng) # 文章总得character数目\n doc_average_word_leng = np.mean(doc_words_char_leng)\n doc_var_word_leng = np.var(doc_words_char_leng)\n\n self.__doc_vocab = set([item[1] for item in doc_token_and_lemma]) - STOP_WORDS\n self.__doc_num_vocab = len(self.__doc_vocab)\n\n prepositions = []\n preposition_detect = lambda sent: [token for token in sent if token.dep_ == \"prep\"]\n for sent in self.__essay:\n temp_prepostion = preposition_detect(sent)\n prepositions.extend(temp_prepostion)\n doc_num_prepositions = len(prepositions)\n\n # sent level\n self.__doc_num_sents = len(doc_sent_word_leng) # 句子数目\n doc_average_sent_leng = np.mean(doc_sent_word_leng) # 句子平均单词数目\n doc_var_sent_leng = np.var(doc_sent_word_leng) # 句子单词数的方差\n# clause_sent_num = 0\n# for sentence_index in range(int(self.__gec_output['sent_nums'])):\n# sentence = self.__gec_output[\"sentence_\" + str(sentence_index)]\n # -1 表示不是从句, 7表示there be句型\n# if not sentence[\"sent_type\"] in [-1, 
7]:\n# clause_sent_num += 1\n\n # doc level\n# self.__doc_num_paras = self.__gec_output[\"para_nums\"]\n\n return [self.__doc_num_words, doc_num_characters, doc_average_word_leng, self.__doc_num_vocab,\n doc_var_word_leng,\n doc_num_prepositions,\n self.__doc_num_sents, doc_average_sent_leng, doc_var_sent_leng]\n\n def __error_info(self):\n \"\"\" 文章的错误信息,主要是错词率和错句率,这里没有使用具体数量,因为长文章倾向于错词数肯定要大于短文章。\n\n Returns: list对象,[ 错词率,错句率]\n\n \"\"\"\n self.__err_word = list()\n doc_num_err = 0\n self.__doc_num_err_sentence = 0\n err_sentence_sign = False\n\n for sentence_index in range(int(self.__gec_output[\"sent_nums\"])):\n sentence = self.__gec_output[\"sentence_\" + str(sentence_index)]\n if sentence[\"err_num\"] == 0:\n continue\n else:\n for edit_index in range(sentence[\"err_num\"]):\n edit = sentence[\"edit_\" + str(edit_index)]\n if edit[\"err_type\"][2:] == \"SPELL\":\n self.__err_word.append({\n \"err_word\": sentence[\"orig_sent\"].strip().split()[edit[\"start_err\"]],\n \"corr_word\": edit[\"corr_str\"]\n })\n if not edit[\"err_type\"][2:] in [\"ORTH\", \"PUNCT\"]:\n doc_num_err += 1\n err_sentence_sign = True\n if err_sentence_sign:\n self.__doc_num_err_sentence += 1\n err_sentence_sign = False\n return [doc_num_err / self.__doc_num_words, self.__doc_num_err_sentence / self.__doc_num_sents]\n\n def __advanced_vocab(self):\n \"\"\" 统计文章高级词汇\n\n Returns: 文章高级词汇字典,key->分数段,value->单词列表\n\n \"\"\"\n ad_vocab = dict()\n for key in sr.advanced_vocabulary_name:\n ad_vocab[key] = list()\n for word in self.__doc_vocab:\n if sr.advanced_words.__contains__(word):\n ad_vocab[sr.advanced_words[word]].append(word)\n return ad_vocab\n\n def __features(self):\n \"\"\" 文章所有的\n\n Returns: 一篇文章的handmarked的特征集合\n\n \"\"\"\n feature_list = self.__doc_info()\n# feature_list.extend(self.__error_info())\n return feature_list\n\n def __doc_result(self):\n \"\"\" 生成需要输出报告的文章属性字典\n\n Returns: dict, document需要输出的属性\n\n \"\"\"\n result = dict()\n result[\"num_word\"] = 
self.__doc_num_words\n result[\"num_sentence\"] = self.__doc_num_sents\n result['num_short_sentence'] = self.__doc_num_short_sents # 短句数\n result['num_long_sentence'] = self.__doc_num_long_sents # 长句数\n result['num_paragraph'] = self.__doc_num_paras # 段落数\n\n result[\"err_word\"] = self.__err_word\n result[\"num_err_word\"] = len(self.__err_word)\n result['num_err_sentence'] = self.__doc_num_err_sentence # 出现错误的句数\n\n result[\n 'ratio_short_sentence'] = self.__doc_num_short_sents * 1.0 / self.__doc_num_sents if self.__doc_num_sents != 0 else 0 # 短句占比\n result[\n 'ratio_long_sentence'] = self.__doc_num_long_sents * 1.0 / self.__doc_num_sents if self.__doc_num_sents != 0 else 0 # 长句占比\n result['err_rate_word'] = len(\n self.__err_word) * 1.0 / self.__doc_num_vocab if self.__doc_num_vocab != 0 else 0 # 错词占比\n result[\n 'err_rate_sentence'] = self.__doc_num_err_sentence * 1.0 / self.__doc_num_sents if self.__doc_num_sents != 0 else 0 # 错句占比\n\n result['word_5.5'] = self.__ad_vocab['5.5'] # 文中属于5.5分的词汇列表\n result['word_5.5-6.5'] = self.__ad_vocab['5.5-6.5'] # 文中属于5.5-6.5分的词汇列表\n result['word_6.5-7.5'] = self.__ad_vocab['6.5-7.5'] # 文中属于5.5分的词汇列表\n\n return result\n\n\nclass BertWorker:\n def __init__(self):\n # the pooling layer index of bert-original model\n self.pooling_layer = [-2]\n # the pooling_strategy of bert-original model\n self.pooling_strategy = PoolingStrategy.REDUCE_MEAN\n # \"The maximum total input sequence length after WordPiece tokenization. 
\"\n # \"Sequences longer than this will be truncated, and sequences shorter \"\n # \"than this will be padded.\"\n self.max_seq_len = 128\n\n self.bert_model_dir = sys_conf[\"bert_dir\"]\n self.config_fp = os.path.join(self.bert_model_dir, \"bert_config.json\")\n self.ckpt_fp = os.path.join(self.bert_model_dir, \"bert_model.ckpt\")\n self.vocab_fp = os.path.join(self.bert_model_dir, \"vocab.txt\")\n self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_fp)\n self.model_fn = model_fn_builder(\n bert_config=modeling.BertConfig.from_json_file(self.config_fp),\n init_checkpoint=self.ckpt_fp,\n pooling_strategy=self.pooling_strategy,\n pooling_layer=self.pooling_layer\n )\n self.estimator = Estimator(self.model_fn)\n\n def input_fn_builder_file_path(self, file_path):\n def gen_asap_article():\n dataset = pd.read_csv(file_path, encoding='utf-8')\n print(dataset.head())\n articles = dataset[\"essay\"]\n articles_set = dataset[\"essay_set\"]\n domain1_score = dataset[\"domain1_score\"]\n articles_id = dataset[\"essay_id\"]\n for i in range(len(articles)):\n doc = articles[i]\n sentences = sentence_tokenize(doc)\n tmp_f = list(convert_lst_to_features(sentences, self.max_seq_len, self.tokenizer))\n yield {\n \"input_ids\": [f.input_ids for f in tmp_f],\n \"input_mask\": [f.input_mask for f in tmp_f],\n \"input_type_ids\": [f.input_type_ids for f in tmp_f],\n \"article_set\": articles_set[i],\n \"domain1_score\": float(domain1_score[i]),\n \"article_id\": articles_id[i]\n }\n\n def input_fn():\n return (tf.data.Dataset.from_generator(\n gen_asap_article,\n output_types={\n \"input_ids\": tf.int32,\n \"input_mask\": tf.int32,\n \"input_type_ids\": tf.int32,\n \"article_set\": tf.int32,\n \"domain1_score\": tf.float32,\n \"article_id\": tf.int32\n },\n output_shapes={\n \"input_ids\": (None, self.max_seq_len),\n \"input_mask\": (None, self.max_seq_len),\n \"input_type_ids\": (None, self.max_seq_len),\n \"article_set\": [],\n \"domain1_score\": [],\n \"article_id\": 
[]\n }\n ))\n\n return input_fn\n\n def inference_from_path_with_permfile(self, file_path):\n print(\"FILE PATH\", file_path)\n input_fn = self.input_fn_builder_file_path(file_path)\n for r in self.estimator.predict(input_fn, yield_single_examples=False):\n temp_sample = {\"doc_encodes\": r[\"encodes\"],\n \"article_set\": r[\"article_set\"],\n \"domain1_score\": r[\"domain1_score\"],\n \"article_id\": r[\"article_id\"]}\n yield temp_sample\n\n def input_fn_builder_eilts_path(self, essay_path, score_path):\n def gen_eilts_article():\n score = dict()\n with open(score_path, \"r\", encoding=\"utf-8\") as sr:\n for line in sr:\n score[line.split()[0]] = float(line.split()[1])\n\n for dirpath, dirnames, filenames in os.walk(essay_path):\n if filenames:\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as dr:\n lines = []\n for line in dr:\n if line.strip():\n lines.append(line.strip())\n title_and_doc = \" \".join(lines)\n title = title_and_doc.split(\"\\t\", 1)[0].strip()\n doc = title_and_doc.split(\"\\t\", 1)[1].strip()\n sentences = sentence_tokenize(doc)\n tmp_f = list(convert_lst_to_features(sentences, self.max_seq_len, self.tokenizer))\n yield {\n \"input_ids\": [f.input_ids for f in tmp_f],\n \"input_mask\": [f.input_mask for f in tmp_f],\n \"input_type_ids\": [f.input_type_ids for f in tmp_f],\n \"article_set\": 9,\n \"domain1_score\": float(score[filename]),\n \"article_id\": int(filename)\n }\n\n def input_fn():\n return (tf.data.Dataset.from_generator(\n gen_eilts_article,\n output_types={\n \"input_ids\": tf.int32,\n \"input_mask\": tf.int32,\n \"input_type_ids\": tf.int32,\n \"article_set\": tf.int32,\n \"domain1_score\": tf.float32,\n \"article_id\": tf.int32\n },\n output_shapes={\n \"input_ids\": (None, self.max_seq_len),\n \"input_mask\": (None, self.max_seq_len),\n \"input_type_ids\": (None, self.max_seq_len),\n \"article_set\": [],\n \"domain1_score\": [],\n \"article_id\": []\n }\n ))\n\n return 
input_fn\n\n def inference_from_eitls_path(self, essay_path, score_path):\n input_fn = self.input_fn_builder_eilts_path(essay_path, score_path)\n for r in self.estimator.predict(input_fn, yield_single_examples=False):\n temp_sample = {\"doc_encodes\": r[\"encodes\"],\n \"article_set\": r[\"article_set\"],\n \"domain1_score\": r[\"domain1_score\"],\n \"article_id\": r[\"article_id\"]}\n yield temp_sample\n\n def input_fn_builder_client(self):\n\n pass\n\n def inference_from_client(self):\n pass\n\n\ndef read_dataset_into_tfrecord(dataset_path, bw: BertWorker):\n dataset_positive_path = os.path.join(dataset_path, \"prompt.csv\")\n print(\"DSS\", dataset_positive_path)\n tf_record_path = os.path.join(dataset_path, \"asap_dataset_prompt.tfrecord\")\n\n # TODO(Jiawei):提取公共代码\n def flatten(object):\n for item in object:\n if isinstance(item, (list, tuple, set)):\n yield from flatten(item)\n else:\n yield item\n \n with tf.python_io.TFRecordWriter(tf_record_path) as tfrecord_writer:\n for i, item in enumerate(bw.inference_from_path_with_permfile(dataset_positive_path)):\n if i % 100 == 0:\n tf.logging.info(\"process {} docs\".format(i))\n features = {}\n value=item[\"doc_encodes\"].reshape(-1)\n value = np.nan_to_num(value.flatten())\n features[\"doc_encodes\"] = tf.train.Feature(float_list=tf.train.FloatList(value=value))\n \n value=item[\"doc_encodes\"].shape\n flattened = list(flatten(value))\n value = np.nan_to_num(flattened)\n features[\"doc_encodes_shape\"] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n \n value = [item[\"article_set\"]]\n flattened = list(flatten(value))\n value = np.nan_to_num(flattened)\n# value = np.nan_to_num(value.flatten())\n features[\"article_set\"] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n \n value = [item[\"article_id\"]]\n flattened = list(flatten(value))\n value = np.nan_to_num(flattened)\n# value = np.nan_to_num(value.flatten())\n features[\"article_id\"] = 
tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n \n value = [item[\"domain1_score\"]]\n flattened = list(flatten(value))\n value = np.nan_to_num(flattened)\n# value = np.nan_to_num(value.flatten())\n features[\"domain1_score\"] = tf.train.Feature(float_list=tf.train.FloatList(value=value))\n \n value = [item[\"doc_encodes\"].shape[0]]\n flattened = list(flatten(value))\n value = np.nan_to_num(flattened)\n# value = np.nan_to_num(value.flatten())\n features[\"doc_sent_num\"] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n tf_features = tf.train.Features(feature=features)\n tf_example = tf.train.Example(features=tf_features)\n tfrecord_writer.write(tf_example.SerializeToString())\n\n\ndef read_ielts_into_tfrecord(dataset_path, bw: BertWorker):\n essay_path = os.path.join(dataset_path, \"essay\")\n score_path = os.path.join(dataset_path, \"score\")\n tf_record_path = os.path.join(dataset_path, \"ielts.tfrecord\")\n\n with tf.python_io.TFRecordWriter(tf_record_path) as tfrecord_writer:\n for i, item in enumerate(bw.inference_from_eitls_path(essay_path, score_path)):\n if i % 100 == 0:\n tf.logging.info(\"process {} docs\".format(i))\n features = {}\n features[\"doc_encodes\"] = tf.train.Feature(\n float_list=tf.train.FloatList(value=item[\"doc_encodes\"].reshape(-1)))\n features[\"doc_encodes_shape\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=item[\"doc_encodes\"].shape))\n features[\"article_set\"] = tf.train.Feature(int64_list=tf.train.Int64List(value=[item[\"article_set\"]]))\n features[\"article_id\"] = tf.train.Feature(int64_list=tf.train.Int64List(value=[item[\"article_id\"]]))\n features[\"domain1_score\"] = tf.train.Feature(float_list=tf.train.FloatList(value=[item[\"domain1_score\"]]))\n features[\"doc_sent_num\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[item[\"doc_encodes\"].shape[0]]))\n tf_features = tf.train.Features(feature=features)\n tf_example = tf.train.Example(features=tf_features)\n 
tfrecord_writer.write(tf_example.SerializeToString())\n\n\ndef create_initializer(initializer_range=0.02):\n \"\"\" 创建tensorflow初始化器\n\n Args:\n initializer_range: 初始化的范围设置\n\n Returns:\n\n \"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)\n\n\ndef create_rnn_cell(hidden_size, dropout_prob, layers_num, isbidirectional, is_training):\n \"\"\" 创建rnn cell, 包括多层和单双向的控制。\n\n Args:\n hidden_size: rnn cell 隐层的宽度\n dropout_prob: dropout的比例\n layers_num: rnn 网络的层数\n isbidirectional: 是否使用双向的cell\n is_training: 是否为训练时间段,训练时间段,cell需要使用dropout包裹\n\n Returns: cell实例\n\n \"\"\"\n\n def single_rnn_cell():\n single_cell = tf.nn.rnn_cell.LSTMCell(hidden_size)\n if is_training:\n single_cell = tf.nn.rnn_cell.DropoutWrapper(single_cell, output_keep_prob=1 - dropout_prob)\n return single_cell\n\n if isbidirectional:\n fw_cell = tf.nn.rnn_cell.MultiRNNCell([single_rnn_cell() for _ in range(layers_num)])\n bw_cell = tf.nn.rnn_cell.MultiRNNCell([single_rnn_cell() for _ in range(layers_num)])\n cell = [fw_cell, bw_cell]\n else:\n fw_cell = tf.nn.rnn_cell.MultiRNNCell([single_rnn_cell() for _ in range(layers_num)])\n cell = fw_cell\n return cell\n\n\ndef input_fn_from_client(bw: BertWorker):\n def gen():\n for item in bw.inference_from_client():\n temp_sample = {\n \"doc_encodes\": item[\"doc_encodes\"],\n \"article_set\": item[\"article_set\"],\n \"domain1_score\": item[\"domain1_score\"],\n \"article_id\": item[\"article_id\"],\n \"doc_sent_num\": item[\"doc_encodes\"].shape[0]\n }\n yield temp_sample\n\n def input_fn():\n return (tf.data.Dataset.from_generator(\n gen,\n output_types={\n \"doc_encodes\": tf.float32,\n \"article_set\": tf.int64,\n \"domain1_score\": tf.float32,\n \"article_id\": tf.int64,\n \"doc_sent_num\": tf.int64\n },\n output_shapes={\n \"doc_encodes\": [],\n \"article_set\": [],\n \"domain1_score\": [None, int(sys_conf[\"bert_emb_dim\"])],\n \"article_id\": [],\n \"doc_sent_num\": []\n }\n ))\n\n return input_fn\n\n\ndef 
serving_input_receiver_fn():\n \"\"\" tensorflow serving的一个输入流函数\n\n Returns: 略过,自己看\n\n \"\"\"\n features = {\n \"doc_encodes\": tf.placeholder(tf.float32, [1, None, int(sys_conf[\"bert_emb_dim\"])]),\n \"prompt_encodes\": tf.placeholder(tf.float32, [1, None, int(sys_conf[\"bert_emb_dim\"])]),\n \"article_set\": tf.placeholder(tf.int64, [None]),\n \"domain1_score\": tf.placeholder(tf.float32, [None]),\n \"article_id\": tf.placeholder(tf.int64, [None]),\n \"doc_sent_num\": tf.placeholder(tf.int64, [None])\n }\n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)\n\n\ndef input_fn_from_tfrecord(tfrecord_path, batch_size, is_training, element_ids):\n \"\"\" 以tfrecord为输入,构建该模型需要的input的io\n\n Args:\n tfrecord_path: tfrecord文件的路径\n batch_size: 模型使用的batch_size,\n is_training: boolean类型标致是否处于训练阶段\n element_ids: 从tfrecord按照element_ids取出对应的所有元素\n\n Returns: input_fn的handle\n\n \"\"\"\n print(\"TFRECORD_PATH\", tfrecord_path)\n prompts_embedding_path = os.path.join(sys_conf[\"data_dir\"], \"prompt.npz\")\n print(\"PROMPTS_EMBEDDING_PATH\", prompts_embedding_path)\n if os.path.exists(prompts_embedding_path):\n prompts_embedding = np.load(prompts_embedding_path, allow_pickle=True)[\"features\"][()]\n else:\n raise ValueError(\"prompts embedding path is not exist, please check\")\n\n features_map = {\n \"doc_encodes\": tf.VarLenFeature(dtype=tf.float32),\n \"doc_encodes_shape\": tf.FixedLenFeature(shape=(2,), dtype=tf.int64),\n \"article_set\": tf.FixedLenFeature(shape=(), dtype=tf.int64),\n \"doc_sent_num\": tf.FixedLenFeature(shape=(), dtype=tf.int64),\n \"domain1_score\": tf.FixedLenFeature(shape=(), dtype=tf.float32),\n \"article_id\": tf.FixedLenFeature(shape=(), dtype=tf.int64)\n }\n\n def _decode_tfserilized(record, feature_map, shuffle=False):\n example = tf.parse_single_example(record, feature_map)\n temp_example = dict()\n temp_example[\"doc_encodes\"] = tf.sparse_tensor_to_dense(example[\"doc_encodes\"])\n temp_example[\"doc_encodes\"] = 
tf.reshape(temp_example[\"doc_encodes\"], example[\"doc_encodes_shape\"])\n temp_example[\"article_set\"] = tf.to_int32(example[\"article_set\"])\n temp_example[\"doc_sent_num\"] = tf.to_int32(example[\"doc_sent_num\"])\n temp_example[\"domain1_score\"] = example[\"domain1_score\"]\n temp_example[\"article_id\"] = tf.to_int32(example[\"article_id\"])\n # 只在计算prompt-relevant score的时候有用, 因为在训练prompt-relevant模型的时候是按prompt来训练的,所以把所有sample(包含正负样本)的prompt_encode都赋值一样的\n temp_example[\"prompt_encodes\"] = tf.convert_to_tensor(prompts_embedding[train_conf[\"prompt_id\"]])\n if shuffle:\n if is_training:\n temp_example[\"doc_encodes\"] = tf.random.shuffle(temp_example[\"doc_encodes\"])\n temp_example[\"article_id\"] = temp_example[\"article_id\"] + 100000\n else:\n temp_example[\"doc_encodes\"] = tf.random.shuffle(temp_example[\"doc_encodes\"], seed=1)\n temp_example[\"article_id\"] = temp_example[\"article_id\"] + 100000\n return temp_example\n\n def input_fn():\n ds = tf.data.TFRecordDataset(tfrecord_path)\n ds1 = ds.map(lambda record: _decode_tfserilized(record, features_map))\n ds2 = ds.map(lambda record: _decode_tfserilized(record, features_map, True))\n ds = ds1.concatenate(ds2)\n ds = ds.filter(predicate=lambda record: tf.math.greater(\n tf.reduce_sum(tf.cast(tf.equal(tf.to_int32(element_ids), record[\"article_id\"]), tf.int32)), 0))\n if is_training:\n ds = ds.repeat()\n ds = ds.shuffle(buffer_size=2000)\n ds = ds.padded_batch(batch_size=batch_size,\n padded_shapes={\n \"article_set\": [],\n \"doc_encodes\": [None, int(sys_conf[\"bert_emb_dim\"])],\n \"doc_sent_num\": [],\n \"domain1_score\": [],\n \"article_id\": [],\n \"prompt_encodes\": [None, int(sys_conf[\"bert_emb_dim\"])]\n },\n drop_remainder=False)\n print(ds)\n return ds\n\n return input_fn\n\n\ndef read_adv_sample():\n # asap数据集的相关参数,配置,这里做全局变量使用,方便下面三个score predictor调用\n adv_csv_file_path = os.path.join(\"AES_FinalTestcases/prompt1/contractions_aes_prompt1.csv\")\n print(\"ADV_CSV_FILE_PATH\", 
adv_csv_file_path)\n if not os.path.exists(adv_csv_file_path):\n raise ValueError(\"adv_file_path is invalid.\")\n adv_dataset = pd.read_csv(adv_csv_file_path, encoding='utf-8')\n adv_dataset.insert(0, 'ID', range(1, 1 + len(adv_dataset)))\n adv_dataset.insert(0, 'set', 1)\n print(adv_dataset.head())\n# asap_dataset = pd.read_csv(asap_csv_file_path, encoding='ISO-8859-1')\n articles_adv_id = list(adv_dataset[\"ID\"])\n articles_adv_set = list(adv_dataset[\"set\"])\n# domain1_score = asap_dataset[\"domain1_score\"]\n# handmark_scores = dict(zip(articles_id, domain1_score))\n set_adv_ids = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n 6: [],\n 7: [],\n 8: []\n }\n for i in range(len(articles_adv_id)):\n set_adv_ids[articles_adv_set[i]].append(articles_adv_id[i])\n \n# return articles_id, articles_set, domain1_score\n return articles_adv_id, set_adv_ids\n\ndef read_asap_dataset():\n # asap数据集的相关参数,配置,这里做全局变量使用,方便下面三个score predictor调用\n asap_csv_file_path = os.path.join(sys_conf[\"data_dir\"], \"prompt8.csv\")\n print(\"ASAP_CSV_FILE_PATH\", asap_csv_file_path)\n if not os.path.exists(asap_csv_file_path):\n raise ValueError(\"asap_file_path is invalid.\")\n asap_dataset = pd.read_csv(asap_csv_file_path, encoding='utf-8')\n# asap_dataset = pd.read_csv(asap_csv_file_path, encoding='ISO-8859-1')\n articles_id = list(asap_dataset[\"essay_id\"])\n articles_set = list(asap_dataset[\"essay_set\"])\n domain1_score = asap_dataset[\"domain1_score\"]\n handmark_scores = dict(zip(articles_id, domain1_score))\n set_ids = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n 6: [],\n 7: [],\n 8: []\n }\n for i in range(len(articles_id)):\n set_ids[articles_set[i]].append(articles_id[i])\n \n# return articles_id, articles_set, domain1_score\n return articles_id, articles_set, set_ids, handmark_scores\n\n\ndef generate_xgboost_train_set(articles_id,\n articles_set,\n domain1_scores,\n train_set_gec_result_path,\n train_set_saved_path):\n \"\"\"Generate xgboost training data set based on 
the result of the training set gec\n\n Args:\n articles_id: list of training set article ids\n articles_set: list of training set articles\n domain1_scores: the manually labeled scores of the articles in the training set, because the asap dataset calls this score domain1_scores\n train_set_gec_result_path: The path of the result file generated by the gec engine in the training set article, the file format is a line corresponding to the gec result of an article.\n train_set_saved_path: save as npz file type, save path of npz file\n\n Returns: None.\n\n \"\"\"\n dataset_gec_path = train_set_gec_result_path\n dataset_xgboost_train_file = train_set_saved_path\n\n # normalized_scores\n handmark_scores = dict(zip(articles_id, domain1_scores))\n\n # normalized_orgin_scores\n handmark_normalized_scores = {}\n for key, value in handmark_scores.items():\n article_set_id = articles_set[articles_id.index(key)]\n min_value = dataset_score_range[article_set_id][0]\n max_value = dataset_score_range[article_set_id][1]\n normalize_value = (value - min_value) / (max_value - min_value)\n handmark_normalized_scores[key] = normalize_value\n\n features = {}\n \n# gec_output= []\n# .insert(0, 'ID', range(1, 1 + len(adv_dataset)))\n count = 0\n with open(dataset_gec_path, encoding=\"utf-8\") as fr:\n for line in fr:\n count +=1\n# print(line[0], line[3])\n# id = line[0]\n# print(count, line)\n# line_split = line.split(\",\")\n# print(line_split)\n id = count\n print(\"ID\", id)\n# print(line_split[0], line_split[2])\n# id = int(line_split[0].strip())\n gec_output = line.strip()\n# gec_output = gec_output.encode(\"utf-8\")\n# print(gec_output)\n #feats = FeatureExtractor()\n #feats.initialize_dictionaries(gec_output)\n #features[id] = feats.gen_feats(gec_output) \n features[id] = Document(gec_output).features\n# gec_output = []\n# print(\"DONE ID: \", id)\n # TODO(Jiawei): may have bugs if basic_scores的key和features的key不一样\n# for key, value in handmark_normalized_scores.items():\n# if key 
in features:\n# features[key].append(value)\n\n np.savez(dataset_xgboost_train_file, features=features)\n\n\ndef sentence_tokenize(documents):\n \"\"\"分句函数,将一整段文本进行分句\n\n Args:\n documents: 待分句的document, string类型\n\n Returns: 句子组成的list\n\n \"\"\"\n # 查看\n locations = [-1]\n locations.extend([item.start() for item in re.finditer(r'[\\.\\?\\!](?=[^ \\W\\d])', documents)])\n locations.append(len(documents))\n sentences = [documents[locations[i] + 1:locations[i + 1] + 1] for i in range(len(locations) - 1)]\n pre_split_documents = \" \".join(sentences)\n\n sentences = nltk.sent_tokenize(pre_split_documents)\n return sentences\n\n\nif __name__ == \"__main__\":\n # 使用bert对prompt进行encode\n# bc = BertClient(check_length=False)\n# result = {}\n# prompt_npz = \"dataset/prompt8.npz\"\n# with open(\"dataset/prompt8.csv\", \"r\", encoding=\"ISO-8859-1\") as reader:\n# for i, line in enumerate(reader):\n# sentences = sentence_tokenize(line.strip())\n# encodes = bc.encode(sentences)\n# result[i + 1] = encodes\n# np.savez(prompt_npz, features=result)\n# bw = BertWorker()\n# bw.inference_from_path_with_permfile()\n# read_dataset_into_tfrecord(\"dataset/\", bw)\n \n# articles_id, articles_set, domain1_score = read_asap_dataset()\n# generate_xgboost_train_set(articles_id, articles_set, domain1_score, \"AES_FinalTestcases/prompt1/contractions_aes_prompt1.csv\", \"dataset/asap_xgboost_adv.npz\")\n print(\"Done\")\n"
] |
[
[
"tensorflow.data.TFRecordDataset",
"tensorflow.train.Features",
"tensorflow.train.Int64List",
"numpy.load",
"numpy.mean",
"tensorflow.reshape",
"pandas.read_csv",
"tensorflow.parse_single_example",
"tensorflow.random.shuffle",
"tensorflow.train.FloatList",
"numpy.nan_to_num",
"tensorflow.FixedLenFeature",
"tensorflow.train.Example",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"tensorflow.placeholder",
"tensorflow.to_int32",
"tensorflow.python.estimator.estimator.Estimator",
"tensorflow.convert_to_tensor",
"tensorflow.VarLenFeature",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.data.Dataset.from_generator",
"numpy.savez",
"tensorflow.truncated_normal_initializer",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"numpy.var"
]
] |
eclee25/flu-SDI-exploratory-age
|
[
"2f5a4d97b84d2116e179e85fe334edf4556aa946"
] |
[
"scripts/create_fluseverity_figs/F2_OR_time.py"
] |
[
"#!/usr/bin/python\n\n##############################################\n###Python template\n###Author: Elizabeth Lee\n###Date: 4/26/14\n###Function: OR of incidence in children to incidence in adults vs. week number. Incidence in children and adults is normalized by the size of the child and adult populations in the second calendar year of the flu season.\n\n###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv\n\n###Command Line: python F2_OR_time.py\n##############################################\n\n### notes ###\n# Incidence per 100,000 is normalized by total population by second calendar year of the flu season\n\n### packages/modules ###\nimport csv\nimport matplotlib.pyplot as plt\n\n## local modules ##\nimport functions as fxn\n\n### data structures ###\n### functions ###\n### data files ###\nincidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')\nincid = csv.reader(incidin, delimiter=',')\npopin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')\npop = csv.reader(popin, delimiter=',')\n\n### called/local plotting parameters ###\nps = fxn.pseasons\nfw = fxn.gp_fluweeks\nsl = fxn.gp_seasonlabels\ncolvec = fxn.gp_colors\nwklab = fxn.gp_weeklabels\nfs = 24\nfssml = 16\n\n### program ###\n\n# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR\nd_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)\nd_zOR = fxn.week_zOR_processing(d_wk, d_OR)\n# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]\nd_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)\n\n# plot values\nfor s in ps:\n\tplt.plot(xrange(len(wklab)), d_OR53ls[s], 
marker = 'o', color = colvec[s-2], label = sl[s-2], linewidth = 2)\nplt.xlim([0, len(wklab)-1])\nplt.xticks(range(len(wklab))[::5], wklab[::5]) \nplt.ylim([0, 12])\nplt.xlabel('Week Number', fontsize=fs)\nplt.ylabel('OR, child:adult', fontsize=fs)\nplt.legend(loc='upper left')\nplt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/F2/OR_time.png', transparent=False, bbox_inches='tight', pad_inches=0)\nplt.close()\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylabel"
]
] |
felixquinton1/TransBTS
|
[
"6992c902413ba15f40ebfe9f6d5d0e3594051033"
] |
[
"data/preprocess.py"
] |
[
"import pickle\r\nimport os\r\nimport numpy as np\r\nimport nibabel as nib\r\n\r\nmodalities = ('t1')\r\n\r\n# train\r\ntrain_set = {\r\n 'root': '/home/felix/Bureau/TransBTS/data/Train/',\r\n 'image': 'image',\r\n 'label': 'label',\r\n 'flist': 'train.txt',\r\n 'has_label': True\r\n }\r\n\r\n# test/validation data\r\nvalid_set = {\r\n 'root': '/home/felix/Bureau/TransBTS/data/Valid/',\r\n 'image': 'image',\r\n 'label': 'label',\r\n 'flist': 'valid.txt',\r\n 'has_label': True\r\n }\r\n\r\ntest_set = {\r\n 'root': '/home/felix/Bureau/TransBTS/data/Test/',\r\n 'image': 'image',\r\n 'label': 'label',\r\n 'flist': 'test.txt',\r\n 'has_label': True\r\n }\r\n\r\n\r\ndef nib_load(file_name):\r\n if not os.path.exists(file_name):\r\n print('Invalid file name, can not find the file!')\r\n\r\n proxy = nib.load(file_name)\r\n data = proxy.get_fdata()\r\n proxy.uncache()\r\n return data\r\n\r\n\r\ndef process_i16(path, has_label=True):\r\n \"\"\" Save the original 3D MRI images with dtype=int16.\r\n Noted that no normalization is used! \"\"\"\r\n label = np.array(nib_load(path + 'seg.nii.gz'), dtype='uint8', order='C')\r\n\r\n images = np.stack([\r\n np.array(nib_load(path + modal + '.nii.gz'), dtype='int16', order='C')\r\n for modal in modalities], -1)# [240,240,155]\r\n\r\n output = path + 'data_i16.pkl'\r\n\r\n with open(output, 'wb') as f:\r\n print(output)\r\n print(images.shape, type(images), label.shape, type(label)) # (240,240,155,4) , (240,240,155)\r\n pickle.dump((images, label), f)\r\n\r\n if not has_label:\r\n return\r\n\r\n\r\ndef process_f32b0(path, name, has_label=True):\r\n \"\"\" Save the data with dtype=float32.\r\n z-score is used but keep the background with zero! 
\"\"\"\r\n if has_label:\r\n label = np.array(nib_load(path + 'label/lb' + name[2:]), dtype='uint8', order='C')\r\n images = np.array(nib_load(path + 'image/' + name), dtype='float32', order='C') # [240,240,155]\r\n images = np.expand_dims(images, 3)\r\n # images = np.concatenate((images,images,images,images), axis=3)\r\n output = path + name[:-7] + 'data_f32b0.pkl'\r\n mask = images.sum(-1) > 0\r\n for k in range(1):\r\n\r\n x = images[..., k] #\r\n y = x[mask]\r\n\r\n # 0.8885\r\n x[mask] -= y.mean()\r\n x[mask] /= y.std()\r\n\r\n images[..., k] = x\r\n\r\n with open(output, 'wb') as f:\r\n print(output)\r\n\r\n if has_label:\r\n pickle.dump((images, label), f)\r\n else:\r\n pickle.dump(images, f)\r\n\r\n if not has_label:\r\n return\r\n\r\n\r\ndef doit(dset):\r\n root, has_label = dset['root'], dset['has_label']\r\n file_list = os.path.join(root, dset['flist'])\r\n subjects = open(file_list).read().splitlines()\r\n names = [sub.split('/')[-1] for sub in subjects]\r\n paths = [os.path.join(root, name) for sub, name in zip(subjects, names)]\r\n\r\n for name in names:\r\n\r\n process_f32b0(root, name, has_label)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n doit(train_set)\r\n doit(valid_set)\r\n doit(test_set)\r\n\r\n"
] |
[
[
"numpy.expand_dims"
]
] |
tyjyang/analysis-essentials
|
[
"dad8cff5957562b8ab1386d2e67ef37819a39751"
] |
[
"git/files/make_plot.py"
] |
[
"#!/usr/bin/env python\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndata = np.random.normal(4, 2, 1000)\nbins = np.linspace(0, 10, 50)\n\nplt.hist(data, bins=bins, histtype='stepfilled')\nplt.savefig('plots/first_plot.pdf')\n"
] |
[
[
"numpy.random.normal",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.hist"
]
] |
jvarela-zenika/sketch-code-fork
|
[
"a185364325d67818452e5c9ca8e85ef4d6324295"
] |
[
"src/classes/dataset/Dataset.py"
] |
[
"from __future__ import absolute_import\n\nimport os\nimport shutil\nimport pdb\nimport hashlib\nimport numpy as np\n\nfrom keras.preprocessing.text import Tokenizer, one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\n\nfrom .ImagePreprocessor import *\n\nVOCAB_FILE = '../vocabulary.vocab'\nTRAINING_SET_NAME = \"training_set\"\nVALIDATION_SET_NAME = \"validation_set\"\nBATCH_SIZE = 64\n\nclass Dataset:\n\n def __init__(self, data_input_folder, test_set_folder=None):\n self.data_input_folder = data_input_folder\n self.test_set_folder = test_set_folder\n\n def split_datasets(self, validation_split):\n sample_ids = self.populate_sample_ids()\n print(\"Total number of samples: \", len(sample_ids))\n\n if len(sample_ids) > 2000:\n np.random.shuffle(sample_ids)\n sample_ids = sample_ids[0:2000]\n\n print(\"Sample limitation reached, pick 2000 random sample for this training\")\n\n train_set_ids, val_set_ids, shuffled_sampled_ids = self.get_all_id_sets(validation_split, sample_ids)\n training_path, validation_path = self.split_samples(train_set_ids, val_set_ids)\n\n return training_path, validation_path\n\n def split_samples(self, train_set_ids, val_set_ids):\n training_path, validation_path = self.create_data_folders()\n self.copy_files_to_folders(train_set_ids, training_path)\n self.copy_files_to_folders(val_set_ids, validation_path)\n return training_path, validation_path\n\n def preprocess_data(self, training_path, validation_path, augment_training_data):\n train_img_preprocessor = ImagePreprocessor()\n train_img_preprocessor.build_image_dataset(training_path, augment_data=augment_training_data)\n val_img_preprocessor = ImagePreprocessor()\n val_img_preprocessor.build_image_dataset(validation_path, augment_data=0)\n\n\n\n\n ##########################################\n ####### PRIVATE METHODS ##################\n ##########################################\n\n @classmethod\n def load_vocab(cls):\n file = 
open(VOCAB_FILE, 'r')\n text = file.read().splitlines()[0]\n file.close()\n tokenizer = Tokenizer(filters='', split=\" \", lower=False)\n tokenizer.fit_on_texts([text])\n vocab_size = len(tokenizer.word_index) + 1\n return tokenizer, vocab_size\n\n @classmethod\n def create_generator(cls, data_input_path, max_sequences):\n img_features, text_features = Dataset.load_data(data_input_path)\n total_sequences = 0\n for text_set in text_features: total_sequences += len(text_set.split())\n steps_per_epoch = total_sequences // BATCH_SIZE\n tokenizer, vocab_size = Dataset.load_vocab()\n data_gen = Dataset.data_generator(text_features, img_features, max_sequences, tokenizer, vocab_size)\n return data_gen, steps_per_epoch\n\n @classmethod\n def data_generator(cls, text_features, img_features, max_sequences, tokenizer, vocab_size):\n while 1:\n for i in range(0, len(text_features), 1):\n Ximages, XSeq, y = list(), list(),list()\n for j in range(i, min(len(text_features), i+1)):\n image = img_features[j]\n desc = text_features[j]\n in_img, in_seq, out_word = Dataset.process_data_for_generator([desc], [image], max_sequences, tokenizer, vocab_size)\n for k in range(len(in_img)):\n Ximages.append(in_img[k])\n XSeq.append(in_seq[k])\n y.append(out_word[k])\n yield [[np.array(Ximages), np.array(XSeq)], np.array(y)]\n\n @classmethod\n def process_data_for_generator(cls, texts, features, max_sequences, tokenizer, vocab_size):\n X, y, image_data = list(), list(), list()\n sequences = tokenizer.texts_to_sequences(texts)\n for img_no, seq in enumerate(sequences):\n for i in range(1, len(seq)):\n in_seq, out_seq = seq[:i], seq[i]\n in_seq = pad_sequences([in_seq], maxlen=max_sequences)[0]\n out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n image_data.append(features[img_no])\n X.append(in_seq[-48:])\n y.append(out_seq)\n return np.array(image_data), np.array(X), np.array(y)\n\n @classmethod\n def load_data(cls, data_input_path):\n text = []\n images = []\n all_filenames = 
os.listdir(data_input_path)\n all_filenames.sort()\n for filename in all_filenames:\n if filename[-3:] == \"npz\":\n image = np.load(data_input_path+'/'+filename)\n images.append(image['features'])\n elif filename[-3:] == 'gui':\n file = open(data_input_path+'/'+filename, 'r')\n texts = file.read()\n file.close()\n syntax = '<START> ' + texts + ' <END>'\n syntax = ' '.join(syntax.split())\n syntax = syntax.replace(',', ' ,')\n text.append(syntax)\n images = np.array(images, dtype=float)\n return images, text\n\n def create_data_folders(self):\n training_path = \"{}/{}\".format(os.path.dirname(self.data_input_folder), TRAINING_SET_NAME)\n validation_path = \"{}/{}\".format(os.path.dirname(self.data_input_folder), VALIDATION_SET_NAME)\n\n self.delete_existing_folders(training_path)\n self.delete_existing_folders(validation_path)\n\n if not os.path.exists(training_path): os.makedirs(training_path)\n if not os.path.exists(validation_path): os.makedirs(validation_path)\n return training_path, validation_path\n\n def copy_files_to_folders(self, sample_ids, output_folder):\n copied_count = 0\n for sample_id in sample_ids:\n sample_id_png_path = \"{}/{}.png\".format(self.data_input_folder, sample_id)\n sample_id_gui_path = \"{}/{}.gui\".format(self.data_input_folder, sample_id)\n if os.path.exists(sample_id_png_path) and os.path.exists(sample_id_gui_path):\n output_png_path = \"{}/{}.png\".format(output_folder, sample_id)\n output_gui_path = \"{}/{}.gui\".format(output_folder, sample_id)\n shutil.copyfile(sample_id_png_path, output_png_path)\n shutil.copyfile(sample_id_gui_path, output_gui_path)\n copied_count += 1\n print(\"Moved {} files from {} to {}\".format(copied_count, self.data_input_folder, output_folder))\n\n def delete_existing_folders(self, folder_to_delete):\n if os.path.exists(folder_to_delete):\n shutil.rmtree(folder_to_delete)\n print(\"Deleted existing folder: {}\".format(folder_to_delete))\n\n def populate_sample_ids(self):\n all_sample_ids = []\n 
full_path = os.path.realpath(self.data_input_folder)\n for f in os.listdir(full_path):\n if f.find(\".gui\") != -1:\n file_name = f[:f.find(\".gui\")]\n if os.path.isfile(\"{}/{}.png\".format(self.data_input_folder, file_name)):\n all_sample_ids.append(file_name)\n return all_sample_ids\n\n def get_all_id_sets(self, validation_split, sample_ids):\n np.random.shuffle(sample_ids)\n val_count = int(validation_split * len(sample_ids))\n train_count = len(sample_ids) - val_count\n print(\"Splitting datasets, training samples: {}, validation samples: {}\".format(train_count, val_count))\n train_set, val_set = self.split_paths(sample_ids, train_count, val_count)\n\n return train_set, val_set, sample_ids\n\n def split_paths(self, sample_ids, train_count, val_count):\n train_set = []\n val_set = []\n hashes = []\n for sample_id in sample_ids:\n f = open(\"{}/{}.gui\".format(self.data_input_folder, sample_id), 'r', encoding='utf-8')\n\n with f:\n chars = \"\"\n for line in f:\n chars += line\n content_hash = chars.replace(\" \", \"\").replace(\"\\n\", \"\")\n content_hash = hashlib.sha256(content_hash.encode('utf-8')).hexdigest()\n\n if len(val_set) == val_count:\n train_set.append(sample_id)\n else:\n is_unique = True\n for h in hashes:\n if h is content_hash:\n is_unique = False\n break\n\n if is_unique:\n val_set.append(sample_id)\n else:\n train_set.append(sample_id)\n\n hashes.append(content_hash)\n\n assert len(val_set) == val_count\n\n return train_set, val_set\n"
] |
[
[
"numpy.array",
"numpy.load",
"numpy.random.shuffle"
]
] |
Sibimobon/Connect4
|
[
"5694c23a7dc27251f3b659ce3fd6c67e1cde8652"
] |
[
"tests/test_common.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom agents.common import BoardPiece, NO_PLAYER, PLAYER1, PLAYER2, pretty_print_board, initialize_game_state, \\\n string_to_board, apply_player_action, connected_four, check_connect_topleft_bottomright\n\n\ndef test_initialize_game_state():\n\n ret = initialize_game_state()\n\n assert isinstance(ret, np.ndarray)\n assert ret.dtype == BoardPiece\n assert ret.shape == (6, 7)\n assert np.all(ret == NO_PLAYER)\n\ndef test_output_pretty_print_board():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n ret = pretty_print_board(initialBoard)\n assert ret != ''\n\ndef test_empty_pretty_print_board():\n\n initialBoard = np.ndarray(shape=(7, 6), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n ret = pretty_print_board(initialBoard)\n assert ret == '\\n|==============|\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '|==============|\\n' \\\n '|0 1 2 3 4 5 6 |'\n\ndef test_player1_pretty_print_board():\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(PLAYER1)\n\n ret = pretty_print_board(initialBoard)\n assert ret == '\\n|==============|\\n' \\\n '|X X X X X X X |\\n' \\\n '|X X X X X X X |\\n' \\\n '|X X X X X X X |\\n' \\\n '|X X X X X X X |\\n' \\\n '|X X X X X X X |\\n' \\\n '|X X X X X X X |\\n' \\\n '|==============|\\n' \\\n '|0 1 2 3 4 5 6 |'\n\ndef test_player2_pretty_print_board():\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(PLAYER2)\n\n ret = pretty_print_board(initialBoard)\n assert ret == '\\n|==============|\\n' \\\n '|O O O O O O O |\\n' \\\n '|O O O O O O O |\\n' \\\n '|O O O O O O O |\\n' \\\n '|O O O O O O O |\\n' \\\n '|O O O O O O O |\\n' \\\n '|O O O O O O O |\\n' \\\n '|==============|\\n' \\\n '|0 1 2 3 4 5 6 |'\n\ndef test_precision_pretty_print_board():\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n 
initialBoard[0,0] = PLAYER1\n\n ret = pretty_print_board(initialBoard)\n assert ret == '\\n|==============|\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '|X |\\n' \\\n '|==============|\\n' \\\n '|0 1 2 3 4 5 6 |'\n\n\ndef test_dimensions_pretty_print_board():\n\n initialBoard = np.ndarray(shape=(7, 6), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n with pytest.raises(ValueError):\n ret = pretty_print_board(initialBoard)\n\ndef test_invalid_piece_pretty_print_board():\n initialBoard = np.ndarray(shape=(7, 6), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n initialBoard[0, 0] = 60\n\n with pytest.raises(ValueError):\n ret = pretty_print_board(initialBoard)\n\n\ndef test_string_to_board():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n print = '\\n|==============|\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '| |\\n' \\\n '|==============|\\n' \\\n '|0 1 2 3 4 5 6 |'\n\n ret = string_to_board(print)\n assert ret.all() == initialBoard.all()\n\n\ndef test_drop_piece():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n ret = apply_player_action(initialBoard, 0, PLAYER1)\n drop_board = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n drop_board.fill(NO_PLAYER)\n drop_board[0,5] = 1\n\n print(ret)\n assert ret.all() == drop_board.all()\n\ndef test_connected_four_false():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n assert connected_four(initialBoard, PLAYER1, 5) == False\n\ndef test_connected_four_true():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(PLAYER1)\n\n assert connected_four(initialBoard, PLAYER1, 5) == True\n\ndef test_connected_four_row_true():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[5, 1] = 1\n initialBoard[5, 2] = 1\n 
initialBoard[5, 3] = 1\n\n print(initialBoard)\n assert connected_four(initialBoard, PLAYER1, 0) == True\n\ndef test_connected_four_row_false():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[5, 1] = 1\n\n initialBoard[5, 3] = 1\n\n print(initialBoard)\n\n with pytest.raises(AssertionError):\n assert connected_four(initialBoard, PLAYER1, 0) == True\n\n\ndef test_connected_four_BL_TR_true():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 3] = 1\n\n print(initialBoard)\n assert connected_four(initialBoard, PLAYER1, 0) == True\n\ndef test_connected_four_BL_TR_false():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n\n\n print(initialBoard)\n\n with pytest.raises(AssertionError):\n assert connected_four(initialBoard, PLAYER1, 0) == True\n\n\n\ndef test_connected_four_BR_TL_true():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 5] = 1\n initialBoard[4, 4] = 1\n initialBoard[3, 3] = 1\n initialBoard[2, 2] = 1\n\n print(initialBoard)\n assert connected_four(initialBoard, PLAYER1, 5) == True\n\ndef test_connected_four_BR_TL_false():\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 5] = 1\n initialBoard[4, 4] = 1\n initialBoard[2, 2] = 1\n\n\n assert connected_four(initialBoard, PLAYER1, 5) == False\n\n\ndef test_diagonal_check_BLTR_true():\n from agents.common import diagonal_check\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 3] = 1\n\n print(initialBoard)\n\n assert 
diagonal_check(initialBoard, PLAYER1, 0, 5, 1, -1) == True\n\ndef test_diagonal_check_TLBR_YX_true():\n from agents.common import diagonal_check\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 4] = 1\n initialBoard[4, 3] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 1] = 1\n\n print(initialBoard)\n\n assert diagonal_check(initialBoard, PLAYER1, 4, 5, -1, -1) == True\n\ndef test_TLBR_YX_true():\n from agents.common import check_connect_topleft_bottomright\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 4] = 1\n initialBoard[4, 3] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 1] = 1\n\n print(initialBoard)\n\n assert check_connect_topleft_bottomright(initialBoard, PLAYER1, 4, 0) == True\n\ndef test_diagonal_check_TLBR_XY_true():\n from agents.common import diagonal_check\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 6] = 1\n initialBoard[4, 5] = 1\n initialBoard[3, 4] = 1\n initialBoard[2, 3] = 1\n\n print(initialBoard)\n\n assert diagonal_check(initialBoard, PLAYER1, 6, 5, -1, -1) == True\n\ndef test_TLBR_XY_true():\n from agents.common import check_connect_topleft_bottomright\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 6] = 1\n initialBoard[4, 5] = 1\n initialBoard[3, 4] = 1\n initialBoard[2, 3] = 1\n\n print(initialBoard)\n\n assert check_connect_topleft_bottomright(initialBoard, PLAYER1, 6, 0)\n\ndef test_BL_TR_true():\n from agents.common import check_connect_topright_bottomleft\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 3] = 1\n\n print(initialBoard)\n\n assert check_connect_topright_bottomleft(initialBoard, PLAYER1, 0, 0) == True\n\ndef 
test_BL_TR_false():\n from agents.common import check_connect_topright_bottomleft\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n\n print(initialBoard)\n\n\n assert check_connect_topright_bottomleft(initialBoard, PLAYER1, 0, 0) == False\n\n\ndef test_end_state_win():\n from agents.common import check_end_state, GameState\n\n from agents.common import check_connect_topright_bottomleft\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n initialBoard[5, 0] = 1\n initialBoard[4, 1] = 1\n initialBoard[3, 2] = 1\n initialBoard[2, 3] = 1\n\n assert check_end_state(initialBoard, PLAYER1, 0) == GameState.IS_WIN\n\ndef test_end_state_still_playing():\n from agents.common import check_end_state, GameState\n\n initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)\n initialBoard.fill(NO_PLAYER)\n\n assert check_end_state(initialBoard, PLAYER1, 0) == GameState.STILL_PLAYING\n\n\ndef test_end_state_draw():\n from agents.common import check_end_state, GameState\n\n x = np.zeros((6, 7), dtype=int)\n x.fill(2)\n x[1::2, ::2] = 1\n x[::2, 1::2] = 1\n print(x)\n\n assert check_end_state(x, PLAYER1, 1) == GameState.IS_DRAW\n\n\ndef test_diagonal_neg():\n #str = \"|==============|\\n|O |\\n|X O |\\n|O X O |\\n|X X O O X |\\n|O X O X X |\\n|X O X X O |\\n|==============|\\n|0 1 2 3 4 5 6 |\"\n\n #board = string_to_board(str)\n\n board = np.zeros((6, 7), dtype=int)\n board[0, 0] = PLAYER2\n board[1, 1] = PLAYER2\n board[2, 2] = PLAYER2\n board[3, 3] = PLAYER2\n\n assert check_connect_topleft_bottomright(board, PLAYER2, 2, 3) == True\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.all",
"numpy.ndarray",
"numpy.zeros"
]
] |
sibeiyang/sgmn
|
[
"f09b94707bf8094d6d63353b9e5ca0ee83423ba5"
] |
[
"tools/train_dga.py"
] |
[
"import os.path as osp\nimport sys\nimport numpy as np\nimport random\nimport torch\nimport time\n\nimport _init_paths\nfrom opt import parse_opt\nfrom datasets.factory import get_db\nfrom utils.logging import Logger\nfrom utils.meter import AverageMeter\nfrom utils.osutils import mkdir_if_missing, save_checkpoint, load_checkpoint\nfrom utils import to_numpy\nimport torch.backends.cudnn as cudnn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n# dga model import\nfrom datasets.refdataset import RefDataset as RefDataset\nfrom dga_models.chain_reason import CR\nfrom models.model_utils import clip_gradient\nfrom crits.criterion import TripletLoss\n\nbest_prec = 0\nargs = parse_opt()\nopt = vars(args)\n\n\ndef main():\n global best_prec\n global opt\n\n if opt['id'] != '':\n model_id = opt['id']\n else:\n model_id = time.strftime(\"%m_%d_%H-%M-%S\")\n\n sys.stdout = Logger(osp.join(opt['log_dir'], 'log.' + model_id + '.txt'))\n\n # initialize\n checkpoint_dir = osp.join(opt['checkpoint_dir'], model_id)\n mkdir_if_missing(checkpoint_dir)\n\n # check gpu\n assert opt['gpus'] is not None\n\n # set random seed\n cudnn.benchmark = False\n cudnn.deterministic = True\n random.seed(opt['seed'])\n np.random.seed(opt['seed'])\n torch.manual_seed(opt['seed'])\n torch.cuda.manual_seed_all(opt['seed'])\n\n\n # load imdb\n train_refdb = get_db('refvg_train_' + opt['model_method'])\n vocab = train_refdb.load_dictionary()\n opt['vocab_size'] = len(vocab)\n val_refdb = get_db('refvg_val_'+opt['model_method'])\n\n # model, criterion, optimizer\n model = CR(opt)\n model = torch.nn.DataParallel(model).cuda()\n criterion = TripletLoss(opt['margin']).cuda()\n\n optimizer = torch.optim.Adam(list(model.parameters()) + list(criterion.parameters()),\n lr=opt['learning_rate'],\n betas=(opt['optim_alpha'], opt['optim_beta']),\n eps=opt['optim_epsilon'])\n\n scheduler = ReduceLROnPlateau(optimizer, factor=0.1,\n patience=3, mode='max')\n\n if opt['evaluate']:\n if 
osp.isfile(opt['model']):\n model, criterion = load_checkpoint(model, criterion, opt['model'])\n test_refdb = get_db('refvg_test_' + opt['model_method'])\n test_dataset = RefDataset(test_refdb, vocab, opt)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=opt['batch_size'], shuffle=False,\n num_workers=opt['workers'], pin_memory=True)\n test_loss, test_prec = validate(test_loader, model, criterion)\n print(test_prec)\n else:\n print(\"=> no checkpoint found at '{}'\".format(opt['model']))\n return\n\n # start training\n epoch_cur = 0\n train_dataset = RefDataset(train_refdb, vocab, opt)\n val_dataset = RefDataset(val_refdb, vocab, opt)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=opt['batch_size'], shuffle=True,\n num_workers=opt['workers'], pin_memory=True)\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=opt['batch_size'], shuffle=False,\n num_workers=opt['workers'], pin_memory=True)\n\n for epoch in range(epoch_cur, opt['max_epochs']):\n train(train_loader, model, criterion, optimizer, epoch)\n val_loss, prec = validate(val_loader, model, criterion, epoch)\n scheduler.step(prec)\n for i, param_group in enumerate(optimizer.param_groups):\n print(float(param_group['lr']))\n\n is_best = prec >= best_prec\n best_prec = max(best_prec, prec)\n save_checkpoint({\n 'model_state_dict': model.state_dict(),\n 'crit_state_dict': criterion.state_dict(),\n 'optimizer': optimizer.state_dict()}, is_best, checkpoint_dir, str(epoch))\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n global opt\n losses = AverageMeter()\n\n # switch to train mode\n model.train()\n criterion.train()\n\n step = epoch * len(train_loader)\n pred_gt_same = []\n for i, (box, cls, feature, lfeat, lrel,\n sents, sents_gt, gt_boxes, img_ids, sent_ids) in enumerate(train_loader):\n\n step += 1\n if opt['gpus'] is not None:\n box = box.cuda()\n cls = cls.cuda()\n feature = feature.cuda()\n lfeat = lfeat.cuda()\n lrel = 
lrel.cuda()\n sents = sents.cuda()\n sents_gt = sents_gt.cuda()\n\n # compute output\n score = model(feature, cls, lfeat, lrel, sents)\n loss, score = criterion(score, box, cls, sents_gt)\n\n losses.update(loss.item())\n\n cls = to_numpy(cls)\n final_score = to_numpy(score.detach())\n final_score[cls == -1] = -999\n pred_ind = np.argmax(final_score, 1)\n sents_gt = to_numpy(sents_gt)\n for j in range(pred_ind.size):\n if sents_gt[j] == pred_ind[j]:\n pred_gt_same.append(1)\n else:\n pred_gt_same.append(0)\n\n # compute gradient and do Adam step\n optimizer.zero_grad()\n loss.backward()\n clip_gradient(optimizer, opt['grad_clip'])\n optimizer.step()\n\n if i % args.print_freq == 0:\n if i != 0:\n same = np.sum(pred_gt_same[-args.print_freq*opt['batch_size']:]) / float(args.print_freq*opt['batch_size'])\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec {same:.4f}'.format(\n epoch, i, len(train_loader), loss=losses, same=same))\n\n\ndef validate(val_loader, model, criterion, epoch=-1):\n global opt\n losses = AverageMeter()\n\n # switch to eval mode\n model.eval()\n criterion.eval()\n\n pred_gt_same = []\n with torch.no_grad():\n for i, (box, cls, feature, lfeat, lrel,\n sents, sents_gt, gt_boxes, img_ids, sent_ids) in enumerate(val_loader):\n\n if opt['gpus'] is not None:\n box = box.cuda()\n cls = cls.cuda()\n feature = feature.cuda()\n lfeat = lfeat.cuda()\n lrel = lrel.cuda()\n sents = sents.cuda()\n sents_gt = sents_gt.cuda()\n\n # compute output\n score = model(feature, cls, lfeat, lrel, sents)\n loss, score = criterion(score, box, cls, sents_gt)\n losses.update(loss.item())\n\n cls = to_numpy(cls)\n final_score = to_numpy(score.detach())\n final_score[cls == -1] = -999\n pred_ind = np.argmax(final_score, 1)\n sents_gt = to_numpy(sents_gt)\n for j in range(pred_ind.size):\n if sents_gt[j] == pred_ind[j]:\n pred_gt_same.append(1)\n else:\n pred_gt_same.append(0)\n\n if i % args.print_freq == 0:\n if i != 0:\n same = 
np.sum(pred_gt_same[-args.print_freq * opt['batch_size']:]) / float(\n args.print_freq * opt['batch_size'])\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec {same:.4f}'.format(\n epoch, i, len(val_loader), loss=losses, same=same))\n\n same = np.sum(pred_gt_same) / float(len(pred_gt_same))\n print('Epoch: [{0}]\\t'\n 'Loss {1:.4f}\\t'\n 'Prec {2:.4f}'.format(epoch, losses.avg, same))\n\n return losses.avg, same\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"numpy.sum",
"torch.no_grad",
"torch.manual_seed",
"numpy.argmax",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel"
]
] |
vanshverma01/Geometric-Brownian-Motion
|
[
"6723987dccb9b3bda149f72551c8169a12eff6f1"
] |
[
"main.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nmu = 0.1\nn = 1000\nt = 1\nm = 100\ns0 = 100\nsigma = 0.3\ndt = t/n\nSt = np.exp(\n (mu - sigma ** 2 / 2) * dt\n + sigma * np.random.normal(0, np.sqrt(dt), size=(m, n)).T\n)\nSt = np.vstack([np.ones(m), St])\nSt = s0 * St.cumprod(axis=0)\ntime = np.linspace(0, t, n+1)\ntt = np.full(shape=(m, n+1), fill_value=time).T\nplt.plot(tt, St)\nplt.xlabel(\"Years $(t)$\")\nplt.ylabel(\"Stock Price $(S_t)$\")\nplt.show()\n"
] |
[
[
"numpy.full",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
NJUDCA/NER
|
[
"bbefca537e2eae17cc0117a069c4fe4f39db0afb"
] |
[
"processor/data.py"
] |
[
"\"\"\"Created by PeterLee, on Dec. 17.\"\"\"\nimport pickle, random\nimport numpy as np\nimport logging\n\n\ndef build_embedding_source(source_path, vocab_path, embedding_path):\n n_char, n_dim = 0, 0\n char2id = {}\n with open(source_path, encoding='utf-8') as fr:\n first_line = fr.readline()\n n_char = int(first_line.strip().split()[0])\n n_dim = int(first_line.strip().split()[1])\n logging.info('n_char: {}, n_dim: {}'.format(n_char, n_dim))\n char2id['<UNK>'] = 0\n embeddings = np.float32(np.random.uniform(-0.25, 0.25, (1, n_dim)))\n new_line = fr.readline()\n while(new_line):\n elements = new_line.strip().split()\n char = elements[0]\n embedding = np.array(\n [float(x) for x in elements[1:]]\n ).astype(np.float32)\n char2id[char] = len(char2id) + 1\n embeddings = np.concatenate((embeddings, np.reshape(embedding, (1, n_dim))))\n new_line = fr.readline()\n logging.info('shape of embeddings: {}'.format(embeddings.shape))\n logging.info('size of vocabulary: {}'.format(len(char2id)))\n with open(embedding_path, 'w+') as fw:\n np.savetxt(fw, embeddings, delimiter=' ', newline='\\n')\n with open(vocab_path, 'wb+') as fw:\n pickle.dump(char2id, fw)\n\n\ndef read_corpus(corpus_path):\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, tag_ = [], []\n for line in lines:\n if line != '\\n':\n [char, label] = line.strip().split()\n sent_.append(char)\n tag_.append(label)\n else:\n data.append((sent_, tag_))\n sent_, tag_ = [], []\n return data\n\n\ndef vocab_build(vocab_path, corpus_path, min_count):\n data = read_corpus(corpus_path)\n word2id = {}\n for sent_, tag in data:\n for word in sent_:\n if word.isdigit():\n word = '<NUM>'\n elif ('\\u0041' <= word <='\\u005a') or ('\\u0061' <= word <='\\u007a'):\n word = '<ENG>'\n if word not in word2id:\n word2id[word] = [len(word2id)+1, 1]\n else:\n word2id[word][1] += 1\n low_freq_words = []\n for word, [word_id, word_freq] in word2id.items():\n if word_freq < min_count and word != 
'<NUM>' and word != '<ENG>':\n low_freq_words.append(word)\n for word in low_freq_words:\n del word2id[word]\n new_id = 1\n for word in word2id.keys():\n word2id[word] = new_id\n new_id += 1\n word2id['<UNK>'] = new_id\n word2id['<PAD>'] = 0\n print(len(word2id))\n with open(vocab_path, 'wb') as fw:\n pickle.dump(word2id, fw)\n\n\ndef sentence2id(sent, word2id):\n sentence_id = []\n for word in sent:\n # # Inspecting the str whether combine by number\n # if word.isdigit():\n # word = '<NUM>'\n # # Judging the english\n # elif ('\\u0041' <= word <= '\\u005a') or ('\\u0061' <= word <= '\\u007a'): \n # word = '<ENG>'\n if word not in word2id:\n # Chinese\n word = '<UNK>'\n sentence_id.append(word2id[word])\n return sentence_id\n\n\ndef read_dictionary(vocab_path):\n with open(vocab_path, 'rb') as fr:\n word2id = pickle.load(fr)\n return word2id\n\n\ndef random_embedding(vocab, embedding_dim):\n embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), embedding_dim))\n embedding_mat = np.float32(embedding_mat)\n return embedding_mat\n\n\ndef pad_sequences(sequences, pad_mark=0):\n # padding\n max_len = max(map(lambda x: len(x), sequences))\n seq_list, seq_len_list = [], []\n for seq in sequences:\n seq = list(seq)\n seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)\n seq_list.append(seq_)\n # The length of seq\n seq_len_list.append(min(len(seq), max_len))\n return seq_list, seq_len_list\n\n\ndef batch_yield(data, batch_size, vocab, tag2label, shuffle=False):\n if shuffle:\n # Random data\n random.shuffle(data)\n seqs, labels = [], []\n\n for (sent_, tag_) in data:\n sent_ = sentence2id(sent_, vocab)\n label_ = [tag2label[tag] for tag in tag_]\n\n if len(seqs) == batch_size:\n yield seqs, labels\n seqs, labels = [], []\n seqs.append(sent_)\n labels.append(label_)\n\n if len(seqs) != 0:\n # Return an iteration\n yield seqs, labels\n"
] |
[
[
"numpy.float32",
"numpy.random.uniform",
"numpy.savetxt",
"numpy.reshape"
]
] |
kracon7/lcp-physics
|
[
"463d9602350b854464a027b2c57faae412fa2691",
"463d9602350b854464a027b2c57faae412fa2691"
] |
[
"lcp_physics/physics/constraints.py",
"tests/test_action_constraint.py"
] |
[
"import pygame\n\nimport torch\n\nfrom .utils import Indices, Defaults, cart_to_polar, polar_to_cart\n\n\nX = Indices.X\nY = Indices.Y\nDIM = Defaults.DIM\n\n\nclass Joint:\n \"\"\"Revolute joint.\n \"\"\"\n def __init__(self, body1, body2, pos):\n self.static = False\n self.num_constraints = 2\n self.body1 = body1\n self.body2 = body2\n self.pos = body1.p.new_tensor(pos)\n self.pos1 = self.pos - self.body1.pos\n self.r1, self.rot1 = cart_to_polar(self.pos1)\n self.rot2 = None\n if body2 is not None:\n self.pos2 = self.pos - self.body2.pos\n self.r2, self.rot2 = cart_to_polar(self.pos2)\n\n def J(self):\n J1 = torch.cat([torch.cat([-self.pos1[Y:Y+1], self.pos1[X:X+1]]).unsqueeze(1),\n torch.eye(DIM).type_as(self.pos)], dim=1)\n J2 = None\n if self.body2 is not None:\n J2 = torch.cat([torch.cat([self.pos2[Y:Y+1], -self.pos2[X:X+1]]).unsqueeze(1),\n -torch.eye(DIM).type_as(self.pos)], dim=1)\n return J1, J2\n\n def move(self, dt):\n self.rot1 = self.rot1 + self.body1.v[0] * dt\n if self.body2 is not None:\n self.rot2 = self.rot2 + self.body2.v[0] * dt\n self.update_pos()\n\n def update_pos(self):\n self.pos1 = polar_to_cart(self.r1, self.rot1)\n self.pos = self.body1.pos + self.pos1\n if self.body2 is not None:\n # keep position on body1 as reference\n self.pos2 = self.pos - self.body2.pos\n\n def draw(self, screen, pixels_per_meter=1):\n pos = (self.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n return [pygame.draw.circle(screen, (0, 255, 0), pos, 2)]\n\n\nclass FixedJoint:\n \"\"\"Fixed joint, fixes two bodies together.\"\"\"\n def __init__(self, body1, body2):\n self.static = False\n self.num_constraints = 3\n self.body1 = body1\n self.body2 = body2\n self.pos = body1.pos\n self.pos1 = self.pos - self.body1.pos\n self.rot1 = self.pos.new_tensor(0)\n self.rot2 = None\n self.pos2 = self.pos - self.body2.pos\n self.rot2 = self.body2.p[0] - self.body1.p[0] # inverted sign?\n\n def J(self):\n J1 = torch.cat([torch.cat([-self.pos1[Y:Y+1], 
self.pos1[X:X+1]]).unsqueeze(1),\n torch.eye(DIM).type_as(self.pos)], dim=1)\n J1 = torch.cat([J1, J1.new_tensor([1, 0, 0]).unsqueeze(0)], dim=0)\n J2 = torch.cat([torch.cat([self.pos2[Y:Y+1], -self.pos2[X:X+1]]).unsqueeze(1),\n -torch.eye(DIM).type_as(self.pos)], dim=1)\n J2 = torch.cat([J2, J2.new_tensor([-1, 0, 0]).unsqueeze(0)], dim=0)\n return J1, J2\n\n def move(self, dt):\n self.update_pos()\n\n def update_pos(self):\n self.pos = self.body1.pos\n self.pos1 = self.pos - self.body1.pos\n if self.body2 is not None:\n # keep position on body1 as reference\n self.pos2 = self.pos - self.body2.pos\n\n def draw(self, screen, pixels_per_meter=1):\n return []\n # start = (self.body1.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n # end = (self.body2.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n # return [pygame.draw.line(screen, (0, 255, 0), start, end, 2)]\n\n\nclass YConstraint:\n \"\"\"Prevents motion in the Y axis.\n \"\"\"\n def __init__(self, body1):\n self.static = True\n self.num_constraints = 1\n self.body1 = body1\n self.pos = body1.pos\n self.rot1 = self.body1.p[0]\n self.body2 = self.rot2 = None\n\n def J(self):\n J = self.pos.new_tensor([0, 0, 1]).unsqueeze(0)\n return J, None\n\n def move(self, dt):\n self.update_pos()\n\n def update_pos(self):\n self.pos = self.body1.pos\n self.rot1 = self.body1.p[0]\n\n def draw(self, screen, pixels_per_meter=1):\n pos = (self.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n return [pygame.draw.line(screen, (0, 255, 0), pos - [5, 0], pos + [5, 0], 2)]\n\n\nclass XConstraint:\n \"\"\"Prevents motion in the X axis.\n \"\"\"\n def __init__(self, body1):\n self.static = True\n self.num_constraints = 1\n self.body1 = body1\n self.pos = body1.pos\n self.rot1 = self.body1.p[0]\n self.body2 = self.rot2 = None\n\n def J(self):\n J = self.pos.new_tensor([0, 1, 0]).unsqueeze(0)\n return J, None\n\n def move(self, dt):\n self.update_pos()\n\n def update_pos(self):\n self.pos = 
self.body1.pos\n self.rot1 = self.body1.p[0]\n\n def draw(self, screen, pixels_per_meter=1):\n pos = (self.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n return [pygame.draw.line(screen, (0, 255, 0), pos - [0, 5], pos + [0, 5], 2)]\n\n\nclass RotConstraint:\n \"\"\"Prevents rotational motion.\n \"\"\"\n def __init__(self, body1):\n self.static = True\n self.num_constraints = 1\n self.body1 = body1\n self.pos = body1.pos\n self.rot1 = self.body1.p[0]\n self.body2 = self.rot2 = None\n\n def J(self):\n J = self.pos.new_tensor([1, 0, 0]).unsqueeze(0)\n return J, None\n\n def move(self, dt):\n self.update_pos()\n\n def update_pos(self):\n self.pos = self.body1.pos\n self.rot1 = self.body1.p[0]\n\n def draw(self, screen, pixels_per_meter=1):\n pos = (self.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n return [pygame.draw.circle(screen, (0, 255, 0), pos, 5, 1)]\n\n\nclass TotalConstraint:\n \"\"\"Prevents all motion.\n \"\"\"\n def __init__(self, body1):\n self.static = True\n self.num_constraints = 3\n self.body1 = body1\n self.pos = body1.pos\n self.pos1 = self.pos - self.body1.pos\n self.r1, self.rot1 = cart_to_polar(self.pos1)\n\n self.body2 = self.rot2 = None\n self.eye = torch.eye(self.num_constraints).type_as(self.pos)\n\n def J(self):\n J = self.eye\n return J, None\n\n def move(self, dt):\n self.rot1 = self.rot1 + self.body1.v[0] * dt\n self.update_pos()\n\n def update_pos(self):\n self.pos1 = polar_to_cart(self.r1, self.rot1)\n self.pos = self.body1.pos + self.pos1\n\n def draw(self, screen, pixels_per_meter=1):\n pos = (self.pos.detach().cpu().numpy() * pixels_per_meter).astype(int)\n return [pygame.draw.circle(screen, (0, 255, 0), pos + 1, 5, 1),\n pygame.draw.line(screen, (0, 255, 0), pos - [5, 0], pos + [5, 0], 2),\n pygame.draw.line(screen, (0, 255, 0), pos - [0, 5], pos + [0, 5], 2)]\n",
"'''\nPushing single circle with uncertain mass\nInitialize k hypothesis and maintain k sequence of estimations \nKeep track of their mean and variance\n'''\n\nimport os\nimport sys\n\nimport time\nimport math\nfrom math import sin, cos\nimport cv2\nimport pygame\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import path\nimport torch\nfrom torch.autograd import Variable\n\nfrom lcp_physics.physics.constraints import TotalConstraint, FixedJoint\nfrom lcp_physics.physics.forces import ExternalForce, Gravity, vert_impulse, hor_impulse\nfrom lcp_physics.physics.utils import Defaults, plot, reset_screen, Recorder\nfrom lcp_physics.physics.world import World, run_world\nfrom lcp_physics.physics.action import build_mesh, random_action\nfrom lcp_physics.physics.sim import SimSingle\n\n\nTIME = 2\nDT = Defaults.DT\nDEVICE = Defaults.DEVICE\nROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nnp.random.seed(1)\ntorch.random.manual_seed(0)\n\n\ndef plot_mass_stat(mask, gt, mean, var, save_path=None):\n I0, I1, I2 = np.zeros_like(mask).astype('float'), \\\n np.zeros_like(mask).astype('float'), \\\n np.zeros_like(mask).astype('float')\n I0[mask], I1[mask], I2[mask] = gt.numpy(), \\\n mean.detach().cpu().numpy(), \\\n var.detach().cpu().numpy()\n\n fig, ax = plt.subplots(1,3)\n im0 = ax[0].imshow(I0, vmin=0, vmax=0.3, cmap='plasma')\n im1 = ax[1].imshow(I1, vmin=0, vmax=0.3, cmap='plasma')\n im2 = ax[2].imshow(I2, vmin=0, vmax=0.1, cmap='plasma')\n\n divider = make_axes_locatable(ax[2])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\n plt.colorbar(im2, cax=cax)\n if save_path:\n plt.savefig(save_path)\n\n plt.close()\n\n\ndef get_stat(data):\n '''\n compute the mean and variace of different sequences of estimations\n Input:\n data -- list of estimated mass\n Output:\n mean -- torch tensor\n var -- torch tensor\n '''\n data = torch.stack(data)\n var, mean = 
torch.var_mean(data, 0)\n return torch.sqrt(var), mean\n\n\ndef main():\n\n obj_name = 'hammer'\n mass_img_path = os.path.join(ROOT, 'fig/%s_mass.png'%obj_name)\n bottom_fric_img_path = os.path.join(ROOT, 'fig/%s_fric.png'%obj_name)\n default_actions = {'rod1': {'action_mag': 15, 'force_time': 0.2},\n 'drill': {'action_mag': 20, 'force_time': 0.3},\n 'hammer': {'action_mag': 20, 'force_time': 0.2}}\n\n sim = SimSingle.from_img(mass_img_path, bottom_fric_img_path, particle_radius=10, \n hand_radius=5)\n sim.action_mag = default_actions[obj_name]['action_mag']\n sim.force_time = default_actions[obj_name]['force_time']\n gt_mean = sim.mass_gt.mean()\n sim.mass_est = 0.06 * torch.rand(sim.N) - 0.03 + gt_mean\n sim.mass_est.requires_grad = True\n sim.bottom_fric_est = sim.bottom_fric_gt\n\n # compute the action start point coordinates\n s = 20 # action map size\n mask = sim.mask.numpy()\n x_cord, y_cord = np.where(mask)\n mask_origin = np.stack([x_cord[0], y_cord[0]])\n xx, yy = np.arange(s) - mask_origin[0], np.arange(s) - mask_origin[1]\n xx = np.interp(np.linspace(xx[0], xx[-1], 10*s), xx, xx)\n yy = np.interp(np.linspace(yy[0], yy[-1], 10*s), yy, yy)\n\n xx, yy = np.tile(xx, (xx.shape[0], 1)).T, np.tile(yy, (yy.shape[0], 1))\n\n action_start_pts = 2 * sim.particle_radius * np.stack([xx, yy], axis=-1)\n\n # check whether start points are inside the polygon\n polygon = path.Path(sim.polygon_coord[sim.polygon[:,0]])\n inside = polygon.contains_points(action_start_pts.reshape(-1,2)).reshape(xx.shape[0], yy.shape[0])\n\n boundary = np.zeros_like(inside)\n for i in range(1, inside.shape[0]-1):\n for j in range(1, inside.shape[1]-1):\n if not inside[i,j]:\n if inside[i+1,j] or inside[i-1,j] or inside[i,j+1] or inside[i,j-1] or \\\n inside[i+1,j+1] or inside[i-1,j+1] or inside[i+1,j-1] or inside[i-1,j-1]:\n boundary[i, j] = 1\n\n fig, ax = plt.subplots(1,4)\n ax[0].imshow(sim.mask)\n ax[1].imshow(inside)\n ax[2].imshow(boundary)\n\n # ax[3].axis('square')\n polygon, 
polygon_coord, normals = sim.polygon, sim.polygon_coord, sim.normals\n for i in range(polygon.shape[0]):\n pt_coord = polygon_coord[polygon[i, 0]]\n ax[3].plot([pt_coord[0], pt_coord[0] + 3*normals[i,0]], \n [pt_coord[1], pt_coord[1] + 3*normals[i,1]], color='r')\n ax[3].plot([polygon_coord[0,0], polygon_coord[-1,0]], \n [polygon_coord[0,1], polygon_coord[-1,1]], color='deepskyblue')\n ax[3].plot(polygon_coord[:, 0], polygon_coord[:,1], color='deepskyblue')\n\n valid_start_pts = action_start_pts[boundary>0]\n for pt in valid_start_pts:\n ax[3].plot(pt[0], pt[1], 'yx')\n\n ax[3].invert_yaxis()\n ax[3].set_xlabel('x (same in pygame)')\n ax[3].set_ylabel('y (same in pygame)')\n\n \n\n plt.show()\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.cat",
"torch.eye"
],
[
"matplotlib.path.Path",
"matplotlib.pyplot.colorbar",
"numpy.zeros_like",
"torch.stack",
"torch.sqrt",
"torch.rand",
"numpy.random.seed",
"matplotlib.pyplot.savefig",
"torch.var_mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.tile",
"torch.random.manual_seed",
"numpy.where",
"numpy.stack",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
dsctt/habitat-lab
|
[
"acecaf9b709e08f3d303f624bae43305d66185cd"
] |
[
"habitat/tasks/rearrange/rearrange_sim.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport magnum as mn\nimport numpy as np\n\nimport habitat_sim\nfrom habitat.config.default import Config\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import Observations\nfrom habitat.sims.habitat_simulator.habitat_simulator import HabitatSim\nfrom habitat.tasks.rearrange.marker_info import MarkerInfo\nfrom habitat.tasks.rearrange.utils import (\n IkHelper,\n get_nav_mesh_settings,\n is_pb_installed,\n make_render_only,\n)\nfrom habitat_sim.physics import MotionType\n\n# flake8: noqa\nfrom habitat_sim.robots import FetchRobot, FetchRobotNoWheels\n\n\n@registry.register_simulator(name=\"RearrangeSim-v0\")\nclass RearrangeSim(HabitatSim):\n def __init__(self, config: Config):\n super().__init__(config)\n\n agent_config = self.habitat_config\n self.navmesh_settings = get_nav_mesh_settings(self._get_agent_config())\n self.first_setup = True\n self._should_render_debug = False\n self.ep_info: Optional[Config] = None\n self.prev_loaded_navmesh = None\n self.prev_scene_id = None\n self._is_pb_installed = is_pb_installed()\n\n # Number of physics updates per action\n self.ac_freq_ratio = agent_config.AC_FREQ_RATIO\n # The physics update time step.\n self.ctrl_freq = agent_config.CTRL_FREQ\n # Effective control speed is (ctrl_freq/ac_freq_ratio)\n self._concur_render = self.habitat_config.get(\"CONCUR_RENDER\", False)\n self._auto_sleep = self.habitat_config.get(\"AUTO_SLEEP\", False)\n self._debug_render = self.habitat_config.get(\"DEBUG_RENDER\", False)\n\n self.art_objs: List[habitat_sim.physics.ManagedArticulatedObject] = []\n self._start_art_states: Dict[\n habitat_sim.physics.ManagedArticulatedObject, List[float]\n ] = {}\n 
self._prev_obj_names: Optional[List[str]] = None\n self.scene_obj_ids: List[int] = []\n # Used to get data from the RL environment class to sensors.\n self._goal_pos = None\n self.viz_ids: Dict[Any, Any] = defaultdict(lambda: None)\n self.ref_handle_to_rigid_obj_id: Optional[Dict[str, int]] = None\n robot_cls = eval(self.habitat_config.ROBOT_TYPE)\n self.robot = robot_cls(self.habitat_config.ROBOT_URDF, self)\n self._orig_robot_js_start = np.array(self.robot.params.arm_init_params)\n self._markers: Dict[str, MarkerInfo] = {}\n\n self._ik_helper: Optional[IkHelper] = None\n\n # Disables arm control. Useful if you are hiding the arm to perform\n # some scene sensing.\n self.ctrl_arm = True\n\n from habitat.tasks.rearrange.rearrange_grasp_manager import (\n RearrangeGraspManager,\n )\n\n self.grasp_mgr: RearrangeGraspManager = RearrangeGraspManager(\n self, self.habitat_config\n )\n\n def _get_target_trans(self):\n \"\"\"\n This is how the target transforms should be accessed since\n multiprocessing does not allow pickling.\n \"\"\"\n # Preprocess the ep_info making necessary datatype conversions.\n target_trans = []\n rom = self.get_rigid_object_manager()\n for target_handle, trans in self.ep_info[\"targets\"].items():\n targ_idx = self.scene_obj_ids.index(\n rom.get_object_by_handle(target_handle).object_id\n )\n target_trans.append((targ_idx, mn.Matrix4(trans)))\n return target_trans\n\n def _try_acquire_context(self):\n if self._concur_render:\n self.renderer.acquire_gl_context()\n\n def sleep_all_objects(self):\n \"\"\"\n De-activate (sleep) all rigid objects in the scene, assuming they are already in a dynamically stable state.\n \"\"\"\n rom = self.get_rigid_object_manager()\n for _, ro in rom.get_objects_by_handle_substring().items():\n ro.awake = False\n aom = self.get_articulated_object_manager()\n for _, ao in aom.get_objects_by_handle_substring().items():\n ao.awake = False\n\n def add_markers(self, ep_info: Config):\n self._markers = {}\n aom = 
self.get_articulated_object_manager()\n for marker in ep_info[\"markers\"]:\n p = marker[\"params\"]\n ao = aom.get_object_by_handle(p[\"object\"])\n name_to_link = {}\n name_to_link_id = {}\n for i in range(ao.num_links):\n name = ao.get_link_name(i)\n link = ao.get_link_scene_node(i)\n name_to_link[name] = link\n name_to_link_id[name] = i\n\n self._markers[marker[\"name\"]] = MarkerInfo(\n p[\"offset\"],\n name_to_link[p[\"link\"]],\n ao,\n name_to_link_id[p[\"link\"]],\n )\n\n def get_marker(self, name: str) -> MarkerInfo:\n return self._markers[name]\n\n def get_all_markers(self):\n return self._markers\n\n def _update_markers(self) -> None:\n for m in self._markers.values():\n m.update()\n\n @property\n def ik_helper(self):\n if not self._is_pb_installed:\n raise ImportError(\n \"Need to install PyBullet to use IK (`pip install pybullet==3.0.4`)\"\n )\n return self._ik_helper\n\n def reconfigure(self, config: Config):\n ep_info = config[\"ep_info\"][0]\n self.instance_handle_to_ref_handle = ep_info[\"info\"][\"object_labels\"]\n\n config[\"SCENE\"] = ep_info[\"scene_id\"]\n\n super().reconfigure(config)\n self.ref_handle_to_rigid_obj_id = {}\n\n self.ep_info = ep_info\n self._try_acquire_context()\n\n if self.prev_scene_id != ep_info[\"scene_id\"]:\n self.grasp_mgr.reconfigure()\n # add and initialize the robot\n ao_mgr = self.get_articulated_object_manager()\n if self.robot.sim_obj is not None and self.robot.sim_obj.is_alive:\n ao_mgr.remove_object_by_id(self.robot.sim_obj.object_id)\n\n self.robot.reconfigure()\n self._prev_obj_names = None\n\n self.grasp_mgr.reset()\n\n # Only remove and re-add objects if we have a new set of objects.\n obj_names = [x[0] for x in ep_info[\"rigid_objs\"]]\n should_add_objects = self._prev_obj_names != obj_names\n self._prev_obj_names = obj_names\n\n self._clear_objects(should_add_objects)\n\n self.prev_scene_id = ep_info[\"scene_id\"]\n self._viz_templates: Dict[float, int] = {}\n\n # Set the default articulated object joint 
state.\n for ao, set_joint_state in self._start_art_states.items():\n ao.clear_joint_states()\n ao.joint_positions = set_joint_state\n\n # Load specified articulated object states from episode config\n self._set_ao_states_from_ep(ep_info)\n\n use_arm_start = self._orig_robot_js_start + (\n self.habitat_config.get(\"ROBOT_JOINT_START_NOISE\", 0.0)\n * np.random.randn(self._orig_robot_js_start.shape[0])\n )\n self.robot.params.arm_init_params = use_arm_start\n self.robot.reset()\n\n # consume a fixed position from SIMUALTOR.AGENT_0 if configured\n if self.habitat_config.AGENT_0.IS_SET_START_STATE:\n self.robot.base_pos = mn.Vector3(\n self.habitat_config.AGENT_0.START_POSITION\n )\n agent_rot = self.habitat_config.AGENT_0.START_ROTATION\n self.robot.sim_obj.rotation = mn.Quaternion(\n mn.Vector3(agent_rot[:3]), agent_rot[3]\n )\n\n if \"RENDER_CAMERA_OFFSET\" in self.habitat_config:\n self.robot.params.cameras[\n \"robot_third\"\n ].cam_offset_pos = mn.Vector3(\n self.habitat_config.RENDER_CAMERA_OFFSET\n )\n if \"RENDER_CAMERA_LOOKAT\" in self.habitat_config:\n self.robot.params.cameras[\n \"robot_third\"\n ].cam_look_at_pos = mn.Vector3(\n self.habitat_config.RENDER_CAMERA_LOOKAT\n )\n\n # add episode clutter objects additional to base scene objects\n self._add_objs(ep_info, should_add_objects)\n\n self.add_markers(ep_info)\n\n # auto-sleep rigid objects as optimization\n if self._auto_sleep:\n self.sleep_all_objects()\n\n # recompute the NavMesh once the scene is loaded\n # NOTE: because ReplicaCADv3_sc scenes, for example, have STATIC objects with no accompanying NavMesh files\n self._recompute_navmesh()\n\n # Get the starting positions of the target objects.\n rom = self.get_rigid_object_manager()\n scene_pos = self.get_scene_pos()\n self.target_start_pos = np.array(\n [\n scene_pos[\n self.scene_obj_ids.index(\n rom.get_object_by_handle(t_handle).object_id\n )\n ]\n for t_handle, _ in self.ep_info[\"targets\"].items()\n ]\n )\n\n if self.first_setup:\n 
self.first_setup = False\n ik_arm_urdf = self.habitat_config.get(\"IK_ARM_URDF\", None)\n if ik_arm_urdf is not None and self._is_pb_installed:\n self._ik_helper = IkHelper(\n self.habitat_config.IK_ARM_URDF,\n np.array(self.robot.params.arm_init_params),\n )\n # Capture the starting art states\n self._start_art_states = {\n ao: ao.joint_positions for ao in self.art_objs\n }\n\n def _recompute_navmesh(self):\n \"\"\"Generates the navmesh on the fly. This must be called\n AFTER adding articulated objects to the scene.\n \"\"\"\n\n # cache current motiontype and set to STATIC for inclusion in the NavMesh computation\n motion_types = []\n for art_obj in self.art_objs:\n motion_types.append(art_obj.motion_type)\n art_obj.motion_type = MotionType.STATIC\n # compute new NavMesh\n self.recompute_navmesh(\n self.pathfinder,\n self.navmesh_settings,\n include_static_objects=True,\n )\n # optionally save the new NavMesh\n if self.habitat_config.get(\"SAVE_NAVMESH\", False):\n scene_name = self.ep_info[\"scene_id\"]\n inferred_path = scene_name.split(\".glb\")[0] + \".navmesh\"\n self.pathfinder.save_nav_mesh(inferred_path)\n # reset cached MotionTypes\n for art_obj, motion_type in zip(self.art_objs, motion_types):\n art_obj.motion_type = motion_type\n\n def _clear_objects(self, should_add_objects: bool) -> None:\n if should_add_objects:\n rom = self.get_rigid_object_manager()\n for scene_obj_id in self.scene_obj_ids:\n if rom.get_library_has_id(scene_obj_id):\n rom.remove_object_by_id(scene_obj_id)\n self.scene_obj_ids = []\n\n # Do not remove the articulated objects from the scene, these are\n # managed by the underlying sim.\n self.art_objs = []\n\n def _set_ao_states_from_ep(self, ep_info: Config) -> None:\n \"\"\"\n Sets the ArticulatedObject states for the episode which are differ from base scene state.\n \"\"\"\n aom = self.get_articulated_object_manager()\n # NOTE: ep_info[\"ao_states\"]: Dict[str, Dict[int, float]] : {instance_handle -> {link_ix, state}}\n for 
aoi_handle, joint_states in ep_info[\"ao_states\"].items():\n ao = aom.get_object_by_handle(aoi_handle)\n ao_pose = ao.joint_positions\n for link_ix, joint_state in joint_states.items():\n joint_position_index = ao.get_link_joint_pos_offset(\n int(link_ix)\n )\n ao_pose[joint_position_index] = joint_state\n ao.joint_positions = ao_pose\n\n def _add_objs(self, ep_info: Config, should_add_objects: bool) -> None:\n # Load clutter objects:\n # NOTE: ep_info[\"rigid_objs\"]: List[Tuple[str, np.array]] # list of objects, each with (handle, transform)\n rom = self.get_rigid_object_manager()\n obj_counts: Dict[str, int] = defaultdict(int)\n\n for i, (obj_handle, transform) in enumerate(ep_info[\"rigid_objs\"]):\n if should_add_objects:\n obj_attr_mgr = self.get_object_template_manager()\n matching_templates = (\n obj_attr_mgr.get_templates_by_handle_substring(obj_handle)\n )\n assert (\n len(matching_templates.values()) == 1\n ), \"Duplicate object attributes matched to shortened handle. TODO: relative paths as handles should fix this. 
For now, try renaming objects to avoid collision.\"\n ro = rom.add_object_by_template_handle(\n list(matching_templates.keys())[0]\n )\n else:\n ro = rom.get_object_by_id(self.scene_obj_ids[i])\n\n # The saved matrices need to be flipped when reloading.\n ro.transformation = mn.Matrix4(\n [[transform[j][i] for j in range(4)] for i in range(4)]\n )\n\n other_obj_handle = (\n obj_handle.split(\".\")[0] + f\"_:{obj_counts[obj_handle]:04d}\"\n )\n\n if other_obj_handle in self.instance_handle_to_ref_handle:\n ref_handle = self.instance_handle_to_ref_handle[\n other_obj_handle\n ]\n # self.ref_handle_to_rigid_obj_id[ref_handle] = ro.object_id\n rel_idx = len(self.scene_obj_ids)\n self.ref_handle_to_rigid_obj_id[ref_handle] = rel_idx\n obj_counts[obj_handle] += 1\n\n if should_add_objects:\n self.scene_obj_ids.append(ro.object_id)\n\n ao_mgr = self.get_articulated_object_manager()\n for aoi_handle in ao_mgr.get_object_handles():\n self.art_objs.append(ao_mgr.get_object_by_handle(aoi_handle))\n\n def _create_obj_viz(self, ep_info: Config) -> None:\n if self._debug_render:\n for marker_name, m in self._markers.items():\n m_T = m.get_current_transform()\n self.viz_ids[marker_name] = self.visualize_position(\n m_T.translation, self.viz_ids[marker_name]\n )\n\n # TODO: refactor this\n # target_name_pos = [\n # (ep_info[\"static_objs\"][idx][0], self.scene_objs[idx], pos)\n # for idx, pos in self._get_target_trans()\n # ]\n # self.viz_obj_ids = place_viz_objs(\n # target_name_pos, self, self.viz_obj_ids\n # )\n\n def capture_state(self, with_robot_js=False) -> Dict[str, Any]:\n \"\"\"\n Record and return a dict of state info.\n\n :param with_robot_js: If true, state dict includes robot joint positions in addition.\n\n State info dict includes:\n - Robot transform\n - a list of ArticulatedObject transforms\n - a list of RigidObject transforms\n - a list of ArticulatedObject joint states\n - the object id of currently grasped object (or None)\n - (optionally) the robot's joint 
positions\n \"\"\"\n # Don't need to capture any velocity information because this will\n # automatically be set to 0 in `set_state`.\n robot_T = self.robot.sim_obj.transformation\n art_T = [ao.transformation for ao in self.art_objs]\n rom = self.get_rigid_object_manager()\n static_T = [\n rom.get_object_by_id(i).transformation for i in self.scene_obj_ids\n ]\n art_pos = [ao.joint_positions for ao in self.art_objs]\n robot_js = self.robot.sim_obj.joint_positions\n\n ret = {\n \"robot_T\": robot_T,\n \"art_T\": art_T,\n \"static_T\": static_T,\n \"art_pos\": art_pos,\n \"obj_hold\": self.grasp_mgr.snap_idx,\n }\n if with_robot_js:\n ret[\"robot_js\"] = robot_js\n return ret\n\n def set_state(self, state: Dict[str, Any], set_hold=False) -> None:\n \"\"\"\n Sets the simulation state from a cached state info dict. See capture_state().\n\n :param set_hold: If true this will set the snapped object from the `state`.\n TODO: This should probably be True by default, but I am not sure the effect\n it will have.\n \"\"\"\n rom = self.get_rigid_object_manager()\n if state[\"robot_T\"] is not None:\n self.robot.sim_obj.transformation = state[\"robot_T\"]\n n_dof = len(self.robot.sim_obj.joint_forces)\n self.robot.sim_obj.joint_forces = np.zeros(n_dof)\n self.robot.sim_obj.joint_velocities = np.zeros(n_dof)\n\n if \"robot_js\" in state:\n self.robot.sim_obj.joint_positions = state[\"robot_js\"]\n\n for T, ao in zip(state[\"art_T\"], self.art_objs):\n ao.transformation = T\n\n for T, i in zip(state[\"static_T\"], self.scene_obj_ids):\n # reset object transform\n obj = rom.get_object_by_id(i)\n obj.transformation = T\n obj.linear_velocity = mn.Vector3()\n obj.angular_velocity = mn.Vector3()\n\n for p, ao in zip(state[\"art_pos\"], self.art_objs):\n ao.joint_positions = p\n\n if set_hold:\n if state[\"obj_hold\"] is not None:\n self.internal_step(-1)\n self.grasp_mgr.snap_to_obj(state[\"obj_hold\"])\n else:\n self.grasp_mgr.desnap(True)\n\n def step(self, action: Union[str, int]) 
-> Observations:\n rom = self.get_rigid_object_manager()\n\n self._update_markers()\n\n if self._should_render_debug:\n self._try_acquire_context()\n for obj_handle, _ in self.ep_info[\"targets\"].items():\n self.set_object_bb_draw(\n False, rom.get_object_by_handle(obj_handle).object_id\n )\n\n add_back_viz_objs = {}\n for name, viz_id in self.viz_ids.items():\n if viz_id is None:\n continue\n rom = self.get_rigid_object_manager()\n viz_obj = rom.get_object_by_id(viz_id)\n before_pos = viz_obj.translation\n rom.remove_object_by_id(viz_id)\n add_back_viz_objs[name] = before_pos\n self.viz_ids = defaultdict(lambda: None)\n self.grasp_mgr.update()\n\n if self._concur_render:\n self._prev_sim_obs = self.start_async_render()\n\n for _ in range(self.ac_freq_ratio):\n self.internal_step(-1)\n # self.internal_step(0.008 * self.ac_freq_ratio)\n\n self._prev_sim_obs = self.get_sensor_observations_async_finish()\n obs = self._sensor_suite.get_observations(self._prev_sim_obs)\n else:\n for _ in range(self.ac_freq_ratio):\n self.internal_step(-1)\n # self.internal_step(0.008 * self.ac_freq_ratio)\n self._prev_sim_obs = self.get_sensor_observations()\n obs = self._sensor_suite.get_observations(self._prev_sim_obs)\n\n # TODO: Make debug cameras more flexible\n if \"robot_third_rgb\" in obs:\n self._should_render_debug = True\n self._try_acquire_context()\n for k, pos in add_back_viz_objs.items():\n self.viz_ids[k] = self.visualize_position(pos, self.viz_ids[k])\n\n # Also render debug information\n self._create_obj_viz(self.ep_info)\n\n # Always draw the target\n for obj_handle, _ in self.ep_info[\"targets\"].items():\n self.set_object_bb_draw(\n True, rom.get_object_by_handle(obj_handle).object_id\n )\n\n debug_obs = self.get_sensor_observations()\n obs[\"robot_third_rgb\"] = debug_obs[\"robot_third_rgb\"][:, :, :3]\n\n if self.habitat_config.HABITAT_SIM_V0.get(\n \"ENABLE_GFX_REPLAY_SAVE\", False\n ):\n self.gfx_replay_manager.save_keyframe()\n\n return obs\n\n def 
visualize_position(\n self,\n position: np.ndarray,\n viz_id: Optional[int] = None,\n r: float = 0.05,\n ) -> int:\n \"\"\"Adds the sphere object to the specified position for visualization purpose.\n\n :param position: global position of the visual sphere\n :param viz_id: provided if moving an existing visual sphere instead of creating a new one\n :param r: radius of the visual sphere\n\n :return: Object id of the newly added sphere. -1 if failed.\n \"\"\"\n\n rom = self.get_object_template_manager()\n viz_obj = None\n if viz_id is None:\n if r not in self._viz_templates:\n # create and register a new template for this novel sphere scaling\n template = rom.get_template_by_handle(\n rom.get_template_handles(\"sphere\")[0]\n )\n template.scale = mn.Vector3(r, r, r)\n self._viz_templates[r] = rom.register_template(\n template, \"ball_new_viz\" + str(r)\n )\n viz_obj = rom.add_object_by_template_id(self._viz_templates[r])\n make_render_only(viz_obj, self)\n else:\n viz_obj = rom.get_object_by_id(viz_id)\n\n viz_obj.translation = mn.Vector3(*position)\n return viz_obj.object_id\n\n def internal_step(self, dt: Union[int, float]) -> None:\n \"\"\"Step the world and update the robot.\n\n :param dt: Timestep by which to advance the world. Multiple physics substeps can be excecuted within a single timestep. 
-1 indicates a single physics substep.\n\n Never call sim.step_world directly or miss updating the robot.\n \"\"\"\n\n # optionally step physics and update the robot for benchmarking purposes\n if self.habitat_config.get(\"STEP_PHYSICS\", True):\n self.step_world(dt)\n if self.robot is not None and self.habitat_config.get(\n \"UPDATE_ROBOT\", True\n ):\n self.robot.update()\n\n def get_targets(self) -> Tuple[List[int], np.ndarray]:\n \"\"\"Get a mapping of object ids to goal positions for rearrange targets.\n\n :return: ([idx: int], [goal_pos: list]) The index of the target object\n in self.scene_obj_ids and the 3D goal POSITION, rotation is IGNORED.\n Note that goal_pos is the desired position of the object, not the\n starting position.\n \"\"\"\n targ_idx, targ_trans = list(zip(*self._get_target_trans()))\n\n a, b = np.array(targ_idx), [\n np.array(x.translation) for x in targ_trans\n ]\n return a, np.array(b)\n\n def get_n_targets(self) -> int:\n \"\"\"Get the number of rearrange targets.\"\"\"\n return len(self.ep_info[\"targets\"])\n\n def get_target_objs_start(self) -> np.ndarray:\n \"\"\"Get the initial positions of all objects targeted for rearrangement as a numpy array.\"\"\"\n return np.array(self.target_start_pos)\n\n def get_scene_pos(self) -> np.ndarray:\n \"\"\"Get the positions of all clutter RigidObjects in the scene as a numpy array.\"\"\"\n rom = self.get_rigid_object_manager()\n return np.array(\n [\n rom.get_object_by_id(idx).translation\n for idx in self.scene_obj_ids\n ]\n )\n"
] |
[
[
"numpy.array",
"numpy.random.randn",
"numpy.zeros"
]
] |
charlesjhill/lightning-flash
|
[
"2b19acbb5d627c609f2f7e13b48006e157781718",
"2b19acbb5d627c609f2f7e13b48006e157781718"
] |
[
"tests/data/test_data_pipeline.py",
"tests/data/test_process.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nimport torch\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch import Tensor, tensor\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data._utils.collate import default_collate\n\nfrom flash.core.data.auto_dataset import AutoDataset, IterableAutoDataset\nfrom flash.core.data.batch import _PostProcessor, _PreProcessor\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_pipeline import _StageOrchestrator, DataPipeline\nfrom flash.core.data.data_source import DataSource\nfrom flash.core.data.process import DefaultPreprocess, Postprocess, Preprocess\nfrom flash.core.model import Task\nfrom flash.core.utilities.imports import _IMAGE_AVAILABLE\n\nif _IMAGE_AVAILABLE:\n import torchvision.transforms as T\n from PIL import Image\n\n\nclass DummyDataset(torch.utils.data.Dataset):\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:\n return torch.rand(1), torch.rand(1)\n\n def __len__(self) -> int:\n return 5\n\n\n@pytest.mark.parametrize(\"use_preprocess\", [False, True])\n@pytest.mark.parametrize(\"use_postprocess\", [False, True])\ndef 
test_data_pipeline_init_and_assignement(use_preprocess, use_postprocess, tmpdir):\n\n class CustomModel(Task):\n\n def __init__(self, postprocess: Optional[Postprocess] = None):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n self._postprocess = postprocess\n\n def train_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n class SubPreprocess(DefaultPreprocess):\n pass\n\n class SubPostprocess(Postprocess):\n pass\n\n data_pipeline = DataPipeline(\n preprocess=SubPreprocess() if use_preprocess else None,\n postprocess=SubPostprocess() if use_postprocess else None,\n )\n assert isinstance(data_pipeline._preprocess_pipeline, SubPreprocess if use_preprocess else DefaultPreprocess)\n assert isinstance(data_pipeline._postprocess_pipeline, SubPostprocess if use_postprocess else Postprocess)\n\n model = CustomModel(postprocess=Postprocess())\n model.data_pipeline = data_pipeline\n # TODO: the line below should make the same effect but it's not\n # data_pipeline._attach_to_model(model)\n\n if use_preprocess:\n assert isinstance(model._preprocess, SubPreprocess)\n else:\n assert model._preprocess is None or isinstance(model._preprocess, Preprocess)\n\n if use_postprocess:\n assert isinstance(model._postprocess, SubPostprocess)\n else:\n assert model._postprocess is None or isinstance(model._postprocess, Postprocess)\n\n\ndef test_data_pipeline_is_overriden_and_resolve_function_hierarchy(tmpdir):\n\n class CustomPreprocess(DefaultPreprocess):\n\n def val_pre_tensor_transform(self, *_, **__):\n pass\n\n def predict_to_tensor_transform(self, *_, **__):\n pass\n\n def train_post_tensor_transform(self, *_, **__):\n pass\n\n def test_collate(self, *_, **__):\n pass\n\n def val_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def train_per_batch_transform_on_device(self, *_, **__):\n pass\n\n def test_per_batch_transform_on_device(self, *_, **__):\n pass\n\n preprocess = CustomPreprocess()\n data_pipeline = 
DataPipeline(preprocess=preprocess)\n\n train_func_names: Dict[str, str] = {\n k: data_pipeline._resolve_function_hierarchy(\n k, data_pipeline._preprocess_pipeline, RunningStage.TRAINING, Preprocess\n )\n for k in data_pipeline.PREPROCESS_FUNCS\n }\n val_func_names: Dict[str, str] = {\n k: data_pipeline._resolve_function_hierarchy(\n k, data_pipeline._preprocess_pipeline, RunningStage.VALIDATING, Preprocess\n )\n for k in data_pipeline.PREPROCESS_FUNCS\n }\n test_func_names: Dict[str, str] = {\n k: data_pipeline._resolve_function_hierarchy(\n k, data_pipeline._preprocess_pipeline, RunningStage.TESTING, Preprocess\n )\n for k in data_pipeline.PREPROCESS_FUNCS\n }\n predict_func_names: Dict[str, str] = {\n k: data_pipeline._resolve_function_hierarchy(\n k, data_pipeline._preprocess_pipeline, RunningStage.PREDICTING, Preprocess\n )\n for k in data_pipeline.PREPROCESS_FUNCS\n }\n\n # pre_tensor_transform\n assert train_func_names[\"pre_tensor_transform\"] == \"pre_tensor_transform\"\n assert val_func_names[\"pre_tensor_transform\"] == \"val_pre_tensor_transform\"\n assert test_func_names[\"pre_tensor_transform\"] == \"pre_tensor_transform\"\n assert predict_func_names[\"pre_tensor_transform\"] == \"pre_tensor_transform\"\n\n # to_tensor_transform\n assert train_func_names[\"to_tensor_transform\"] == \"to_tensor_transform\"\n assert val_func_names[\"to_tensor_transform\"] == \"to_tensor_transform\"\n assert test_func_names[\"to_tensor_transform\"] == \"to_tensor_transform\"\n assert predict_func_names[\"to_tensor_transform\"] == \"predict_to_tensor_transform\"\n\n # post_tensor_transform\n assert train_func_names[\"post_tensor_transform\"] == \"train_post_tensor_transform\"\n assert val_func_names[\"post_tensor_transform\"] == \"post_tensor_transform\"\n assert test_func_names[\"post_tensor_transform\"] == \"post_tensor_transform\"\n assert predict_func_names[\"post_tensor_transform\"] == \"post_tensor_transform\"\n\n # collate\n assert train_func_names[\"collate\"] == 
\"collate\"\n assert val_func_names[\"collate\"] == \"collate\"\n assert test_func_names[\"collate\"] == \"test_collate\"\n assert predict_func_names[\"collate\"] == \"collate\"\n\n # per_sample_transform_on_device\n assert train_func_names[\"per_sample_transform_on_device\"] == \"per_sample_transform_on_device\"\n assert val_func_names[\"per_sample_transform_on_device\"] == \"val_per_sample_transform_on_device\"\n assert test_func_names[\"per_sample_transform_on_device\"] == \"per_sample_transform_on_device\"\n assert predict_func_names[\"per_sample_transform_on_device\"] == \"per_sample_transform_on_device\"\n\n # per_batch_transform_on_device\n assert train_func_names[\"per_batch_transform_on_device\"] == \"train_per_batch_transform_on_device\"\n assert val_func_names[\"per_batch_transform_on_device\"] == \"per_batch_transform_on_device\"\n assert test_func_names[\"per_batch_transform_on_device\"] == \"test_per_batch_transform_on_device\"\n assert predict_func_names[\"per_batch_transform_on_device\"] == \"per_batch_transform_on_device\"\n\n train_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TRAINING)\n val_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.VALIDATING)\n test_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TESTING)\n predict_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.PREDICTING)\n\n _seq = train_worker_preprocessor.per_sample_transform\n assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform\n assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform\n assert _seq.post_tensor_transform.func == preprocess.train_post_tensor_transform\n assert train_worker_preprocessor.collate_fn.func == preprocess.collate\n assert train_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform\n\n _seq = val_worker_preprocessor.per_sample_transform\n assert _seq.pre_tensor_transform.func == 
preprocess.val_pre_tensor_transform\n assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform\n assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform\n assert val_worker_preprocessor.collate_fn.func == DataPipeline._identity\n assert val_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform\n\n _seq = test_worker_preprocessor.per_sample_transform\n assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform\n assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform\n assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform\n assert test_worker_preprocessor.collate_fn.func == preprocess.test_collate\n assert test_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform\n\n _seq = predict_worker_preprocessor.per_sample_transform\n assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform\n assert _seq.to_tensor_transform.func == preprocess.predict_to_tensor_transform\n assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform\n assert predict_worker_preprocessor.collate_fn.func == preprocess.collate\n assert predict_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform\n\n\nclass CustomPreprocess(DefaultPreprocess):\n\n def train_per_sample_transform(self, *_, **__):\n pass\n\n def train_per_batch_transform_on_device(self, *_, **__):\n pass\n\n def test_per_sample_transform(self, *_, **__):\n pass\n\n def test_per_batch_transform(self, *_, **__):\n pass\n\n def test_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def test_per_batch_transform_on_device(self, *_, **__):\n pass\n\n def val_per_batch_transform(self, *_, **__):\n pass\n\n def val_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def predict_per_sample_transform(self, *_, **__):\n pass\n\n def predict_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def 
predict_per_batch_transform_on_device(self, *_, **__):\n pass\n\n\ndef test_data_pipeline_predict_worker_preprocessor_and_device_preprocessor():\n\n preprocess = CustomPreprocess()\n data_pipeline = DataPipeline(preprocess=preprocess)\n\n data_pipeline.worker_preprocessor(RunningStage.TRAINING)\n with pytest.raises(MisconfigurationException, match=\"are mutually exclusive\"):\n data_pipeline.worker_preprocessor(RunningStage.VALIDATING)\n with pytest.raises(MisconfigurationException, match=\"are mutually exclusive\"):\n data_pipeline.worker_preprocessor(RunningStage.TESTING)\n data_pipeline.worker_preprocessor(RunningStage.PREDICTING)\n\n\ndef test_detach_preprocessing_from_model(tmpdir):\n\n class CustomModel(Task):\n\n def __init__(self, postprocess: Optional[Postprocess] = None):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n self._postprocess = postprocess\n\n def train_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n preprocess = CustomPreprocess()\n data_pipeline = DataPipeline(preprocess=preprocess)\n model = CustomModel()\n model.data_pipeline = data_pipeline\n\n assert model.train_dataloader().collate_fn == default_collate\n assert model.transfer_batch_to_device.__self__ == model\n model.on_train_dataloader()\n assert isinstance(model.train_dataloader().collate_fn, _PreProcessor)\n assert isinstance(model.transfer_batch_to_device, _StageOrchestrator)\n model.on_fit_end()\n assert model.transfer_batch_to_device.__self__ == model\n assert model.train_dataloader().collate_fn == default_collate\n\n\nclass TestPreprocess(DefaultPreprocess):\n\n def train_per_sample_transform(self, *_, **__):\n pass\n\n def train_per_batch_transform_on_device(self, *_, **__):\n pass\n\n def test_per_sample_transform(self, *_, **__):\n pass\n\n def test_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def test_per_batch_transform_on_device(self, *_, **__):\n pass\n\n def val_per_sample_transform_on_device(self, *_, **__):\n 
pass\n\n def predict_per_sample_transform(self, *_, **__):\n pass\n\n def predict_per_sample_transform_on_device(self, *_, **__):\n pass\n\n def predict_per_batch_transform_on_device(self, *_, **__):\n pass\n\n\ndef test_attaching_datapipeline_to_model(tmpdir):\n\n class SubPreprocess(DefaultPreprocess):\n pass\n\n preprocess = SubPreprocess()\n data_pipeline = DataPipeline(preprocess=preprocess)\n\n class CustomModel(Task):\n\n def __init__(self):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n self._postprocess = Postprocess()\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n pass\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n pass\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n pass\n\n def train_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n def val_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n def test_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n def predict_dataloader(self) -> Any:\n return DataLoader(DummyDataset())\n\n class TestModel(CustomModel):\n\n stages = [RunningStage.TRAINING, RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]\n on_train_start_called = False\n on_val_start_called = False\n on_test_start_called = False\n on_predict_start_called = False\n\n def on_fit_start(self):\n assert self.predict_step.__self__ == self\n self._saved_predict_step = self.predict_step\n\n def _compare_pre_processor(self, p1, p2):\n p1_seq = p1.per_sample_transform\n p2_seq = p2.per_sample_transform\n assert p1_seq.pre_tensor_transform.func == p2_seq.pre_tensor_transform.func\n assert p1_seq.to_tensor_transform.func == p2_seq.to_tensor_transform.func\n assert p1_seq.post_tensor_transform.func == p2_seq.post_tensor_transform.func\n assert p1.collate_fn.func == p2.collate_fn.func\n assert p1.per_batch_transform.func == p2.per_batch_transform.func\n\n def _assert_stage_orchestrator_state(\n self, stage_mapping: 
Dict, current_running_stage: RunningStage, cls=_PreProcessor\n ):\n assert isinstance(stage_mapping[current_running_stage], cls)\n assert stage_mapping[current_running_stage]\n\n def on_train_dataloader(self) -> None:\n current_running_stage = RunningStage.TRAINING\n self.on_train_dataloader_called = True\n collate_fn = self.train_dataloader().collate_fn # noqa F811\n assert collate_fn == default_collate\n assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n super().on_train_dataloader()\n collate_fn = self.train_dataloader().collate_fn # noqa F811\n assert collate_fn.stage == current_running_stage\n self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)\n\n def on_val_dataloader(self) -> None:\n current_running_stage = RunningStage.VALIDATING\n self.on_val_dataloader_called = True\n collate_fn = self.val_dataloader().collate_fn # noqa F811\n assert collate_fn == default_collate\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n super().on_val_dataloader()\n collate_fn = self.val_dataloader().collate_fn # noqa F811\n assert collate_fn.stage == current_running_stage\n self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)\n\n def on_test_dataloader(self) -> None:\n current_running_stage = RunningStage.TESTING\n self.on_test_dataloader_called = True\n collate_fn = self.test_dataloader().collate_fn # noqa F811\n assert collate_fn == default_collate\n assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n super().on_test_dataloader()\n collate_fn = 
self.test_dataloader().collate_fn # noqa F811\n assert collate_fn.stage == current_running_stage\n self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)\n\n def on_predict_dataloader(self) -> None:\n current_running_stage = RunningStage.PREDICTING\n self.on_predict_dataloader_called = True\n collate_fn = self.predict_dataloader().collate_fn # noqa F811\n assert collate_fn == default_collate\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n assert self.predict_step == self._saved_predict_step\n super().on_predict_dataloader()\n collate_fn = self.predict_dataloader().collate_fn # noqa F811\n assert collate_fn.stage == current_running_stage\n self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))\n assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n assert isinstance(self.predict_step, _StageOrchestrator)\n self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)\n self._assert_stage_orchestrator_state(\n self.predict_step._stage_mapping, current_running_stage, cls=_PostProcessor\n )\n\n def on_fit_end(self) -> None:\n super().on_fit_end()\n assert self.train_dataloader().collate_fn == default_collate\n assert self.val_dataloader().collate_fn == default_collate\n assert self.test_dataloader().collate_fn == default_collate\n assert self.predict_dataloader().collate_fn == default_collate\n assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)\n assert self.predict_step == self._saved_predict_step\n\n model = TestModel()\n model.data_pipeline = data_pipeline\n trainer = Trainer(fast_dev_run=True)\n trainer.fit(model)\n trainer.test(model)\n trainer.predict(model)\n\n assert 
model.on_train_dataloader_called\n assert model.on_val_dataloader_called\n assert model.on_test_dataloader_called\n assert model.on_predict_dataloader_called\n\n\ndef test_stage_orchestrator_state_attach_detach(tmpdir):\n\n model = CustomModel()\n preprocess = TestPreprocess()\n\n _original_predict_step = model.predict_step\n\n class CustomDataPipeline(DataPipeline):\n\n def _attach_postprocess_to_model(self, model: 'Task', _postprocesssor: _PostProcessor) -> 'Task':\n model.predict_step = self._model_predict_step_wrapper(model.predict_step, _postprocesssor, model)\n return model\n\n data_pipeline = CustomDataPipeline(preprocess=preprocess)\n _postprocesssor = data_pipeline._create_uncollate_postprocessors(RunningStage.PREDICTING)\n data_pipeline._attach_postprocess_to_model(model, _postprocesssor)\n assert model.predict_step._original == _original_predict_step\n assert model.predict_step._stage_mapping[RunningStage.PREDICTING] == _postprocesssor\n data_pipeline._detach_postprocess_from_model(model)\n assert model.predict_step == _original_predict_step\n\n\nclass LamdaDummyDataset(torch.utils.data.Dataset):\n\n def __init__(self, fx: Callable):\n self.fx = fx\n\n def __getitem__(self, index: int) -> Any:\n return self.fx()\n\n def __len__(self) -> int:\n return 5\n\n\nclass TestPreprocessTransformationsDataSource(DataSource):\n\n def __init__(self):\n super().__init__()\n\n self.train_load_data_called = False\n self.val_load_data_called = False\n self.val_load_sample_called = False\n self.test_load_data_called = False\n self.predict_load_data_called = False\n\n @staticmethod\n def fn_train_load_data() -> Tuple:\n return (\n 0,\n 1,\n 2,\n 3,\n )\n\n def train_load_data(self, sample) -> LamdaDummyDataset:\n assert self.training\n assert self.current_fn == \"load_data\"\n self.train_load_data_called = True\n return LamdaDummyDataset(self.fn_train_load_data)\n\n def val_load_data(self, sample, dataset) -> List[int]:\n assert self.validating\n assert self.current_fn == 
\"load_data\"\n self.val_load_data_called = True\n return list(range(5))\n\n def val_load_sample(self, sample) -> Dict[str, Tensor]:\n assert self.validating\n assert self.current_fn == \"load_sample\"\n self.val_load_sample_called = True\n return {\"a\": sample, \"b\": sample + 1}\n\n @staticmethod\n def fn_test_load_data() -> List[torch.Tensor]:\n return [torch.rand(1), torch.rand(1)]\n\n def test_load_data(self, sample) -> LamdaDummyDataset:\n assert self.testing\n assert self.current_fn == \"load_data\"\n self.test_load_data_called = True\n return LamdaDummyDataset(self.fn_test_load_data)\n\n @staticmethod\n def fn_predict_load_data() -> List[str]:\n return ([\"a\", \"b\"])\n\n def predict_load_data(self, sample) -> LamdaDummyDataset:\n assert self.predicting\n assert self.current_fn == \"load_data\"\n self.predict_load_data_called = True\n return LamdaDummyDataset(self.fn_predict_load_data)\n\n\nclass TestPreprocessTransformations(DefaultPreprocess):\n\n def __init__(self):\n super().__init__(data_sources={\"default\": TestPreprocessTransformationsDataSource()})\n\n self.train_pre_tensor_transform_called = False\n self.train_collate_called = False\n self.train_per_batch_transform_on_device_called = False\n self.val_to_tensor_transform_called = False\n self.val_collate_called = False\n self.val_per_batch_transform_on_device_called = False\n self.test_to_tensor_transform_called = False\n self.test_post_tensor_transform_called = False\n\n def train_pre_tensor_transform(self, sample: Any) -> Any:\n assert self.training\n assert self.current_fn == \"pre_tensor_transform\"\n self.train_pre_tensor_transform_called = True\n return sample + (5, )\n\n def train_collate(self, samples) -> Tensor:\n assert self.training\n assert self.current_fn == \"collate\"\n self.train_collate_called = True\n return tensor([list(s) for s in samples])\n\n def train_per_batch_transform_on_device(self, batch: Any) -> Any:\n assert self.training\n assert self.current_fn == 
\"per_batch_transform_on_device\"\n self.train_per_batch_transform_on_device_called = True\n assert torch.equal(batch, tensor([[0, 1, 2, 3, 5], [0, 1, 2, 3, 5]]))\n\n def val_to_tensor_transform(self, sample: Any) -> Tensor:\n assert self.validating\n assert self.current_fn == \"to_tensor_transform\"\n self.val_to_tensor_transform_called = True\n return sample\n\n def val_collate(self, samples) -> Dict[str, Tensor]:\n assert self.validating\n assert self.current_fn == \"collate\"\n self.val_collate_called = True\n _count = samples[0]['a']\n assert samples == [{'a': _count, 'b': _count + 1}, {'a': _count + 1, 'b': _count + 2}]\n return {'a': tensor([0, 1]), 'b': tensor([1, 2])}\n\n def val_per_batch_transform_on_device(self, batch: Any) -> Any:\n assert self.validating\n assert self.current_fn == \"per_batch_transform_on_device\"\n self.val_per_batch_transform_on_device_called = True\n if isinstance(batch, list):\n batch = batch[0]\n assert torch.equal(batch[\"a\"], tensor([0, 1]))\n assert torch.equal(batch[\"b\"], tensor([1, 2]))\n return [False]\n\n def test_to_tensor_transform(self, sample: Any) -> Tensor:\n assert self.testing\n assert self.current_fn == \"to_tensor_transform\"\n self.test_to_tensor_transform_called = True\n return sample\n\n def test_post_tensor_transform(self, sample: Tensor) -> Tensor:\n assert self.testing\n assert self.current_fn == \"post_tensor_transform\"\n self.test_post_tensor_transform_called = True\n return sample\n\n\nclass TestPreprocessTransformations2(TestPreprocessTransformations):\n\n def val_to_tensor_transform(self, sample: Any) -> Tensor:\n self.val_to_tensor_transform_called = True\n return {\"a\": tensor(sample[\"a\"]), \"b\": tensor(sample[\"b\"])}\n\n\nclass CustomModel(Task):\n\n def __init__(self):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n\n def training_step(self, batch, batch_idx):\n assert batch is None\n\n def validation_step(self, batch, batch_idx):\n if isinstance(batch, 
list):\n batch = batch[0]\n assert batch is False\n\n def test_step(self, batch, batch_idx):\n assert len(batch) == 2\n assert batch[0].shape == torch.Size([2, 1])\n\n def predict_step(self, batch, batch_idx, dataloader_idx):\n assert batch[0][0] == 'a'\n assert batch[0][1] == 'a'\n assert batch[1][0] == 'b'\n assert batch[1][1] == 'b'\n return tensor([0, 0, 0])\n\n\ndef test_datapipeline_transformations(tmpdir):\n\n datamodule = DataModule.from_data_source(\n \"default\", 1, 1, 1, 1, batch_size=2, num_workers=0, preprocess=TestPreprocessTransformations()\n )\n\n assert datamodule.train_dataloader().dataset[0] == (0, 1, 2, 3)\n batch = next(iter(datamodule.train_dataloader()))\n assert torch.equal(batch, tensor([[0, 1, 2, 3, 5], [0, 1, 2, 3, 5]]))\n\n assert datamodule.val_dataloader().dataset[0] == {'a': 0, 'b': 1}\n assert datamodule.val_dataloader().dataset[1] == {'a': 1, 'b': 2}\n with pytest.raises(MisconfigurationException, match=\"When ``to_tensor_transform``\"):\n batch = next(iter(datamodule.val_dataloader()))\n\n datamodule = DataModule.from_data_source(\n \"default\", 1, 1, 1, 1, batch_size=2, num_workers=0, preprocess=TestPreprocessTransformations2()\n )\n batch = next(iter(datamodule.val_dataloader()))\n assert torch.equal(batch[\"a\"], tensor([0, 1]))\n assert torch.equal(batch[\"b\"], tensor([1, 2]))\n\n model = CustomModel()\n trainer = Trainer(\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=1,\n limit_test_batches=2,\n limit_predict_batches=2,\n num_sanity_val_steps=1\n )\n trainer.fit(model, datamodule=datamodule)\n trainer.test(model)\n trainer.predict(model)\n\n preprocess = model._preprocess\n data_source = preprocess.data_source_of_name(\"default\")\n assert data_source.train_load_data_called\n assert preprocess.train_pre_tensor_transform_called\n assert preprocess.train_collate_called\n assert preprocess.train_per_batch_transform_on_device_called\n assert data_source.val_load_data_called\n assert 
data_source.val_load_sample_called\n assert preprocess.val_to_tensor_transform_called\n assert preprocess.val_collate_called\n assert preprocess.val_per_batch_transform_on_device_called\n assert data_source.test_load_data_called\n assert preprocess.test_to_tensor_transform_called\n assert preprocess.test_post_tensor_transform_called\n assert data_source.predict_load_data_called\n\n\ndef test_is_overriden_recursive(tmpdir):\n\n class TestPreprocess(DefaultPreprocess):\n\n def collate(self, *_):\n pass\n\n def val_collate(self, *_):\n pass\n\n preprocess = TestPreprocess()\n assert DataPipeline._is_overriden_recursive(\"collate\", preprocess, Preprocess, prefix=\"val\")\n assert DataPipeline._is_overriden_recursive(\"collate\", preprocess, Preprocess, prefix=\"train\")\n assert not DataPipeline._is_overriden_recursive(\n \"per_batch_transform_on_device\", preprocess, Preprocess, prefix=\"train\"\n )\n assert not DataPipeline._is_overriden_recursive(\"per_batch_transform_on_device\", preprocess, Preprocess)\n with pytest.raises(MisconfigurationException, match=\"This function doesn't belong to the parent class\"):\n assert not DataPipeline._is_overriden_recursive(\"chocolate\", preprocess, Preprocess)\n\n\n@pytest.mark.skipif(not _IMAGE_AVAILABLE, reason=\"image libraries aren't installed.\")\n@mock.patch(\"torch.save\") # need to mock torch.save or we get pickle error\ndef test_dummy_example(tmpdir):\n\n class ImageDataSource(DataSource):\n\n def load_data(self, folder: str):\n # from folder -> return files paths\n return [\"a.jpg\", \"b.jpg\"]\n\n def load_sample(self, path: str) -> Image.Image:\n # from a file path, load the associated image\n img8Bit = np.uint8(np.random.uniform(0, 1, (64, 64, 3)) * 255.0)\n return Image.fromarray(img8Bit)\n\n class ImageClassificationPreprocess(DefaultPreprocess):\n\n def __init__(\n self,\n train_transform=None,\n val_transform=None,\n test_transform=None,\n predict_transform=None,\n to_tensor_transform=None,\n 
train_per_sample_transform_on_device=None,\n ):\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\"default\": ImageDataSource()},\n )\n self._to_tensor = to_tensor_transform\n self._train_per_sample_transform_on_device = train_per_sample_transform_on_device\n\n def to_tensor_transform(self, pil_image: Image.Image) -> Tensor:\n # convert pil image into a tensor\n return self._to_tensor(pil_image)\n\n def train_per_sample_transform_on_device(self, sample: Any) -> Any:\n # apply an augmentation per sample on gpu for train only\n return self._train_per_sample_transform_on_device(sample)\n\n class CustomModel(Task):\n\n def __init__(self):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n\n def training_step(self, batch, batch_idx):\n assert batch.shape == torch.Size([2, 3, 64, 64])\n\n def validation_step(self, batch, batch_idx):\n assert batch.shape == torch.Size([2, 3, 64, 64])\n\n def test_step(self, batch, batch_idx):\n assert batch.shape == torch.Size([2, 3, 64, 64])\n\n class CustomDataModule(DataModule):\n\n preprocess_cls = ImageClassificationPreprocess\n\n datamodule = CustomDataModule.from_data_source(\n \"default\",\n \"train_folder\",\n \"val_folder\",\n \"test_folder\",\n None,\n batch_size=2,\n to_tensor_transform=T.ToTensor(),\n train_per_sample_transform_on_device=T.RandomHorizontalFlip(),\n )\n\n assert isinstance(datamodule.train_dataloader().dataset[0], Image.Image)\n batch = next(iter(datamodule.train_dataloader()))\n assert batch[0].shape == torch.Size([3, 64, 64])\n\n model = CustomModel()\n trainer = Trainer(\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=1,\n limit_test_batches=2,\n limit_predict_batches=2,\n num_sanity_val_steps=1\n )\n trainer.fit(model, datamodule=datamodule)\n trainer.test(model)\n\n\ndef test_preprocess_transforms(tmpdir):\n \"\"\"\n This test makes sure that 
when a preprocess is being provided transforms as dictionaries,\n checking is done properly, and collate_in_worker_from_transform is properly extracted.\n \"\"\"\n\n with pytest.raises(MisconfigurationException, match=\"Transform should be a dict.\"):\n DefaultPreprocess(train_transform=\"choco\")\n\n with pytest.raises(MisconfigurationException, match=\"train_transform contains {'choco'}. Only\"):\n DefaultPreprocess(train_transform={\"choco\": None})\n\n preprocess = DefaultPreprocess(train_transform={\"to_tensor_transform\": torch.nn.Linear(1, 1)})\n # keep is None\n assert preprocess._train_collate_in_worker_from_transform is True\n assert preprocess._val_collate_in_worker_from_transform is None\n assert preprocess._test_collate_in_worker_from_transform is None\n assert preprocess._predict_collate_in_worker_from_transform is None\n\n with pytest.raises(MisconfigurationException, match=\"`per_batch_transform` and `per_sample_transform_on_device`\"):\n preprocess = DefaultPreprocess(\n train_transform={\n \"per_batch_transform\": torch.nn.Linear(1, 1),\n \"per_sample_transform_on_device\": torch.nn.Linear(1, 1)\n }\n )\n\n preprocess = DefaultPreprocess(\n train_transform={\"per_batch_transform\": torch.nn.Linear(1, 1)},\n predict_transform={\"per_sample_transform_on_device\": torch.nn.Linear(1, 1)}\n )\n # keep is None\n assert preprocess._train_collate_in_worker_from_transform is True\n assert preprocess._val_collate_in_worker_from_transform is None\n assert preprocess._test_collate_in_worker_from_transform is None\n assert preprocess._predict_collate_in_worker_from_transform is False\n\n train_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.TRAINING)\n val_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.VALIDATING)\n test_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.TESTING)\n predict_preprocessor = 
DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.PREDICTING)\n\n assert train_preprocessor.collate_fn.func == preprocess.collate\n assert val_preprocessor.collate_fn.func == preprocess.collate\n assert test_preprocessor.collate_fn.func == preprocess.collate\n assert predict_preprocessor.collate_fn.func == DataPipeline._identity\n\n class CustomPreprocess(DefaultPreprocess):\n\n def per_sample_transform_on_device(self, sample: Any) -> Any:\n return super().per_sample_transform_on_device(sample)\n\n def per_batch_transform(self, batch: Any) -> Any:\n return super().per_batch_transform(batch)\n\n preprocess = CustomPreprocess(\n train_transform={\"per_batch_transform\": torch.nn.Linear(1, 1)},\n predict_transform={\"per_sample_transform_on_device\": torch.nn.Linear(1, 1)}\n )\n # keep is None\n assert preprocess._train_collate_in_worker_from_transform is True\n assert preprocess._val_collate_in_worker_from_transform is None\n assert preprocess._test_collate_in_worker_from_transform is None\n assert preprocess._predict_collate_in_worker_from_transform is False\n\n data_pipeline = DataPipeline(preprocess=preprocess)\n\n train_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TRAINING)\n with pytest.raises(MisconfigurationException, match=\"`per_batch_transform` and `per_sample_transform_on_device`\"):\n val_preprocessor = data_pipeline.worker_preprocessor(RunningStage.VALIDATING)\n with pytest.raises(MisconfigurationException, match=\"`per_batch_transform` and `per_sample_transform_on_device`\"):\n test_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TESTING)\n predict_preprocessor = data_pipeline.worker_preprocessor(RunningStage.PREDICTING)\n\n assert train_preprocessor.collate_fn.func == preprocess.collate\n assert predict_preprocessor.collate_fn.func == DataPipeline._identity\n\n\ndef test_iterable_auto_dataset(tmpdir):\n\n class CustomDataSource(DataSource):\n\n def load_sample(self, index: int) -> Dict[str, int]:\n 
return {\"index\": index}\n\n ds = IterableAutoDataset(range(10), data_source=CustomDataSource(), running_stage=RunningStage.TRAINING)\n\n for index, v in enumerate(ds):\n assert v == {\"index\": index}\n\n\nclass CustomPreprocessHyperparameters(DefaultPreprocess):\n\n def __init__(self, token: str, *args, **kwargs):\n self.token = token\n super().__init__(*args, **kwargs)\n\n @classmethod\n def load_from_state_dict(cls, state_dict: Dict[str, Any]):\n return cls(state_dict[\"token\"])\n\n def state_dict(self) -> Dict[str, Any]:\n return {\"token\": self.token}\n\n\ndef local_fn(x):\n return x\n\n\ndef test_save_hyperparemeters(tmpdir):\n\n kwargs = {\"train_transform\": {\"pre_tensor_transform\": local_fn}}\n preprocess = CustomPreprocessHyperparameters(\"token\", **kwargs)\n state_dict = preprocess.state_dict()\n torch.save(state_dict, os.path.join(tmpdir, \"state_dict.pt\"))\n state_dict = torch.load(os.path.join(tmpdir, \"state_dict.pt\"))\n preprocess = CustomPreprocessHyperparameters.load_from_state_dict(state_dict)\n assert isinstance(preprocess, CustomPreprocessHyperparameters)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom unittest.mock import Mock\n\nimport pytest\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader\n\nfrom flash import Task, Trainer\nfrom flash.core.classification import Labels, LabelsState\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_pipeline import DataPipeline, DataPipelineState, DefaultPreprocess\nfrom flash.core.data.data_source import DefaultDataSources\nfrom flash.core.data.process import Serializer, SerializerMapping\nfrom flash.core.data.properties import ProcessState, Properties\n\n\ndef test_properties_data_pipeline_state():\n \"\"\"Tests that ``get_state`` and ``set_state`` work for properties and that ``DataPipelineState`` is attached\n correctly.\"\"\"\n\n class MyProcessState1(ProcessState):\n pass\n\n class MyProcessState2(ProcessState):\n pass\n\n class OtherProcessState(ProcessState):\n pass\n\n my_properties = Properties()\n my_properties.set_state(MyProcessState1())\n assert my_properties._state == {MyProcessState1: MyProcessState1()}\n assert my_properties.get_state(OtherProcessState) is None\n\n data_pipeline_state = DataPipelineState()\n data_pipeline_state.set_state(OtherProcessState())\n my_properties.attach_data_pipeline_state(data_pipeline_state)\n assert my_properties.get_state(OtherProcessState) == 
OtherProcessState()\n\n my_properties.set_state(MyProcessState2())\n assert data_pipeline_state.get_state(MyProcessState2) == MyProcessState2()\n\n\ndef test_serializer():\n \"\"\"Tests that ``Serializer`` can be enabled and disabled correctly.\"\"\"\n\n my_serializer = Serializer()\n\n assert my_serializer.serialize('test') == 'test'\n my_serializer.serialize = Mock()\n\n my_serializer.disable()\n assert my_serializer('test') == 'test'\n my_serializer.serialize.assert_not_called()\n\n my_serializer.enable()\n my_serializer('test')\n my_serializer.serialize.assert_called_once()\n\n\ndef test_serializer_mapping():\n \"\"\"Tests that ``SerializerMapping`` correctly passes its inputs to the underlying serializers. Also checks that\n state is retrieved / loaded correctly.\"\"\"\n\n serializer1 = Serializer()\n serializer1.serialize = Mock(return_value='test1')\n\n class Serializer1State(ProcessState):\n pass\n\n serializer2 = Serializer()\n serializer2.serialize = Mock(return_value='test2')\n\n class Serializer2State(ProcessState):\n pass\n\n serializer_mapping = SerializerMapping({'key1': serializer1, 'key2': serializer2})\n assert serializer_mapping({'key1': 'serializer1', 'key2': 'serializer2'}) == {'key1': 'test1', 'key2': 'test2'}\n serializer1.serialize.assert_called_once_with('serializer1')\n serializer2.serialize.assert_called_once_with('serializer2')\n\n with pytest.raises(ValueError, match='output must be a mapping'):\n serializer_mapping('not a mapping')\n\n serializer1_state = Serializer1State()\n serializer2_state = Serializer2State()\n\n serializer1.set_state(serializer1_state)\n serializer2.set_state(serializer2_state)\n\n data_pipeline_state = DataPipelineState()\n serializer_mapping.attach_data_pipeline_state(data_pipeline_state)\n\n assert serializer1._data_pipeline_state is data_pipeline_state\n assert serializer2._data_pipeline_state is data_pipeline_state\n\n assert data_pipeline_state.get_state(Serializer1State) is serializer1_state\n assert 
data_pipeline_state.get_state(Serializer2State) is serializer2_state\n\n\ndef test_saving_with_serializers(tmpdir):\n\n checkpoint_file = os.path.join(tmpdir, 'tmp.ckpt')\n\n class CustomModel(Task):\n\n def __init__(self):\n super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())\n\n serializer = Labels([\"a\", \"b\"])\n model = CustomModel()\n trainer = Trainer(fast_dev_run=True)\n data_pipeline = DataPipeline(preprocess=DefaultPreprocess(), serializer=serializer)\n data_pipeline.initialize()\n model.data_pipeline = data_pipeline\n assert isinstance(model.preprocess, DefaultPreprocess)\n dummy_data = DataLoader(list(zip(torch.arange(10, dtype=torch.float), torch.arange(10, dtype=torch.float))))\n trainer.fit(model, train_dataloader=dummy_data)\n trainer.save_checkpoint(checkpoint_file)\n model = CustomModel.load_from_checkpoint(checkpoint_file)\n assert isinstance(model._data_pipeline_state, DataPipelineState)\n assert model._data_pipeline_state._state[LabelsState] == LabelsState([\"a\", \"b\"])\n\n\nclass CustomPreprocess(DefaultPreprocess):\n\n def __init__(self):\n super().__init__(\n data_sources={\n \"test\": Mock(return_value=\"test\"),\n DefaultDataSources.TENSORS: Mock(return_value=\"tensors\"),\n },\n default_data_source=\"test\",\n )\n\n\ndef test_data_source_of_name():\n\n preprocess = CustomPreprocess()\n\n assert preprocess.data_source_of_name(\"test\")() == \"test\"\n assert preprocess.data_source_of_name(DefaultDataSources.TENSORS)() == \"tensors\"\n assert preprocess.data_source_of_name(\"tensors\")() == \"tensors\"\n assert preprocess.data_source_of_name(\"default\")() == \"test\"\n\n with pytest.raises(MisconfigurationException, match=\"available data sources are: test, tensor\"):\n preprocess.data_source_of_name(\"not available\")\n\n\ndef test_available_data_sources():\n\n preprocess = CustomPreprocess()\n\n assert DefaultDataSources.TENSORS in preprocess.available_data_sources()\n assert \"test\" in 
preprocess.available_data_sources()\n assert len(preprocess.available_data_sources()) == 3\n\n data_module = DataModule(preprocess=preprocess)\n\n assert DefaultDataSources.TENSORS in data_module.available_data_sources()\n assert \"test\" in data_module.available_data_sources()\n assert len(data_module.available_data_sources()) == 3\n"
] |
[
[
"torch.Size",
"torch.rand",
"torch.nn.Linear",
"torch.nn.MSELoss",
"numpy.random.uniform",
"torch.tensor"
],
[
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.arange"
]
] |
Pasha62/refactoring-task-at-19
|
[
"50e67e4c3004e255869e18bfdf1be7c059486ce1"
] |
[
"filter_1.py"
] |
[
"from PIL import Image\nimport numpy as np\nimg = Image.open(\"img2.jpg\")\narr = np.array(img)\na = len(arr)\na1 = len(arr[1])\ni = 0\nwhile i < a:\n j = 0\n while j < a1:\n s = 0\n for n in range(i, min(i + 10, a - 1)):\n for d1 in range(j, j + 10):\n n1 = arr[n][d1][0]\n n2 = arr[n][d1][1]\n n3 = arr[n][d1][2]\n M = int(n1)\n M = (M + n2 + n3) // 3\n s += M\n s = int(s // 100)\n # print(s)\n for n in range(i, min(i + 10, a - 1)):\n for d1 in range(j, j + 10):\n arr[n][d1][0] = int(s // 50) * 50\n arr[n][d1][1] = int(s // 50) * 50\n arr[n][d1][2] = int(s // 50) * 50\n j = j + 10\n i = i + 10\nres = Image.fromarray(arr)\nres.save('res.jpg')\n"
] |
[
[
"numpy.array"
]
] |
micha7a/surface-reconstruction
|
[
"00094419bb7967ea0cd781c1520ed62926fcc848"
] |
[
"simulate_alpha_less_than_alpha_c.py"
] |
[
"import numpy as np\nfrom unwarping_functions import *\n\n# --------------------------------------------------------#\n# Settings\n# --------------------------------------------------------#\n\nK = 5\nnp.random.seed(100)\niter_max = 10000\nnoise_num = 100\nSNR = np.linspace(-20, 100, noise_num)\natLeastOneSol = True\na_orig = np.random.uniform(-1, 1, 2*K+1)\na_orig = (a_orig + a_orig[::-1]) / 2.\nredundancy = 1000\nx = np.linspace(-100, 100, 1000000)\nw = np.linspace(-np.pi, np.pi, 1000000)\nb_orig = 4\nT = (2 * K + 1) / (2 * np.pi * b_orig) * dirichlet_inverse((2 * (2 * K + 1)) / (3 * np.pi), K)\nT = T / 3\nalpha_orig = 2 * np.pi / (2 * K + 1) * T * b_orig\nb_orig_periodized = (2 * K + 1) * np.abs(periodize_angle(alpha_orig)) / (2 * np.pi * T)\n\nn = np.arange(np.int(np.min(x) / T), np.int(np.max(x) / T) + 1)\nsample_points = n * T\nh_n = g_fun(b_orig * sample_points, a_orig, K)\n\n# --------------------------------------------------------#\n# Noise study\n# --------------------------------------------------------#\n\nnoise_sig_list = np.std(h_n) / (10**(SNR / 20.))\nb_error_closest_b = np.zeros(len(noise_sig_list))\nh_error_closest_b = np.zeros(len(noise_sig_list))\nb_error_closest_h_n = np.zeros(len(noise_sig_list))\nh_error_closest_h_n = np.zeros(len(noise_sig_list))\nzero_sol_cases = np.zeros(len(noise_sig_list))\nmultiple_sol_cases = np.zeros(len(noise_sig_list))\n\nfor noise_ind, noise_sig in enumerate(noise_sig_list):\n print('computing noise number ' + str(noise_ind+1) + ' out of ' + str(noise_num))\n counter = 0\n for iter in range(iter_max):\n noise = np.random.normal(loc=0, scale=noise_sig, size=h_n.shape)\n h_n_noisy = h_n + noise\n unwarped = unwarp(h_n_noisy, K, T, sample_points, redundancy, atLeastOneSol)\n b_estimated = unwarped[0]\n a_estimated = unwarped[1]\n assert(len(b_estimated) == len(a_estimated))\n if len(b_estimated) == 0:\n zero_sol_cases[noise_ind] += 1\n elif len(b_estimated) >= 1:\n b_error = np.array([np.abs(b_est - 
b_orig_periodized) / np.abs(b_orig_periodized) for b_est in b_estimated])\n h_n_hat = np.array([compute_h_n_hat(a_est, b_est, K, sample_points) for a_est, b_est in zip(a_estimated,\n b_estimated)])\n h_error = np.array([np.linalg.norm(h_n - h_n_h, 2) / np.linalg.norm(h_n, 2) for h_n_h in h_n_hat])\n\n b_error_closest_b[noise_ind] += b_error[np.argmin(b_error)]\n h_error_closest_b[noise_ind] += h_error[np.argmin(b_error)]\n b_error_closest_h_n[noise_ind] += b_error[np.argmin(h_error)]\n h_error_closest_h_n[noise_ind] += h_error[np.argmin(h_error)]\n\n counter += 1\n if len(b_estimated) > 1:\n multiple_sol_cases[noise_ind] += 1\n\n b_error_closest_b[noise_ind] /= counter\n h_error_closest_b[noise_ind] /= counter\n b_error_closest_h_n[noise_ind] /= counter\n h_error_closest_h_n[noise_ind] /= counter\n\nnp.savez('noise_vars_unique.npz', zero_sol_cases=zero_sol_cases, multiple_sol_cases=multiple_sol_cases,\n iter_max=iter_max, h_n=h_n, SNR=SNR, b_error_closest_b=b_error_closest_b, h_error_closest_b=h_error_closest_b,\n b_error_closest_h_n=b_error_closest_h_n, h_error_closest_h_n=h_error_closest_h_n, a_orig=a_orig, b_orig=b_orig,\n T=T)"
] |
[
[
"numpy.max",
"numpy.random.normal",
"numpy.linalg.norm",
"numpy.argmin",
"numpy.random.seed",
"numpy.min",
"numpy.std",
"numpy.random.uniform",
"numpy.savez",
"numpy.abs",
"numpy.linspace"
]
] |
mrlooi/maskrcnn-benchmark
|
[
"135168ddda9436eead21fc945c192cffd8421e6a",
"135168ddda9436eead21fc945c192cffd8421e6a"
] |
[
"maskrcnn_benchmark/modeling/roi_heads/keypoint_head/keypoint_head.py",
"tests/test_rpn_post_processor.py"
] |
[
"import torch\n\nfrom .roi_keypoint_feature_extractors import make_roi_keypoint_feature_extractor\nfrom .roi_keypoint_predictors import make_roi_keypoint_predictor\nfrom .inference import make_roi_keypoint_post_processor\nfrom .loss import make_roi_keypoint_loss_evaluator\n\n\nclass ROIKeypointHead(torch.nn.Module):\n def __init__(self, cfg, in_channels):\n super(ROIKeypointHead, self).__init__()\n self.cfg = cfg.clone()\n self.feature_extractor = make_roi_keypoint_feature_extractor(cfg, in_channels)\n self.predictor = make_roi_keypoint_predictor(\n cfg, self.feature_extractor.out_channels)\n self.post_processor = make_roi_keypoint_post_processor(cfg)\n self.loss_evaluator = make_roi_keypoint_loss_evaluator(cfg)\n\n def forward(self, features, proposals, targets=None):\n \"\"\"\n Arguments:\n features (list[Tensor]): feature-maps from possibly several levels\n proposals (list[BoxList]): proposal boxes\n targets (list[BoxList], optional): the ground-truth targets.\n\n Returns:\n x (Tensor): the result of the feature extractor\n proposals (list[BoxList]): during training, the original proposals\n are returned. During testing, the predicted boxlists are returned\n with the `mask` field set\n losses (dict[Tensor]): During training, returns the losses for the\n head. 
During testing, returns an empty dict.\n \"\"\"\n if self.training:\n with torch.no_grad():\n proposals = self.loss_evaluator.subsample(proposals, targets)\n if self.training and self.cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:\n raise NotImplementedError(\"SHARE_BOX_FEATURE_EXTRACTOR is not implemented\")\n # x = list(features)\n # pos_inds = torch.cat(positive_inds, dim=0)\n # x[0] = x[0][pos_inds]\n # x[1] = x[1][pos_inds]\n x = self.feature_extractor(features, proposals)\n kp_logits = self.predictor(x)\n\n if not self.training:\n result = self.post_processor(kp_logits, proposals)\n return x, result, {}\n\n loss_kp = self.loss_evaluator(proposals, kp_logits)\n\n return x, proposals, dict(loss_kp=loss_kp)\n\n\ndef build_roi_keypoint_head(cfg, in_channels):\n return ROIKeypointHead(cfg, in_channels)\n",
"import numpy as np\nimport torch\n# import cv2\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\nfrom maskrcnn_benchmark.modeling.rrpn.anchor_generator import \\\n make_anchor_generator as make_rrpn_anchor_generator, convert_rect_to_pts2\nfrom maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask\n\n\ndef get_image_list_and_feature_maps(image, feature_strides):\n N, C, H, W = image.shape\n image_list = to_image_list(image, cfg.DATALOADER.SIZE_DIVISIBILITY)\n\n feature_maps = [torch.zeros(N,1,H//s,W//s, device=image.device) for s in feature_strides]\n return image_list, feature_maps\n\n\n\ndef test_rpn_post_processor(image_tensor, targets_data):\n from maskrcnn_benchmark.modeling.rpn.anchor_generator import make_anchor_generator\n from maskrcnn_benchmark.modeling.rpn.inference import make_rpn_postprocessor\n from maskrcnn_benchmark.modeling.rpn.loss import make_rpn_loss_evaluator\n\n N, C, H, W = image_tensor.shape\n targets = [BoxList(td, (W,H), mode=\"xyxy\") for td in targets_data]\n\n device = image_tensor.device\n\n REGRESSION_CN = 4\n\n USE_FPN = False\n CFG_RPN = cfg.MODEL.RPN\n\n if USE_FPN:\n CFG_RPN.ANCHOR_STRIDE = tuple(np.array(CFG_RPN.ANCHOR_SIZES) // 8)\n\n image_list, feature_maps = get_image_list_and_feature_maps(image_tensor, CFG_RPN.ANCHOR_STRIDE)\n\n anchor_generator = make_anchor_generator(cfg)\n num_anchors = anchor_generator.num_anchors_per_location()\n\n anchors = anchor_generator.forward(image_list, feature_maps)\n\n objectness = []\n box_regression = []\n for ix, fm in enumerate(feature_maps):\n n_anchors = num_anchors[ix]\n N,_,h,w = fm.shape\n objectness.append(torch.rand(N, n_anchors, h, w, device=device))\n box_regression.append(torch.rand(N, n_anchors*REGRESSION_CN, h, w, device=device))\n\n # train mode\n postprocessor_train = make_rpn_postprocessor(cfg, rpn_box_coder=None, 
is_train=True)\n postprocessor_train.train()\n\n result = postprocessor_train.forward(anchors, objectness, box_regression, targets=targets)\n\n # check loss\n loss_evaluator = make_rpn_loss_evaluator(cfg, postprocessor_train.box_coder)\n loss_objectness, loss_rpn_box_reg = loss_evaluator(anchors, objectness, box_regression, targets)\n\n # test mode\n postprocessor_test = make_rpn_postprocessor(cfg, rpn_box_coder=None, is_train=False)\n postprocessor_test.eval()\n\n result = postprocessor_test.forward(anchors, objectness, box_regression)\n\n\ndef test_rrpn_post_processor(image_tensor, targets_data):\n from maskrcnn_benchmark.modeling.rrpn.inference import make_rpn_postprocessor, REGRESSION_CN\n from maskrcnn_benchmark.modeling.rrpn.loss import make_rpn_loss_evaluator\n\n N, C, H, W = image_tensor.shape\n\n\n targets = []\n for ix, td in enumerate(targets_data):\n rect_pts = convert_rect_to_pts2(td)#.reshape((len(td), 8))\n nn = len(rect_pts)\n bboxes = np.zeros((nn, 4), dtype=np.float32)\n bboxes[:, :2] = np.min(rect_pts, axis=1)\n bboxes[:, 2:] = np.max(rect_pts, axis=1)\n boxlist = BoxList(bboxes, (W,H), mode=\"xyxy\")\n mm = SegmentationMask(rect_pts.reshape(nn, 1, 8).tolist(), (W,H), mode='poly')\n boxlist.add_field(\"masks\", mm)\n targets.append(boxlist)\n\n device = image_tensor.device\n\n\n USE_FPN = False\n cfg.MODEL.ROTATED = True\n CFG_RPN = cfg.MODEL.RPN\n\n CFG_RPN.ANCHOR_ANGLES = (-90, -54, -18, 18, 54)\n CFG_RPN.ANCHOR_SIZES = (48, 84, 128, 224)\n CFG_RPN.ANCHOR_STRIDE = (16,)\n CFG_RPN.ASPECT_RATIOS = (1.0, 2.0)\n\n if USE_FPN:\n CFG_RPN.ANCHOR_STRIDE = tuple(np.array(CFG_RPN.ANCHOR_SIZES) // 8)\n CFG_RPN.POST_NMS_TOP_N_TRAIN = 100\n\n image_list, feature_maps = get_image_list_and_feature_maps(image_tensor, CFG_RPN.ANCHOR_STRIDE)\n\n anchor_generator = make_rrpn_anchor_generator(cfg)\n num_anchors = anchor_generator.num_anchors_per_location()\n\n anchors = anchor_generator.forward(image_list, feature_maps)\n\n objectness = []\n box_regression = []\n 
for ix, fm in enumerate(feature_maps):\n n_anchors = num_anchors[ix]\n N,_,h,w = fm.shape\n objectness.append(torch.rand(N, n_anchors, h, w, device=device))\n box_regression.append(torch.rand(N, n_anchors*REGRESSION_CN, h, w, device=device))\n\n # train mode\n postprocessor_train = make_rpn_postprocessor(cfg, rpn_box_coder=None, is_train=True)\n postprocessor_train.train()\n\n # result = postprocessor_train.forward(anchors, objectness, box_regression, targets=targets)\n\n # check loss\n loss_evaluator = make_rpn_loss_evaluator(cfg, postprocessor_train.box_coder)\n loss_objectness, loss_rpn_box_reg = loss_evaluator(anchors, objectness, box_regression, targets)\n\n # test mode\n postprocessor_test = make_rpn_postprocessor(cfg, rpn_box_coder=None, is_train=False)\n postprocessor_test.eval()\n\n result = postprocessor_test.forward(anchors, objectness, box_regression)\n\n\n\nif __name__ == '__main__':\n N = 1\n C = 3\n H = 160\n W = 240\n\n device = 'cpu'\n image = torch.zeros(N,C,H,W, device=device)\n targets = np.array([\n [50, 50, 100, 100, 0],\n [50, 50, 50, 50, -90]\n ], dtype=np.float32)\n bbox_targets = np.array([\n [0, 0, 100, 100],\n [25, 25, 75, 75]\n ], dtype=np.float32)\n\n targets = [targets for ix in range(N)]\n bbox_targets = [bbox_targets for ix in range(N)]\n\n test_rpn_post_processor(image, bbox_targets)\n test_rrpn_post_processor(image, targets)\n\n from maskrcnn_benchmark.modeling.rrpn.utils import get_segmentation_mask_rotated_rect_tensor\n tt = []\n for ix, td in enumerate(targets):\n rect_pts = convert_rect_to_pts2(td)#.reshape((len(td), 8))\n nn = len(rect_pts)\n bboxes = np.zeros((nn, 4), dtype=np.float32)\n bboxes[:, :2] = np.min(rect_pts, axis=1)\n bboxes[:, 2:] = np.max(rect_pts, axis=1)\n boxlist = BoxList(bboxes, (W,H), mode=\"xyxy\")\n mm = SegmentationMask(rect_pts.reshape(nn, 1, 8).tolist(), (W,H), mode='poly')\n boxlist.add_field(\"masks\", mm)\n tt.append(boxlist)\n\n rrect_tensor = get_segmentation_mask_rotated_rect_tensor(mm)\n"
] |
[
[
"torch.no_grad"
],
[
"torch.zeros",
"numpy.max",
"numpy.array",
"torch.rand",
"numpy.zeros",
"numpy.min"
]
] |
kim66003/ML4QS_group25
|
[
"cd7f838e95f1583701892175670d7d0c8da0e1be"
] |
[
"Python3Code/Chapter7/Evaluation.py"
] |
[
"##############################################################\n# #\n# Mark Hoogendoorn and Burkhardt Funk (2017) #\n# Machine Learning for the Quantified Self #\n# Springer #\n# Chapter 7 #\n# #\n##############################################################\n\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nimport math\n\n# Class for evaluation metrics of classification problems.\nclass ClassificationEvaluation:\n\n # Returns the accuracy given the true and predicted values.\n def accuracy(self, y_true, y_pred):\n return metrics.accuracy_score(y_true, y_pred)\n\n # Returns the precision given the true and predicted values.\n # Note that it returns the precision per class.\n def precision(self, y_true, y_pred):\n return metrics.precision_score(y_true, y_pred, average=None)\n\n # Returns the recall given the true and predicted values.\n # Note that it returns the recall per class.\n def recall(self, y_true, y_pred):\n return metrics.recall_score(y_true, y_pred, average=None)\n\n # Returns the f1 given the true and predicted values.\n # Note that it returns the recall per class.\n def f1(self, y_true, y_pred):\n return metrics.f1_score(y_true, y_pred, average=None)\n\n # Returns the area under the curve given the true and predicted values.\n # Note: we expect a binary classification problem here(!)\n def auc(self, y_true, y_pred_prob):\n return metrics.roc_auc_score(y_true, y_pred_prob)\n\n # Returns the confusion matrix given the true and predicted values.\n def confusion_matrix(self, y_true, y_pred, labels):\n return metrics.confusion_matrix(y_true, y_pred, labels=labels)\n\n# Class for evaluation metrics of regression problems.\nclass RegressionEvaluation:\n\n # Returns the mean squared error between the true and predicted values.\n def mean_squared_error(self, y_true, y_pred):\n return metrics.mean_squared_error(y_true, y_pred)\n\n # Returns the mean squared error between the true and predicted values.\n def 
mean_squared_error_with_std(self, y_true, y_pred):\n y_true = np.array(y_true)\n y_pred = np.array(y_pred)\n errors = np.square(y_true-y_pred)\n mse = errors.mean()\n std = errors.std()\n return mse.mean(), std.mean()\n\n # Returns the mean absolute error between the true and predicted values.\n def mean_absolute_error(self, y_true, y_pred):\n return metrics.mean_absolute_error(y_true, y_pred)\n\n # Return the mean absolute error between the true and predicted values\n # as well as its standard deviation.\n def mean_absolute_error_with_std(self, y_true, y_pred):\n errors = np.absolute((y_pred - y_true))\n return errors.mean(), errors.std()\n"
] |
[
[
"numpy.square",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.mean_absolute_error",
"numpy.absolute",
"sklearn.metrics.precision_score",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.recall_score"
]
] |
xDiaym/poom
|
[
"8f0e59bc0acc39b77fe761f9c1e2386e37bc6d78"
] |
[
"setup.py"
] |
[
"import numpy\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nfrom Cython.Build import cythonize # isort: skip strange build system bug\n\n\ndef read(filename: str) -> str:\n with open(filename, \"r\", encoding=\"utf8\") as fp:\n return fp.read()\n\n\nextensions = [\n Extension(\n name=\"poom.pooma.ray_march\",\n sources=[\"poom/pooma/ray_march.pyx\"],\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n ),\n Extension(name=\"poom.pooma.math\", sources=[\"poom/pooma/math.pyx\"]),\n]\n\nsetup(\n name=\"poom\",\n description=read(\"README.md\"),\n ext_modules=cythonize(extensions, language_level=3),\n include_dirs=numpy.get_include(),\n)\n"
] |
[
[
"numpy.get_include"
]
] |
PandoraLS/SpeechEnhancement
|
[
"f548eaafbe524a40c8cfd2221f7adf3a444b7a7d"
] |
[
"joint_train.py"
] |
[
"# -*- coding: utf-8 -*-\n# Author:sen\n# Date:2020/3/22 15:47\n\nimport argparse\nimport os\nimport json5\nimport torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom util.utils import initialize_config\nfrom trainer.trainer import JointTrainer\n\n# TODO 目前还未将joint_loss_function写成一个总的Class,只是嵌入到了JointTrainer中了,\n# 下一步需要调整为一个大的class,并且匹配BaseTrainer中的loss_function\n# TODO 训练过程实际上可以修改为学习率逐渐减小的过程\n# TODO 增强后的语音如何保存下来\n# TODO UNet系列方法的原理与效果\ndef main(config, resume):\n \n torch.manual_seed(int(config[\"seed\"])) # both CPU and CUDA\n np.random.seed(config[\"seed\"])\n\n train_dataloader = DataLoader(\n dataset=initialize_config(config[\"train_dataset\"]),\n batch_size=config[\"train_dataloader\"][\"batch_size\"],\n num_workers=config[\"train_dataloader\"][\"num_workers\"],\n shuffle=config[\"train_dataloader\"][\"shuffle\"],\n pin_memory=config[\"train_dataloader\"][\"pin_memory\"] # Very small data set False\n )\n\n validation_dataloader = DataLoader(\n dataset=initialize_config(config[\"validation_dataset\"]),\n batch_size=1,\n num_workers=1\n )\n\n model = initialize_config(config[\"model\"])\n\n optimizer = torch.optim.Adam(\n params=model.parameters(),\n lr=config[\"optimizer\"][\"lr\"],\n betas=(config[\"optimizer\"][\"beta1\"], config[\"optimizer\"][\"beta2\"])\n )\n\n loss_function = initialize_config(config[\"loss_function\"])\n\n trainer = JointTrainer(\n config=config,\n resume=resume,\n model=model,\n optim=optimizer,\n loss_function=loss_function,\n train_dl=train_dataloader,\n validation_dl=validation_dataloader\n )\n\n trainer.train()\n\n\nif __name__ == '__main__':\n # parser = argparse.ArgumentParser(description=\"SimpleCNN\")\n # parser.add_argument(\"-C\", \"--configuration\", required=True, type=str, default='config/simple_cnn.json5',\n # help=\"Configuration (*.json).\")\n # parser.add_argument(\"-R\", \"--resume\", action=\"store_true\", default=False,\n # help=\"Resume experiment from latest checkpoint.\")\n # args = 
parser.parse_args()\n \n config_path = \"config/20200323_joint_simple_cnn.json5\"\n \n configuration = json5.load(open(config_path))\n configuration[\"experiment_name\"], _ = os.path.splitext(os.path.basename(config_path))\n configuration[\"config_path\"] = config_path\n\n main(configuration, resume=False)\n"
] |
[
[
"numpy.random.seed"
]
] |
Vita98/EnergyLoadForecasting
|
[
"759fe9a64234230453cec1805c01f5aa182ec7b5"
] |
[
"grid_search.py"
] |
[
"# grid search sarima hyperparameters\nfrom math import sqrt\nfrom multiprocessing import cpu_count\nfrom joblib import Parallel\nfrom joblib import delayed\nfrom warnings import catch_warnings\nfrom warnings import filterwarnings\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom sklearn.metrics import mean_squared_error\nfrom pandas import read_csv\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\n \n# one-step sarima forecast\ndef sarima_forecast(history, config):\n\torder, sorder = config\n\t# define model\n\tmodel = SARIMAX(history, order=order, seasonal_order=sorder)\n\t# fit model\n\tmodel_fit = model.fit(disp=False)\n\t# make one step forecast\n\tyhat = model_fit.predict(len(history), len(history))\n\treturn yhat[0]\n \n# root mean squared error or rmse\ndef measure_rmse(actual, predicted):\n\treturn sqrt(mean_squared_error(actual, predicted))\n \n# split a univariate dataset into train/test sets\ndef train_test_split(data, n_test):\n\treturn data[:-n_test], data[-n_test:]\n \n# walk-forward validation for univariate data\ndef walk_forward_validation(data, n_test, cfg):\n\ttrain, test = train_test_split(data, n_test)\n\thistory = [x for x in train]\n\torder, sorder = cfg\n\t# define model\n\tmodel = SARIMAX(history, order=order, seasonal_order=sorder)\n\tmodel_fit = model.fit(disp=False)\n\t# plot forecasts against actual outcomes\n\tyhat = model_fit.predict(start=0, end=len(test))\n\t# print(yhat)\n\tpredictions = list()\n\n\tfor value in yhat[1:]:\n\t\tpredictions.append(value)\n\n\treturn measure_rmse(test, predictions)\n\t''' \n\tpredictions = list()\n\t# split dataset\n\ttrain, test = train_test_split(data, n_test)\n\t# seed history with training dataset\n\thistory = [x for x in train]\n\t# step over each time-step in the test set\n\tfor i in range(len(test)):\n\t\t# fit model and make forecast for history\n\t\tyhat = sarima_forecast(history, cfg)\n\t\t# store forecast in list of 
predictions\n\t\tpredictions.append(yhat)\n\t\t# add actual observation to history for the next loop\n\t\thistory.append(test[i])\n\t# estimate prediction error\n\terror = measure_rmse(test, predictions) '''\n\t#return error\n \n# score a model, return None on failure\ndef score_model(data, n_test, cfg, debug=False):\n\tresult = None\n\t# convert config to a key\n\tkey = str(cfg)\n\t# show all warnings and fail on exception if debugging\n\tif debug:\n\t\tresult = walk_forward_validation(data, n_test, cfg)\n\telse:\n\t\t# one failure during model validation suggests an unstable config\n\t\ttry:\n\t\t\t# never show warnings when grid searching, too noisy\n\t\t\twith catch_warnings():\n\t\t\t\tfilterwarnings(\"ignore\")\n\t\t\t\tresult = walk_forward_validation(data, n_test, cfg)\n\t\texcept:\n\t\t\terror = None\n\t# check for an interesting result\n\tif result is not None:\n\t\tprint(' > Model[%s] %.3f' % (key, result))\n\treturn (key, result)\n \n# grid search configs\ndef grid_search(data, cfg_list, n_test, parallel=True):\n\tscores = None\n\tif parallel:\n\t\t# execute configs in parallel\n\t\texecutor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')\n\t\ttasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)\n\t\tscores = executor(tasks)\n\telse:\n\t\tscores = [score_model(data, n_test, cfg) for cfg in cfg_list]\n\t# remove empty results\n\tscores = [r for r in scores if r[1] != None]\n\t# sort configs by error, asc\n\tscores.sort(key=lambda tup: tup[1])\n\treturn scores\n \n# create a set of sarima configs to try\ndef sarima_configs(seasonal=[150,180,210,240,270,300,360,720]):\n\t#\n\t#seasonal = [12]\n\tmodels = list()\n\n\t'''\n\t# define config lists\n\tp_params = [0, 1, 2]\n\td_params = [0, 1]\n\tq_params = [0, 1, 2]\n\tt_params = ['n','c','t','ct']\n\tP_params = [0, 1, 2]\n\tD_params = [0, 1]\n\tQ_params = [0, 1, 2]'''\n\tm_params = seasonal\n\n\tfor m in m_params:\n\t\tcfg = [(1, 0, 1), (0, 0, 1, 
m)]\n\t\tmodels.append(cfg)\n\t'''\n\t# create config instances\n\tfor p in p_params:\n\t\tfor d in d_params:\n\t\t\tfor q in q_params:\n\t\t\t\tfor t in t_params:\n\t\t\t\t\tfor P in P_params:\n\t\t\t\t\t\tfor D in D_params:\n\t\t\t\t\t\t\tfor Q in Q_params:\n\t\t\t\t\t\t\t\tfor m in m_params:\n\t\t\t\t\t\t\t\t\tcfg = [(p,d,q), (P,D,Q,m), t]\n\t\t\t\t\t\t\t\t\tmodels.append(cfg)'''\n\treturn models\n\n#Parser for the read_csv\ndef parser(x):\n\treturn datetime.strptime(x, '%y-%m-%d %H:%M:%S')\n\n\n\n\n\nimport enum\nclass TrainignTimeType(enum.IntEnum):\n\tONE_WEEK = 10080\n\tONE_MONTH = 43200\n\nclass TestingTimeType(enum.IntEnum):\n\tONE_DAY = 1440\n\n\n\n\n\n\n\n'''\n\tPUT HERE THE CONFIGURATION VALUES\n\t\t\t\t\t\t\t\t\t\t'''\ntrainSize = TrainignTimeType.ONE_WEEK\ntestSize = TestingTimeType.ONE_DAY\nshiftRow = 1\n\noriginFileName = \"ukdale_def4.csv\"\nseriesName = \"Tv_Dvd_Lamp\"\n\n\n\n\n\n\n \nif __name__ == '__main__':\n\t# define dataset\n\n\tnumbersOfRowToRead = int(trainSize) + int(testSize) + shiftRow\n\n\t#Reading the series from the dataset file\n\tdata = read_csv(\"Dataset/\" + originFileName,header=0,index_col=0,nrows=numbersOfRowToRead,skiprows=range(1,shiftRow))\n\n\tdata = data[seriesName]\n\n\t\n\n\t# data split\n\tn_test = int(testSize)\n\t# model configs\n\tcfg_list = sarima_configs()\n\t# grid search\n\tscores = grid_search(data, cfg_list, n_test)\n\tprint('done')\n\t# list top 3 configs\n\tfor cfg, error in scores[:3]:\n\t\tprint(cfg, error)"
] |
[
[
"sklearn.metrics.mean_squared_error"
]
] |
JPVentura135/astropy
|
[
"cb6588a246235d69f5ae929e27e8cc528faa038b",
"cb6588a246235d69f5ae929e27e8cc528faa038b"
] |
[
"astropy/io/fits/tests/test_table.py",
"astropy/coordinates/tests/test_solar_system.py"
] |
[
"# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\nimport contextlib\nimport copy\nimport gc\nimport pickle\nimport re\n\nimport pytest\nimport numpy as np\nfrom numpy import char as chararray\n\ntry:\n import objgraph\n HAVE_OBJGRAPH = True\nexcept ImportError:\n HAVE_OBJGRAPH = False\n\nfrom astropy.io import fits\nfrom astropy.tests.helper import catch_warnings, ignore_warnings\nfrom astropy.utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\n\nfrom astropy.io.fits.column import Delayed, NUMPY2FITS\nfrom astropy.io.fits.util import decode_ascii\nfrom astropy.io.fits.verify import VerifyError\nfrom . import FitsTestCase\n\n\ndef comparefloats(a, b):\n \"\"\"\n Compare two float scalars or arrays and see if they are consistent\n\n Consistency is determined ensuring the difference is less than the\n expected amount. Return True if consistent, False if any differences.\n \"\"\"\n\n aa = a\n bb = b\n # compute expected precision\n if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':\n precision = 0.000001\n else:\n precision = 0.0000000000000001\n precision = 0.00001 # until precision problem is fixed in astropy.io.fits\n diff = np.absolute(aa - bb)\n mask0 = aa == 0\n masknz = aa != 0.\n if np.any(mask0):\n if diff[mask0].max() != 0.:\n return False\n if np.any(masknz):\n if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:\n return False\n return True\n\n\ndef comparerecords(a, b):\n \"\"\"\n Compare two record arrays\n\n Does this field by field, using approximation testing for float columns\n (Complex not yet handled.)\n Column names not compared, but column types and sizes are.\n \"\"\"\n\n nfieldsa = len(a.dtype.names)\n nfieldsb = len(b.dtype.names)\n if nfieldsa != nfieldsb:\n print(\"number of fields don't match\")\n return False\n for i in range(nfieldsa):\n fielda = a.field(i)\n fieldb = b.field(i)\n if fielda.dtype.char == 'S':\n fielda = 
decode_ascii(fielda)\n if fieldb.dtype.char == 'S':\n fieldb = decode_ascii(fieldb)\n if (not isinstance(fielda, type(fieldb)) and not\n isinstance(fieldb, type(fielda))):\n print(\"type(fielda): \", type(fielda), \" fielda: \", fielda)\n print(\"type(fieldb): \", type(fieldb), \" fieldb: \", fieldb)\n print('field {0} type differs'.format(i))\n return False\n if len(fielda) and isinstance(fielda[0], np.floating):\n if not comparefloats(fielda, fieldb):\n print(\"fielda: \", fielda)\n print(\"fieldb: \", fieldb)\n print('field {0} differs'.format(i))\n return False\n elif (isinstance(fielda, fits.column._VLF) or\n isinstance(fieldb, fits.column._VLF)):\n for row in range(len(fielda)):\n if np.any(fielda[row] != fieldb[row]):\n print('fielda[{0}]: {1}'.format(row, fielda[row]))\n print('fieldb[{0}]: {1}'.format(row, fieldb[row]))\n print('field {0} differs in row {1}'.format(i, row))\n else:\n if np.any(fielda != fieldb):\n print(\"fielda: \", fielda)\n print(\"fieldb: \", fieldb)\n print('field {0} differs'.format(i))\n return False\n return True\n\n\nclass TestTableFunctions(FitsTestCase):\n def test_constructor_copies_header(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153\n\n Ensure that a header from one HDU is copied when used to initialize new\n HDU.\n\n This is like the test of the same name in test_image, but tests this\n for tables as well.\n \"\"\"\n\n ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])\n thdr = ifd[1].header\n thdr['FILENAME'] = 'labq01i3q_rawtag.fits'\n\n thdu = fits.BinTableHDU(header=thdr)\n ofd = fits.HDUList(thdu)\n ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'\n\n # Original header should be unchanged\n assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'\n\n def test_open(self):\n # open some existing FITS files:\n tt = fits.open(self.data('tb.fits'))\n fd = fits.open(self.data('test0.fits'))\n\n # create some local arrays\n a1 = chararray.array(['abc', 'def', 'xx'])\n r1 = 
np.array([11., 12., 13.], dtype=np.float32)\n\n # create a table from scratch, using a mixture of columns from existing\n # tables and locally created arrays:\n\n # first, create individual column definitions\n\n c1 = fits.Column(name='abc', format='3A', array=a1)\n c2 = fits.Column(name='def', format='E', array=r1)\n a3 = np.array([3, 4, 5], dtype='i2')\n c3 = fits.Column(name='xyz', format='I', array=a3)\n a4 = np.array([1, 2, 3], dtype='i2')\n c4 = fits.Column(name='t1', format='I', array=a4)\n a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')\n c5 = fits.Column(name='t2', format='C', array=a5)\n\n # Note that X format must be two-D array\n a6 = np.array([[0], [1], [0]], dtype=np.uint8)\n c6 = fits.Column(name='t3', format='X', array=a6)\n a7 = np.array([101, 102, 103], dtype='i4')\n c7 = fits.Column(name='t4', format='J', array=a7)\n a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],\n [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)\n c8 = fits.Column(name='t5', format='11X', array=a8)\n\n # second, create a column-definitions object for all columns in a table\n\n x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])\n\n tbhdu = fits.BinTableHDU.from_columns(x)\n\n # another way to create a table is by using existing table's\n # information:\n\n x2 = fits.ColDefs(tt[1])\n t2 = fits.BinTableHDU.from_columns(x2, nrows=2)\n ra = np.rec.array([\n (1, 'abc', 3.7000002861022949, 0),\n (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')\n\n assert comparerecords(t2.data, ra)\n\n # the table HDU's data is a subclass of a record array, so we can\n # access one row like this:\n\n assert tbhdu.data[1][0] == a1[1]\n assert tbhdu.data[1][1] == r1[1]\n assert tbhdu.data[1][2] == a3[1]\n assert tbhdu.data[1][3] == a4[1]\n assert tbhdu.data[1][4] == a5[1]\n assert (tbhdu.data[1][5] == a6[1].view('bool')).all()\n assert tbhdu.data[1][6] == a7[1]\n assert (tbhdu.data[1][7] == a8[1]).all()\n\n # and a column like this:\n assert 
str(tbhdu.data.field('abc')) == \"['abc' 'def' 'xx']\"\n\n # An alternative way to create a column-definitions object is from an\n # existing table.\n xx = fits.ColDefs(tt[1])\n\n # now we write out the newly created table HDU to a FITS file:\n fout = fits.HDUList(fits.PrimaryHDU())\n fout.append(tbhdu)\n fout.writeto(self.temp('tableout1.fits'), overwrite=True)\n\n with fits.open(self.temp('tableout1.fits')) as f2:\n temp = f2[1].data.field(7)\n assert (temp[0] == [True, True, False, True, False, True,\n True, True, False, False, True]).all()\n\n # An alternative way to create an output table FITS file:\n fout2 = fits.open(self.temp('tableout2.fits'), 'append')\n fout2.append(fd[0])\n fout2.append(tbhdu)\n fout2.close()\n tt.close()\n fd.close()\n\n def test_binary_table(self):\n # binary table:\n t = fits.open(self.data('tb.fits'))\n assert t[1].header['tform1'] == '1J'\n\n info = {'name': ['c1', 'c2', 'c3', 'c4'],\n 'format': ['1J', '3A', '1E', '1L'],\n 'unit': ['', '', '', ''],\n 'null': [-2147483647, '', '', ''],\n 'bscale': ['', '', 3, ''],\n 'bzero': ['', '', 0.4, ''],\n 'disp': ['I11', 'A3', 'G15.7', 'L6'],\n 'start': ['', '', '', ''],\n 'dim': ['', '', '', ''],\n 'coord_inc': ['', '', '', ''],\n 'coord_type': ['', '', '', ''],\n 'coord_unit': ['', '', '', ''],\n 'coord_ref_point': ['', '', '', ''],\n 'coord_ref_value': ['', '', '', ''],\n 'time_ref_pos': ['', '', '', '']}\n\n assert t[1].columns.info(output=False) == info\n\n ra = np.rec.array([\n (1, 'abc', 3.7000002861022949, 0),\n (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')\n\n assert comparerecords(t[1].data, ra[:2])\n\n # Change scaled field and scale back to the original array\n t[1].data.field('c4')[0] = 1\n t[1].data._scale_back()\n assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'\n\n # look at data column-wise\n assert (t[1].data.field(0) == np.array([1, 2])).all()\n\n # When there are scaled columns, the raw data are in data._parent\n\n t.close()\n\n def 
test_ascii_table(self):\n # ASCII table\n a = fits.open(self.data('ascii.fits'))\n ra1 = np.rec.array([\n (10.123000144958496, 37),\n (5.1999998092651367, 23),\n (15.609999656677246, 17),\n (0.0, 0),\n (345.0, 345)], names='c1, c2')\n assert comparerecords(a[1].data, ra1)\n\n # Test slicing\n a2 = a[1].data[2:][2:]\n ra2 = np.rec.array([(345.0, 345)], names='c1, c2')\n\n assert comparerecords(a2, ra2)\n\n assert (a2.field(1) == np.array([345])).all()\n\n ra3 = np.rec.array([\n (10.123000144958496, 37),\n (15.609999656677246, 17),\n (345.0, 345)\n ], names='c1, c2')\n\n assert comparerecords(a[1].data[::2], ra3)\n\n # Test Start Column\n\n a1 = chararray.array(['abcd', 'def'])\n r1 = np.array([11., 12.])\n c1 = fits.Column(name='abc', format='A3', start=19, array=a1)\n c2 = fits.Column(name='def', format='E', start=3, array=r1)\n c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])\n hdu = fits.TableHDU.from_columns([c2, c1, c3])\n\n assert (dict(hdu.data.dtype.fields) ==\n {'abc': (np.dtype('|S3'), 18),\n 'def': (np.dtype('|S15'), 2),\n 't1': (np.dtype('|S10'), 21)})\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n hdul = fits.open(self.temp('toto.fits'))\n assert comparerecords(hdu.data, hdul[1].data)\n hdul.close()\n\n # Test Scaling\n\n r1 = np.array([11., 12.])\n c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,\n bzero=0.6)\n hdu = fits.TableHDU.from_columns([c2])\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n with open(self.temp('toto.fits')) as f:\n assert '4.95652173913043548D+00' in f.read()\n with fits.open(self.temp('toto.fits')) as hdul:\n assert comparerecords(hdu.data, hdul[1].data)\n\n a.close()\n\n def test_endianness(self):\n x = np.ndarray((1,), dtype=object)\n channelsIn = np.array([3], dtype='uint8')\n x[0] = channelsIn\n col = fits.Column(name=\"Channels\", format=\"PB()\", array=x)\n cols = fits.ColDefs([col])\n tbhdu = fits.BinTableHDU.from_columns(cols)\n tbhdu.name = \"RFI\"\n 
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)\n hduL = fits.open(self.temp('testendian.fits'))\n rfiHDU = hduL['RFI']\n data = rfiHDU.data\n channelsOut = data.field('Channels')[0]\n assert (channelsIn == channelsOut).all()\n hduL.close()\n\n def test_column_endianness(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77\n (Astropy doesn't preserve byte order of non-native order column arrays)\n \"\"\"\n\n a = [1., 2., 3., 4.]\n a1 = np.array(a, dtype='<f8')\n a2 = np.array(a, dtype='>f8')\n\n col1 = fits.Column(name='a', format='D', array=a1)\n col2 = fits.Column(name='b', format='D', array=a2)\n cols = fits.ColDefs([col1, col2])\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n assert (tbhdu.data['a'] == a1).all()\n assert (tbhdu.data['b'] == a2).all()\n\n # Double check that the array is converted to the correct byte-order\n # for FITS (big-endian).\n tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)\n with fits.open(self.temp('testendian.fits')) as hdul:\n assert (hdul[1].data['a'] == a2).all()\n assert (hdul[1].data['b'] == a2).all()\n\n def test_recarray_to_bintablehdu(self):\n bright = np.rec.array(\n [(1, 'Serius', -1.45, 'A1V'),\n (2, 'Canopys', -0.73, 'F0Ib'),\n (3, 'Rigil Kent', -0.1, 'G2V')],\n formats='int16,a20,float32,a10',\n names='order,name,mag,Sp')\n hdu = fits.BinTableHDU(bright)\n assert comparerecords(hdu.data, bright)\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n hdul = fits.open(self.temp('toto.fits'))\n assert comparerecords(hdu.data, hdul[1].data)\n assert comparerecords(bright, hdul[1].data)\n hdul.close()\n\n def test_numpy_ndarray_to_bintablehdu(self):\n desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],\n 'formats': ['int', 'S20', 'float32', 'S10']})\n a = np.array([(1, 'Serius', -1.45, 'A1V'),\n (2, 'Canopys', -0.73, 'F0Ib'),\n (3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)\n hdu = fits.BinTableHDU(a)\n assert comparerecords(hdu.data, a.view(fits.FITS_rec))\n 
hdu.writeto(self.temp('toto.fits'), overwrite=True)\n hdul = fits.open(self.temp('toto.fits'))\n assert comparerecords(hdu.data, hdul[1].data)\n hdul.close()\n\n def test_numpy_ndarray_to_bintablehdu_with_unicode(self):\n desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],\n 'formats': ['int', 'U20', 'float32', 'U10']})\n a = np.array([(1, u'Serius', -1.45, u'A1V'),\n (2, u'Canopys', -0.73, u'F0Ib'),\n (3, u'Rigil Kent', -0.1, u'G2V')], dtype=desc)\n hdu = fits.BinTableHDU(a)\n assert comparerecords(hdu.data, a.view(fits.FITS_rec))\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n hdul = fits.open(self.temp('toto.fits'))\n assert comparerecords(hdu.data, hdul[1].data)\n hdul.close()\n\n def test_new_table_from_recarray(self):\n bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),\n (2, 'Canopys', -0.73, 'F0Ib'),\n (3, 'Rigil Kent', -0.1, 'G2V')],\n formats='int16,a20,float64,a10',\n names='order,name,mag,Sp')\n hdu = fits.TableHDU.from_columns(bright, nrows=2)\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.data._coldefs._arrays[0]))\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.columns.columns[0].array))\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.columns._arrays[0]))\n\n # Ensure I can change the value of one data element and it effects\n # all of the others.\n hdu.data[0][0] = 213\n\n assert hdu.data[0][0] == 213\n assert hdu.data._coldefs._arrays[0][0] == 213\n assert hdu.data._coldefs.columns[0].array[0] == 213\n assert hdu.columns._arrays[0][0] == 213\n assert hdu.columns.columns[0].array[0] == 213\n\n hdu.data._coldefs._arrays[0][0] = 100\n\n assert hdu.data[0][0] == 100\n assert hdu.data._coldefs._arrays[0][0] == 100\n assert hdu.data._coldefs.columns[0].array[0] == 100\n assert hdu.columns._arrays[0][0] == 100\n assert hdu.columns.columns[0].array[0] == 100\n\n hdu.data._coldefs.columns[0].array[0] = 500\n assert 
hdu.data[0][0] == 500\n assert hdu.data._coldefs._arrays[0][0] == 500\n assert hdu.data._coldefs.columns[0].array[0] == 500\n assert hdu.columns._arrays[0][0] == 500\n assert hdu.columns.columns[0].array[0] == 500\n\n hdu.columns._arrays[0][0] = 600\n assert hdu.data[0][0] == 600\n assert hdu.data._coldefs._arrays[0][0] == 600\n assert hdu.data._coldefs.columns[0].array[0] == 600\n assert hdu.columns._arrays[0][0] == 600\n assert hdu.columns.columns[0].array[0] == 600\n\n hdu.columns.columns[0].array[0] = 800\n assert hdu.data[0][0] == 800\n assert hdu.data._coldefs._arrays[0][0] == 800\n assert hdu.data._coldefs.columns[0].array[0] == 800\n assert hdu.columns._arrays[0][0] == 800\n assert hdu.columns.columns[0].array[0] == 800\n\n assert (hdu.data.field(0) ==\n np.array([800, 2], dtype=np.int16)).all()\n assert hdu.data[0][1] == 'Serius'\n assert hdu.data[1][1] == 'Canopys'\n assert (hdu.data.field(2) ==\n np.array([-1.45, -0.73], dtype=np.float64)).all()\n assert hdu.data[0][3] == 'A1V'\n assert hdu.data[1][3] == 'F0Ib'\n\n with ignore_warnings():\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n\n with fits.open(self.temp('toto.fits')) as hdul:\n assert (hdul[1].data.field(0) ==\n np.array([800, 2], dtype=np.int16)).all()\n assert hdul[1].data[0][1] == 'Serius'\n assert hdul[1].data[1][1] == 'Canopys'\n assert (hdul[1].data.field(2) ==\n np.array([-1.45, -0.73], dtype=np.float64)).all()\n assert hdul[1].data[0][3] == 'A1V'\n assert hdul[1].data[1][3] == 'F0Ib'\n del hdul\n\n hdu = fits.BinTableHDU.from_columns(bright, nrows=2)\n tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),\n (2, 'Canopys', -0.73, 'F0Ib')],\n formats='int16,a20,float64,a10',\n names='order,name,mag,Sp')\n assert comparerecords(hdu.data, tmp)\n with ignore_warnings():\n hdu.writeto(self.temp('toto.fits'), overwrite=True)\n with fits.open(self.temp('toto.fits')) as hdul:\n assert comparerecords(hdu.data, hdul[1].data)\n\n def test_new_fitsrec(self):\n \"\"\"\n Tests creating a new 
FITS_rec object from a multi-field ndarray.\n \"\"\"\n\n with fits.open(self.data('tb.fits')) as h:\n data = h[1].data\n new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)\n appended = np.append(data, new_data).view(fits.FITS_rec)\n assert repr(appended).startswith('FITS_rec(')\n # This test used to check the entire string representation of FITS_rec,\n # but that has problems between different numpy versions. Instead just\n # check that the FITS_rec was created, and we'll let subsequent tests\n # worry about checking values and such\n\n def test_appending_a_column(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.writeto(self.temp('table1.fits'))\n\n counts = np.array([412, 434, 408, 417])\n names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.writeto(self.temp('table2.fits'))\n\n # Append the rows of table 2 after the rows of table 1\n # The column definitions are assumed to be the same\n\n # Open the two files we want to append\n t1 = fits.open(self.temp('table1.fits'))\n t2 = fits.open(self.temp('table2.fits'))\n\n # Get the number of rows in the table from the first file\n nrows1 = t1[1].data.shape[0]\n\n # Get the total 
number of rows in the resulting appended table\n nrows = t1[1].data.shape[0] + t2[1].data.shape[0]\n\n assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)\n\n # Create a new table that consists of the data from the first table\n # but has enough space in the ndarray to hold the data from both tables\n hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)\n\n # For each column in the tables append the data from table 2 after the\n # data from table 1.\n for i in range(len(t1[1].columns)):\n hdu.data.field(i)[nrows1:] = t2[1].data.field(i)\n\n hdu.writeto(self.temp('newtable.fits'))\n\n info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),\n (1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',\n '')]\n\n assert fits.info(self.temp('newtable.fits'), output=False) == info\n\n z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)\n array = np.rec.array(\n [('NGC1', 312, '', z, True),\n ('NGC2', 334, '', z, False),\n ('NGC3', 308, '', z, True),\n ('NCG4', 317, '', z, True),\n ('NGC5', 412, '', z, False),\n ('NGC6', 434, '', z, True),\n ('NGC7', 408, '', z, False),\n ('NCG8', 417, '', z, False)],\n formats='a10,u4,a10,5f4,l')\n\n assert comparerecords(hdu.data, array)\n\n # Verify that all of the references to the data point to the same\n # numarray\n hdu.data[0][1] = 300\n assert hdu.data._coldefs._arrays[1][0] == 300\n assert hdu.data._coldefs.columns[1].array[0] == 300\n assert hdu.columns._arrays[1][0] == 300\n assert hdu.columns.columns[1].array[0] == 300\n assert hdu.data[0][1] == 300\n\n hdu.data._coldefs._arrays[1][0] = 200\n assert hdu.data._coldefs._arrays[1][0] == 200\n assert hdu.data._coldefs.columns[1].array[0] == 200\n assert hdu.columns._arrays[1][0] == 200\n assert hdu.columns.columns[1].array[0] == 200\n assert hdu.data[0][1] == 200\n\n hdu.data._coldefs.columns[1].array[0] = 100\n assert hdu.data._coldefs._arrays[1][0] == 100\n assert hdu.data._coldefs.columns[1].array[0] == 100\n assert hdu.columns._arrays[1][0] 
== 100\n assert hdu.columns.columns[1].array[0] == 100\n assert hdu.data[0][1] == 100\n\n hdu.columns._arrays[1][0] = 90\n assert hdu.data._coldefs._arrays[1][0] == 90\n assert hdu.data._coldefs.columns[1].array[0] == 90\n assert hdu.columns._arrays[1][0] == 90\n assert hdu.columns.columns[1].array[0] == 90\n assert hdu.data[0][1] == 90\n\n hdu.columns.columns[1].array[0] = 80\n assert hdu.data._coldefs._arrays[1][0] == 80\n assert hdu.data._coldefs.columns[1].array[0] == 80\n assert hdu.columns._arrays[1][0] == 80\n assert hdu.columns.columns[1].array[0] == 80\n assert hdu.data[0][1] == 80\n\n # Same verification from the file\n hdul = fits.open(self.temp('newtable.fits'))\n hdu = hdul[1]\n hdu.data[0][1] = 300\n assert hdu.data._coldefs._arrays[1][0] == 300\n assert hdu.data._coldefs.columns[1].array[0] == 300\n assert hdu.columns._arrays[1][0] == 300\n assert hdu.columns.columns[1].array[0] == 300\n assert hdu.data[0][1] == 300\n\n hdu.data._coldefs._arrays[1][0] = 200\n assert hdu.data._coldefs._arrays[1][0] == 200\n assert hdu.data._coldefs.columns[1].array[0] == 200\n assert hdu.columns._arrays[1][0] == 200\n assert hdu.columns.columns[1].array[0] == 200\n assert hdu.data[0][1] == 200\n\n hdu.data._coldefs.columns[1].array[0] = 100\n assert hdu.data._coldefs._arrays[1][0] == 100\n assert hdu.data._coldefs.columns[1].array[0] == 100\n assert hdu.columns._arrays[1][0] == 100\n assert hdu.columns.columns[1].array[0] == 100\n assert hdu.data[0][1] == 100\n\n hdu.columns._arrays[1][0] = 90\n assert hdu.data._coldefs._arrays[1][0] == 90\n assert hdu.data._coldefs.columns[1].array[0] == 90\n assert hdu.columns._arrays[1][0] == 90\n assert hdu.columns.columns[1].array[0] == 90\n assert hdu.data[0][1] == 90\n\n hdu.columns.columns[1].array[0] = 80\n assert hdu.data._coldefs._arrays[1][0] == 80\n assert hdu.data._coldefs.columns[1].array[0] == 80\n assert hdu.columns._arrays[1][0] == 80\n assert hdu.columns.columns[1].array[0] == 80\n assert hdu.data[0][1] == 80\n\n 
t1.close()
        t2.close()
        hdul.close()

    def test_adding_a_column(self):
        # Tests adding a column to a table.
        counts = np.array([312, 334, 308, 317])
        names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
        c1 = fits.Column(name='target', format='10A', array=names)
        c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
        # 'flag' (c5) is deliberately left out of the initial ColDefs; it is
        # appended below with the `ColDefs + Column` operation under test.
        coldefs = fits.ColDefs([c1, c2, c3, c4])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)

        # The HDU built from the 4-column ColDefs must not contain 'flag'.
        assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
        # Appending c5 yields a ColDefs that does include the 'flag' column.
        coldefs1 = coldefs + c5

        tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
        assert tbhdu1.columns.names == ['target', 'counts', 'notes',
                                        'spectrum', 'flag']

        # The resulting table data must match a reference recarray, with the
        # unset 'notes'/'spectrum' fields defaulting to ''/zeros and the
        # 'flag' values carried over from c5's array.
        z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
        array = np.rec.array(
            [('NGC1', 312, '', z, True),
             ('NGC2', 334, '', z, False),
             ('NGC3', 308, '', z, True),
             ('NCG4', 317, '', z, True)],
            formats='a10,u4,a10,5f4,l')
        assert comparerecords(tbhdu1.data, array)

    def test_merge_tables(self):
        counts = np.array([312, 334, 308, 317])
        names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
        c1 = fits.Column(name='target', format='10A', array=names)
        c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        tbhdu.writeto(self.temp('table1.fits'))

        counts = np.array([412, 434, 408, 417])
        names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
        c1 = fits.Column(name='target1', format='10A', array=names)
        c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes1', format='A10')
        c4 = fits.Column(name='spectrum1', format='5E')
        
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.writeto(self.temp('table2.fits'))\n\n # Merge the columns of table 2 after the columns of table 1\n # The column names are assumed to be different\n\n # Open the two files we want to append\n t1 = fits.open(self.temp('table1.fits'))\n t2 = fits.open(self.temp('table2.fits'))\n\n hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)\n\n z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)\n array = np.rec.array(\n [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),\n ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),\n ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),\n ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],\n formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')\n assert comparerecords(hdu.data, array)\n\n hdu.writeto(self.temp('newtable.fits'))\n\n # Verify that all of the references to the data point to the same\n # numarray\n hdu.data[0][1] = 300\n assert hdu.data._coldefs._arrays[1][0] == 300\n assert hdu.data._coldefs.columns[1].array[0] == 300\n assert hdu.columns._arrays[1][0] == 300\n assert hdu.columns.columns[1].array[0] == 300\n assert hdu.data[0][1] == 300\n\n hdu.data._coldefs._arrays[1][0] = 200\n assert hdu.data._coldefs._arrays[1][0] == 200\n assert hdu.data._coldefs.columns[1].array[0] == 200\n assert hdu.columns._arrays[1][0] == 200\n assert hdu.columns.columns[1].array[0] == 200\n assert hdu.data[0][1] == 200\n\n hdu.data._coldefs.columns[1].array[0] = 100\n assert hdu.data._coldefs._arrays[1][0] == 100\n assert hdu.data._coldefs.columns[1].array[0] == 100\n assert hdu.columns._arrays[1][0] == 100\n assert hdu.columns.columns[1].array[0] == 100\n assert hdu.data[0][1] == 100\n\n hdu.columns._arrays[1][0] = 90\n assert hdu.data._coldefs._arrays[1][0] == 90\n assert hdu.data._coldefs.columns[1].array[0] == 90\n assert hdu.columns._arrays[1][0] == 90\n 
assert hdu.columns.columns[1].array[0] == 90\n assert hdu.data[0][1] == 90\n\n hdu.columns.columns[1].array[0] = 80\n assert hdu.data._coldefs._arrays[1][0] == 80\n assert hdu.data._coldefs.columns[1].array[0] == 80\n assert hdu.columns._arrays[1][0] == 80\n assert hdu.columns.columns[1].array[0] == 80\n assert hdu.data[0][1] == 80\n\n info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),\n (1, '', 1, 'BinTableHDU', 30, '4R x 10C',\n '[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]\n\n assert fits.info(self.temp('newtable.fits'), output=False) == info\n\n hdul = fits.open(self.temp('newtable.fits'))\n hdu = hdul[1]\n\n assert (hdu.columns.names ==\n ['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',\n 'counts1', 'notes1', 'spectrum1', 'flag1'])\n\n z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)\n array = np.rec.array(\n [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),\n ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),\n ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),\n ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],\n formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')\n assert comparerecords(hdu.data, array)\n\n # Same verification from the file\n hdu.data[0][1] = 300\n assert hdu.data._coldefs._arrays[1][0] == 300\n assert hdu.data._coldefs.columns[1].array[0] == 300\n assert hdu.columns._arrays[1][0] == 300\n assert hdu.columns.columns[1].array[0] == 300\n assert hdu.data[0][1] == 300\n\n hdu.data._coldefs._arrays[1][0] = 200\n assert hdu.data._coldefs._arrays[1][0] == 200\n assert hdu.data._coldefs.columns[1].array[0] == 200\n assert hdu.columns._arrays[1][0] == 200\n assert hdu.columns.columns[1].array[0] == 200\n assert hdu.data[0][1] == 200\n\n hdu.data._coldefs.columns[1].array[0] = 100\n assert hdu.data._coldefs._arrays[1][0] == 100\n assert hdu.data._coldefs.columns[1].array[0] == 100\n assert hdu.columns._arrays[1][0] == 100\n assert hdu.columns.columns[1].array[0] == 100\n assert hdu.data[0][1] == 100\n\n 
hdu.columns._arrays[1][0] = 90
        assert hdu.data._coldefs._arrays[1][0] == 90
        assert hdu.data._coldefs.columns[1].array[0] == 90
        assert hdu.columns._arrays[1][0] == 90
        assert hdu.columns.columns[1].array[0] == 90
        assert hdu.data[0][1] == 90

        hdu.columns.columns[1].array[0] = 80
        assert hdu.data._coldefs._arrays[1][0] == 80
        assert hdu.data._coldefs.columns[1].array[0] == 80
        assert hdu.columns._arrays[1][0] == 80
        assert hdu.columns.columns[1].array[0] == 80
        assert hdu.data[0][1] == 80

        t1.close()
        t2.close()
        hdul.close()

    def test_modify_column_attributes(self):
        """Regression test for https://github.com/astropy/astropy/issues/996

        This just tests one particular use case, but it should apply pretty
        well to other similar cases.
        """

        # Per-column null values keyed by column name; assigned to each
        # Column's .null attribute below and expected to surface as the
        # TNULLn header keywords after a write/read round trip.
        NULLS = {'a': 2, 'b': 'b', 'c': 2.3}

        data = np.array(list(zip([1, 2, 3, 4],
                                 ['a', 'b', 'c', 'd'],
                                 [2.3, 4.5, 6.7, 8.9])),
                        dtype=[('a', int), ('b', 'S1'), ('c', float)])

        b = fits.BinTableHDU(data=data)
        for col in b.columns:
            col.null = NULLS[col.name]

        b.writeto(self.temp('test.fits'), overwrite=True)

        # The .null assignments made after HDU construction must have been
        # written out as TNULL1/TNULL2/TNULL3 header keywords.
        with fits.open(self.temp('test.fits')) as hdul:
            header = hdul[1].header
            assert header['TNULL1'] == 2
            assert header['TNULL2'] == 'b'
            assert header['TNULL3'] == 2.3

    @pytest.mark.xfail(not NUMPY_LT_1_14_1 and NUMPY_LT_1_14_2,
                       reason="See https://github.com/astropy/astropy/issues/7214")
    def test_mask_array(self):
        t = fits.open(self.data('table.fits'))
        tbdata = t[1].data
        # Boolean-mask the table rows and write the filtered data to a new
        # binary table HDU.
        mask = tbdata.field('V_mag') > 12
        newtbdata = tbdata[mask]
        hdu = fits.BinTableHDU(newtbdata)
        hdu.writeto(self.temp('newtable.fits'))

        hdul = fits.open(self.temp('newtable.fits'))

        # numpy >= 1.12 changes how structured arrays are printed, so we
        # match to a regex rather than a specific string.
        expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
        assert re.match(expect, str(hdu.data))
        assert re.match(expect, str(hdul[1].data))

        
t.close()
        hdul.close()

    def test_slice_a_row(self):
        counts = np.array([312, 334, 308, 317])
        names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
        c1 = fits.Column(name='target', format='10A', array=names)
        c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        tbhdu.writeto(self.temp('table1.fits'))

        t1 = fits.open(self.temp('table1.fits'))
        row = t1[1].data[2]
        assert row['counts'] == 308
        # Slicing a row yields fields 1..3 ('counts', 'notes', 'spectrum');
        # unset fields read back as '' / zeros.
        a, b, c = row[1:4]
        assert a == counts[2]
        assert b == ''
        assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
        row['counts'] = 310
        assert row['counts'] == 310

        row[1] = 315
        assert row['counts'] == 315

        assert row[1:4]['counts'] == 315

        # 'flag' lies outside the [1:4] slice, so looking it up on the
        # sliced record must raise KeyError.
        pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)

        # Writes through a row slice must propagate back to the full row.
        row[1:4]['counts'] = 300
        assert row[1:4]['counts'] == 300
        assert row['counts'] == 300

        row[1:4][0] = 400
        assert row[1:4]['counts'] == 400
        row[1:4]['counts'] = 300
        assert row[1:4]['counts'] == 300

        # Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
        row[1:4][::-1][-1] = 500
        assert row[1:4]['counts'] == 500
        row[1:4:2][0] = 300
        assert row[1:4]['counts'] == 300

        pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)

        # field()/setfield() on a sliced record index relative to the slice
        # and obey the same out-of-slice KeyError behavior.
        assert row[1:4].field(0) == 300
        assert row[1:4].field('counts') == 300

        pytest.raises(KeyError, row[1:4].field, 'flag')

        row[1:4].setfield('counts', 500)
        assert row[1:4].field(0) == 500

        pytest.raises(KeyError, row[1:4].setfield, 'flag', False)

        # The setfield() write above must be visible through every alias of
        # the column data on the parent table.
        assert t1[1].data._coldefs._arrays[1][2] == 500
        assert t1[1].data._coldefs.columns[1].array[2] == 500
        assert t1[1].columns._arrays[1][2] == 500
        assert t1[1].columns.columns[1].array[2] == 500
        assert t1[1].data[2][1] == 500

        t1.close()

    def test_fits_record_len(self):
        counts = np.array([312, 334, 308, 317])
        names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
        c1 = fits.Column(name='target', format='10A', array=names)
        c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        tbhdu.writeto(self.temp('table1.fits'))

        t1 = fits.open(self.temp('table1.fits'))

        # len() of a (possibly sliced) FITS_record counts only the fields in
        # the slice; slice ends past the 5-field record clamp to its length.
        assert len(t1[1].data[0]) == 5
        assert len(t1[1].data[0][0:4]) == 4
        assert len(t1[1].data[0][0:5]) == 5
        assert len(t1[1].data[0][0:6]) == 5
        assert len(t1[1].data[0][0:7]) == 5
        assert len(t1[1].data[0][1:4]) == 3
        assert len(t1[1].data[0][1:5]) == 4
        assert len(t1[1].data[0][1:6]) == 4
        assert len(t1[1].data[0][1:7]) == 4

        t1.close()

    def test_add_data_by_rows(self):
        counts = np.array([312, 334, 308, 317])
        names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
        c1 = fits.Column(name='target', format='10A', array=names)
        c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4, c5])

        tbhdu1 = fits.BinTableHDU.from_columns(coldefs)

        c1 = fits.Column(name='target', format='10A')
        c2 = fits.Column(name='counts', format='J', unit='DN')
        c3 = fits.Column(name='notes', format='A10')
        c4 = fits.Column(name='spectrum', format='5E')
        c5 = fits.Column(name='flag', format='L')
        coldefs = fits.ColDefs([c1, c2, c3, c4, c5])

        tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)

        # Test assigning data to a tables row using a FITS_record
        tbhdu.data[0] = tbhdu1.data[0]
        tbhdu.data[4] = tbhdu1.data[3]

        # Test assigning data to a tables row 
using a tuple\n tbhdu.data[2] = ('NGC1', 312, 'A Note',\n np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),\n True)\n\n # Test assigning data to a tables row using a list\n tbhdu.data[3] = ['JIM1', '33', 'A Note',\n np.array([1., 2., 3., 4., 5.], dtype=np.float32),\n True]\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.data._coldefs._arrays[0]))\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.columns.columns[0].array))\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.columns._arrays[0]))\n\n assert tbhdu.data[0][1] == 312\n assert tbhdu.data._coldefs._arrays[1][0] == 312\n assert tbhdu.data._coldefs.columns[1].array[0] == 312\n assert tbhdu.columns._arrays[1][0] == 312\n assert tbhdu.columns.columns[1].array[0] == 312\n assert tbhdu.columns.columns[0].array[0] == 'NGC1'\n assert tbhdu.columns.columns[2].array[0] == ''\n assert (tbhdu.columns.columns[3].array[0] ==\n np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()\n assert tbhdu.columns.columns[4].array[0] == True # nopep8\n\n assert tbhdu.data[3][1] == 33\n assert tbhdu.data._coldefs._arrays[1][3] == 33\n assert tbhdu.data._coldefs.columns[1].array[3] == 33\n assert tbhdu.columns._arrays[1][3] == 33\n assert tbhdu.columns.columns[1].array[3] == 33\n assert tbhdu.columns.columns[0].array[3] == 'JIM1'\n assert tbhdu.columns.columns[2].array[3] == 'A Note'\n assert (tbhdu.columns.columns[3].array[3] ==\n np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()\n assert tbhdu.columns.columns[4].array[3] == True # nopep8\n\n def test_assign_multiple_rows_to_table(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', 
format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu1 = fits.BinTableHDU.from_columns(coldefs)\n\n counts = np.array([112, 134, 108, 117])\n names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)\n\n tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)\n\n # Assign the 4 rows from the second table to rows 5 thru 8 of the\n # new table. Note that the last row of the new table will still be\n # initialized to the default values.\n tbhdu2.data[4:] = tbhdu.data\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(tbhdu2.data._coldefs.columns[0].array) ==\n id(tbhdu2.data._coldefs._arrays[0]))\n assert (id(tbhdu2.data._coldefs.columns[0].array) ==\n id(tbhdu2.columns.columns[0].array))\n assert (id(tbhdu2.data._coldefs.columns[0].array) ==\n id(tbhdu2.columns._arrays[0]))\n\n assert tbhdu2.data[0][1] == 312\n assert tbhdu2.data._coldefs._arrays[1][0] == 312\n assert tbhdu2.data._coldefs.columns[1].array[0] == 312\n assert tbhdu2.columns._arrays[1][0] == 312\n assert tbhdu2.columns.columns[1].array[0] == 312\n assert tbhdu2.columns.columns[0].array[0] == 'NGC1'\n assert tbhdu2.columns.columns[2].array[0] == ''\n assert (tbhdu2.columns.columns[3].array[0] ==\n np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()\n assert tbhdu2.columns.columns[4].array[0] == True # nopep8\n\n assert tbhdu2.data[4][1] == 112\n assert tbhdu2.data._coldefs._arrays[1][4] == 112\n assert 
tbhdu2.data._coldefs.columns[1].array[4] == 112\n assert tbhdu2.columns._arrays[1][4] == 112\n assert tbhdu2.columns.columns[1].array[4] == 112\n assert tbhdu2.columns.columns[0].array[4] == 'NGC5'\n assert tbhdu2.columns.columns[2].array[4] == ''\n assert (tbhdu2.columns.columns[3].array[4] ==\n np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()\n assert tbhdu2.columns.columns[4].array[4] == False # nopep8\n assert tbhdu2.columns.columns[1].array[8] == 0\n assert tbhdu2.columns.columns[0].array[8] == ''\n assert tbhdu2.columns.columns[2].array[8] == ''\n assert (tbhdu2.columns.columns[3].array[8] ==\n np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()\n assert tbhdu2.columns.columns[4].array[8] == False # nopep8\n\n def test_verify_data_references(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n\n # Verify that original ColDefs object has independent Column\n # objects.\n assert id(coldefs.columns[0]) != id(c1)\n\n # Verify that original ColDefs object has independent ndarray\n # objects.\n assert id(coldefs.columns[0].array) != id(names)\n\n # Verify that original ColDefs object references the same data\n # object as the original Column object.\n assert id(coldefs.columns[0].array) == id(c1.array)\n assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])\n\n # Verify new HDU has an independent ColDefs object.\n assert id(coldefs) != id(tbhdu.columns)\n\n # Verify new HDU has independent Column objects.\n assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])\n\n # Verify new HDU has independent ndarray 
objects.\n assert (id(coldefs.columns[0].array) !=\n id(tbhdu.columns.columns[0].array))\n\n # Verify that both ColDefs objects in the HDU reference the same\n # Coldefs object.\n assert id(tbhdu.columns) == id(tbhdu.data._coldefs)\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.data._coldefs._arrays[0]))\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.columns.columns[0].array))\n assert (id(tbhdu.data._coldefs.columns[0].array) ==\n id(tbhdu.columns._arrays[0]))\n\n tbhdu.writeto(self.temp('table1.fits'))\n\n t1 = fits.open(self.temp('table1.fits'))\n\n t1[1].data[0][1] = 213\n\n assert t1[1].data[0][1] == 213\n assert t1[1].data._coldefs._arrays[1][0] == 213\n assert t1[1].data._coldefs.columns[1].array[0] == 213\n assert t1[1].columns._arrays[1][0] == 213\n assert t1[1].columns.columns[1].array[0] == 213\n\n t1[1].data._coldefs._arrays[1][0] = 100\n\n assert t1[1].data[0][1] == 100\n assert t1[1].data._coldefs._arrays[1][0] == 100\n assert t1[1].data._coldefs.columns[1].array[0] == 100\n assert t1[1].columns._arrays[1][0] == 100\n assert t1[1].columns.columns[1].array[0] == 100\n\n t1[1].data._coldefs.columns[1].array[0] = 500\n assert t1[1].data[0][1] == 500\n assert t1[1].data._coldefs._arrays[1][0] == 500\n assert t1[1].data._coldefs.columns[1].array[0] == 500\n assert t1[1].columns._arrays[1][0] == 500\n assert t1[1].columns.columns[1].array[0] == 500\n\n t1[1].columns._arrays[1][0] = 600\n assert t1[1].data[0][1] == 600\n assert t1[1].data._coldefs._arrays[1][0] == 600\n assert t1[1].data._coldefs.columns[1].array[0] == 600\n assert t1[1].columns._arrays[1][0] == 600\n assert t1[1].columns.columns[1].array[0] == 600\n\n t1[1].columns.columns[1].array[0] = 800\n assert t1[1].data[0][1] == 800\n assert t1[1].data._coldefs._arrays[1][0] == 800\n assert t1[1].data._coldefs.columns[1].array[0] == 800\n assert t1[1].columns._arrays[1][0] == 800\n 
assert t1[1].columns.columns[1].array[0] == 800\n\n t1.close()\n\n def test_new_table_with_ndarray(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n\n tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(tbhdu1.data._coldefs.columns[0].array) ==\n id(tbhdu1.data._coldefs._arrays[0]))\n assert (id(tbhdu1.data._coldefs.columns[0].array) ==\n id(tbhdu1.columns.columns[0].array))\n assert (id(tbhdu1.data._coldefs.columns[0].array) ==\n id(tbhdu1.columns._arrays[0]))\n\n # Ensure I can change the value of one data element and it effects\n # all of the others.\n tbhdu1.data[0][1] = 213\n\n assert tbhdu1.data[0][1] == 213\n assert tbhdu1.data._coldefs._arrays[1][0] == 213\n assert tbhdu1.data._coldefs.columns[1].array[0] == 213\n assert tbhdu1.columns._arrays[1][0] == 213\n assert tbhdu1.columns.columns[1].array[0] == 213\n\n tbhdu1.data._coldefs._arrays[1][0] = 100\n\n assert tbhdu1.data[0][1] == 100\n assert tbhdu1.data._coldefs._arrays[1][0] == 100\n assert tbhdu1.data._coldefs.columns[1].array[0] == 100\n assert tbhdu1.columns._arrays[1][0] == 100\n assert tbhdu1.columns.columns[1].array[0] == 100\n\n tbhdu1.data._coldefs.columns[1].array[0] = 500\n assert tbhdu1.data[0][1] == 500\n assert tbhdu1.data._coldefs._arrays[1][0] == 500\n assert tbhdu1.data._coldefs.columns[1].array[0] == 500\n assert tbhdu1.columns._arrays[1][0] == 500\n assert tbhdu1.columns.columns[1].array[0] == 500\n\n tbhdu1.columns._arrays[1][0] = 600\n 
assert tbhdu1.data[0][1] == 600\n assert tbhdu1.data._coldefs._arrays[1][0] == 600\n assert tbhdu1.data._coldefs.columns[1].array[0] == 600\n assert tbhdu1.columns._arrays[1][0] == 600\n assert tbhdu1.columns.columns[1].array[0] == 600\n\n tbhdu1.columns.columns[1].array[0] = 800\n assert tbhdu1.data[0][1] == 800\n assert tbhdu1.data._coldefs._arrays[1][0] == 800\n assert tbhdu1.data._coldefs.columns[1].array[0] == 800\n assert tbhdu1.columns._arrays[1][0] == 800\n assert tbhdu1.columns.columns[1].array[0] == 800\n\n tbhdu1.writeto(self.temp('table1.fits'))\n\n t1 = fits.open(self.temp('table1.fits'))\n\n t1[1].data[0][1] = 213\n\n assert t1[1].data[0][1] == 213\n assert t1[1].data._coldefs._arrays[1][0] == 213\n assert t1[1].data._coldefs.columns[1].array[0] == 213\n assert t1[1].columns._arrays[1][0] == 213\n assert t1[1].columns.columns[1].array[0] == 213\n\n t1[1].data._coldefs._arrays[1][0] = 100\n\n assert t1[1].data[0][1] == 100\n assert t1[1].data._coldefs._arrays[1][0] == 100\n assert t1[1].data._coldefs.columns[1].array[0] == 100\n assert t1[1].columns._arrays[1][0] == 100\n assert t1[1].columns.columns[1].array[0] == 100\n\n t1[1].data._coldefs.columns[1].array[0] = 500\n assert t1[1].data[0][1] == 500\n assert t1[1].data._coldefs._arrays[1][0] == 500\n assert t1[1].data._coldefs.columns[1].array[0] == 500\n assert t1[1].columns._arrays[1][0] == 500\n assert t1[1].columns.columns[1].array[0] == 500\n\n t1[1].columns._arrays[1][0] = 600\n assert t1[1].data[0][1] == 600\n assert t1[1].data._coldefs._arrays[1][0] == 600\n assert t1[1].data._coldefs.columns[1].array[0] == 600\n assert t1[1].columns._arrays[1][0] == 600\n assert t1[1].columns.columns[1].array[0] == 600\n\n t1[1].columns.columns[1].array[0] = 800\n assert t1[1].data[0][1] == 800\n assert t1[1].data._coldefs._arrays[1][0] == 800\n assert t1[1].data._coldefs.columns[1].array[0] == 800\n assert t1[1].columns._arrays[1][0] == 800\n assert t1[1].columns.columns[1].array[0] == 800\n\n t1.close()\n\n 
def test_new_table_with_fits_rec(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu = fits.BinTableHDU.from_columns(coldefs)\n\n tbhdu.data[0][1] = 213\n\n assert tbhdu.data[0][1] == 213\n assert tbhdu.data._coldefs._arrays[1][0] == 213\n assert tbhdu.data._coldefs.columns[1].array[0] == 213\n assert tbhdu.columns._arrays[1][0] == 213\n assert tbhdu.columns.columns[1].array[0] == 213\n\n tbhdu.data._coldefs._arrays[1][0] = 100\n\n assert tbhdu.data[0][1] == 100\n assert tbhdu.data._coldefs._arrays[1][0] == 100\n assert tbhdu.data._coldefs.columns[1].array[0] == 100\n assert tbhdu.columns._arrays[1][0] == 100\n assert tbhdu.columns.columns[1].array[0] == 100\n\n tbhdu.data._coldefs.columns[1].array[0] = 500\n assert tbhdu.data[0][1] == 500\n assert tbhdu.data._coldefs._arrays[1][0] == 500\n assert tbhdu.data._coldefs.columns[1].array[0] == 500\n assert tbhdu.columns._arrays[1][0] == 500\n assert tbhdu.columns.columns[1].array[0] == 500\n\n tbhdu.columns._arrays[1][0] = 600\n assert tbhdu.data[0][1] == 600\n assert tbhdu.data._coldefs._arrays[1][0] == 600\n assert tbhdu.data._coldefs.columns[1].array[0] == 600\n assert tbhdu.columns._arrays[1][0] == 600\n assert tbhdu.columns.columns[1].array[0] == 600\n\n tbhdu.columns.columns[1].array[0] = 800\n assert tbhdu.data[0][1] == 800\n assert tbhdu.data._coldefs._arrays[1][0] == 800\n assert tbhdu.data._coldefs.columns[1].array[0] == 800\n assert tbhdu.columns._arrays[1][0] == 800\n assert tbhdu.columns.columns[1].array[0] == 800\n\n tbhdu.columns.columns[1].array[0] = 312\n\n tbhdu.writeto(self.temp('table1.fits'))\n\n t1 = 
fits.open(self.temp('table1.fits'))\n\n t1[1].data[0][1] = 1\n fr = t1[1].data\n assert t1[1].data[0][1] == 1\n assert t1[1].data._coldefs._arrays[1][0] == 1\n assert t1[1].data._coldefs.columns[1].array[0] == 1\n assert t1[1].columns._arrays[1][0] == 1\n assert t1[1].columns.columns[1].array[0] == 1\n assert fr[0][1] == 1\n assert fr._coldefs._arrays[1][0] == 1\n assert fr._coldefs.columns[1].array[0] == 1\n\n fr._coldefs.columns[1].array[0] = 312\n\n tbhdu1 = fits.BinTableHDU.from_columns(fr)\n\n i = 0\n for row in tbhdu1.data:\n for j in range(len(row)):\n if isinstance(row[j], np.ndarray):\n assert (row[j] == tbhdu.data[i][j]).all()\n else:\n assert row[j] == tbhdu.data[i][j]\n i = i + 1\n\n tbhdu1.data[0][1] = 213\n\n assert t1[1].data[0][1] == 312\n assert t1[1].data._coldefs._arrays[1][0] == 312\n assert t1[1].data._coldefs.columns[1].array[0] == 312\n assert t1[1].columns._arrays[1][0] == 312\n assert t1[1].columns.columns[1].array[0] == 312\n assert fr[0][1] == 312\n assert fr._coldefs._arrays[1][0] == 312\n assert fr._coldefs.columns[1].array[0] == 312\n assert tbhdu1.data[0][1] == 213\n assert tbhdu1.data._coldefs._arrays[1][0] == 213\n assert tbhdu1.data._coldefs.columns[1].array[0] == 213\n assert tbhdu1.columns._arrays[1][0] == 213\n assert tbhdu1.columns.columns[1].array[0] == 213\n\n t1[1].data[0][1] = 10\n\n assert t1[1].data[0][1] == 10\n assert t1[1].data._coldefs._arrays[1][0] == 10\n assert t1[1].data._coldefs.columns[1].array[0] == 10\n assert t1[1].columns._arrays[1][0] == 10\n assert t1[1].columns.columns[1].array[0] == 10\n assert fr[0][1] == 10\n assert fr._coldefs._arrays[1][0] == 10\n assert fr._coldefs.columns[1].array[0] == 10\n assert tbhdu1.data[0][1] == 213\n assert tbhdu1.data._coldefs._arrays[1][0] == 213\n assert tbhdu1.data._coldefs.columns[1].array[0] == 213\n assert tbhdu1.columns._arrays[1][0] == 213\n assert tbhdu1.columns.columns[1].array[0] == 213\n\n tbhdu1.data._coldefs._arrays[1][0] = 666\n\n assert t1[1].data[0][1] == 
10\n assert t1[1].data._coldefs._arrays[1][0] == 10\n assert t1[1].data._coldefs.columns[1].array[0] == 10\n assert t1[1].columns._arrays[1][0] == 10\n assert t1[1].columns.columns[1].array[0] == 10\n assert fr[0][1] == 10\n assert fr._coldefs._arrays[1][0] == 10\n assert fr._coldefs.columns[1].array[0] == 10\n assert tbhdu1.data[0][1] == 666\n assert tbhdu1.data._coldefs._arrays[1][0] == 666\n assert tbhdu1.data._coldefs.columns[1].array[0] == 666\n assert tbhdu1.columns._arrays[1][0] == 666\n assert tbhdu1.columns.columns[1].array[0] == 666\n\n t1.close()\n\n def test_bin_table_hdu_constructor(self):\n counts = np.array([312, 334, 308, 317])\n names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])\n c1 = fits.Column(name='target', format='10A', array=names)\n c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)\n c3 = fits.Column(name='notes', format='A10')\n c4 = fits.Column(name='spectrum', format='5E')\n c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])\n coldefs = fits.ColDefs([c1, c2, c3, c4, c5])\n\n tbhdu1 = fits.BinTableHDU.from_columns(coldefs)\n\n hdu = fits.BinTableHDU(tbhdu1.data)\n\n # Verify that all ndarray objects within the HDU reference the\n # same ndarray.\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.data._coldefs._arrays[0]))\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.columns.columns[0].array))\n assert (id(hdu.data._coldefs.columns[0].array) ==\n id(hdu.columns._arrays[0]))\n\n # Verify that the references in the original HDU are the same as the\n # references in the new HDU.\n assert (id(tbhdu1.data._coldefs.columns[0].array) ==\n id(hdu.data._coldefs._arrays[0]))\n\n # Verify that a change in the new HDU is reflected in both the new\n # and original HDU.\n\n hdu.data[0][1] = 213\n\n assert hdu.data[0][1] == 213\n assert hdu.data._coldefs._arrays[1][0] == 213\n assert hdu.data._coldefs.columns[1].array[0] == 213\n assert hdu.columns._arrays[1][0] == 213\n assert 
hdu.columns.columns[1].array[0] == 213\n assert tbhdu1.data[0][1] == 213\n assert tbhdu1.data._coldefs._arrays[1][0] == 213\n assert tbhdu1.data._coldefs.columns[1].array[0] == 213\n assert tbhdu1.columns._arrays[1][0] == 213\n assert tbhdu1.columns.columns[1].array[0] == 213\n\n hdu.data._coldefs._arrays[1][0] = 100\n\n assert hdu.data[0][1] == 100\n assert hdu.data._coldefs._arrays[1][0] == 100\n assert hdu.data._coldefs.columns[1].array[0] == 100\n assert hdu.columns._arrays[1][0] == 100\n assert hdu.columns.columns[1].array[0] == 100\n assert tbhdu1.data[0][1] == 100\n assert tbhdu1.data._coldefs._arrays[1][0] == 100\n assert tbhdu1.data._coldefs.columns[1].array[0] == 100\n assert tbhdu1.columns._arrays[1][0] == 100\n assert tbhdu1.columns.columns[1].array[0] == 100\n\n hdu.data._coldefs.columns[1].array[0] = 500\n assert hdu.data[0][1] == 500\n assert hdu.data._coldefs._arrays[1][0] == 500\n assert hdu.data._coldefs.columns[1].array[0] == 500\n assert hdu.columns._arrays[1][0] == 500\n assert hdu.columns.columns[1].array[0] == 500\n assert tbhdu1.data[0][1] == 500\n assert tbhdu1.data._coldefs._arrays[1][0] == 500\n assert tbhdu1.data._coldefs.columns[1].array[0] == 500\n assert tbhdu1.columns._arrays[1][0] == 500\n assert tbhdu1.columns.columns[1].array[0] == 500\n\n hdu.columns._arrays[1][0] = 600\n assert hdu.data[0][1] == 600\n assert hdu.data._coldefs._arrays[1][0] == 600\n assert hdu.data._coldefs.columns[1].array[0] == 600\n assert hdu.columns._arrays[1][0] == 600\n assert hdu.columns.columns[1].array[0] == 600\n assert tbhdu1.data[0][1] == 600\n assert tbhdu1.data._coldefs._arrays[1][0] == 600\n assert tbhdu1.data._coldefs.columns[1].array[0] == 600\n assert tbhdu1.columns._arrays[1][0] == 600\n assert tbhdu1.columns.columns[1].array[0] == 600\n\n hdu.columns.columns[1].array[0] = 800\n assert hdu.data[0][1] == 800\n assert hdu.data._coldefs._arrays[1][0] == 800\n assert hdu.data._coldefs.columns[1].array[0] == 800\n assert hdu.columns._arrays[1][0] 
== 800\n assert hdu.columns.columns[1].array[0] == 800\n assert tbhdu1.data[0][1] == 800\n assert tbhdu1.data._coldefs._arrays[1][0] == 800\n assert tbhdu1.data._coldefs.columns[1].array[0] == 800\n assert tbhdu1.columns._arrays[1][0] == 800\n assert tbhdu1.columns.columns[1].array[0] == 800\n\n def test_constructor_name_arg(self):\n \"\"\"testConstructorNameArg\n\n Passing name='...' to the BinTableHDU and TableHDU constructors\n should set the .name attribute and 'EXTNAME' header keyword, and\n override any name in an existing 'EXTNAME' value.\n \"\"\"\n\n for hducls in [fits.BinTableHDU, fits.TableHDU]:\n # First test some default assumptions\n hdu = hducls()\n assert hdu.name == ''\n assert 'EXTNAME' not in hdu.header\n hdu.name = 'FOO'\n assert hdu.name == 'FOO'\n assert hdu.header['EXTNAME'] == 'FOO'\n\n # Passing name to constructor\n hdu = hducls(name='FOO')\n assert hdu.name == 'FOO'\n assert hdu.header['EXTNAME'] == 'FOO'\n\n # And overriding a header with a different extname\n hdr = fits.Header()\n hdr['EXTNAME'] = 'EVENTS'\n hdu = hducls(header=hdr, name='FOO')\n assert hdu.name == 'FOO'\n assert hdu.header['EXTNAME'] == 'FOO'\n\n def test_constructor_ver_arg(self):\n for hducls in [fits.BinTableHDU, fits.TableHDU]:\n # First test some default assumptions\n hdu = hducls()\n assert hdu.ver == 1\n assert 'EXTVER' not in hdu.header\n hdu.ver = 2\n assert hdu.ver == 2\n assert hdu.header['EXTVER'] == 2\n\n # Passing name to constructor\n hdu = hducls(ver=3)\n assert hdu.ver == 3\n assert hdu.header['EXTVER'] == 3\n\n # And overriding a header with a different extver\n hdr = fits.Header()\n hdr['EXTVER'] = 4\n hdu = hducls(header=hdr, ver=5)\n assert hdu.ver == 5\n assert hdu.header['EXTVER'] == 5\n\n def test_unicode_colname(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/5204\n \"Handle unicode FITS BinTable column names on Python 2\"\n \"\"\"\n col = fits.Column(name=u'spam', format='E', array=[42.])\n # This used to raise 
a TypeError, now it works\n fits.BinTableHDU.from_columns([col])\n\n def test_bin_table_with_logical_array(self):\n c1 = fits.Column(name='flag', format='2L',\n array=[[True, False], [False, True]])\n coldefs = fits.ColDefs([c1])\n\n tbhdu1 = fits.BinTableHDU.from_columns(coldefs)\n\n assert (tbhdu1.data.field('flag')[0] ==\n np.array([True, False], dtype=bool)).all()\n assert (tbhdu1.data.field('flag')[1] ==\n np.array([False, True], dtype=bool)).all()\n\n tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)\n\n assert (tbhdu.data.field('flag')[0] ==\n np.array([True, False], dtype=bool)).all()\n assert (tbhdu.data.field('flag')[1] ==\n np.array([False, True], dtype=bool)).all()\n\n def test_fits_rec_column_access(self):\n t = fits.open(self.data('table.fits'))\n tbdata = t[1].data\n assert (tbdata.V_mag == tbdata.field('V_mag')).all()\n assert (tbdata.V_mag == tbdata['V_mag']).all()\n\n t.close()\n\n def test_table_with_zero_width_column(self):\n hdul = fits.open(self.data('zerowidth.fits'))\n tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'\n assert 'ORBPARM' in tbhdu.columns.names\n # The ORBPARM column should not be in the data, though the data should\n # be readable\n assert 'ORBPARM' in tbhdu.data.names\n assert 'ORBPARM' in tbhdu.data.dtype.names\n # Verify that some of the data columns are still correctly accessible\n # by name\n assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'\n assert comparefloats(\n tbhdu.data[0]['STABXYZ'],\n np.array([499.85566663, -1317.99231554, -735.18866164],\n dtype=np.float64))\n assert tbhdu.data[0]['NOSTA'] == 1\n assert tbhdu.data[0]['MNTSTA'] == 0\n assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'\n assert comparefloats(\n tbhdu.data[-1]['STABXYZ'],\n np.array([0.0, 0.0, 0.0], dtype=np.float64))\n assert tbhdu.data[-1]['NOSTA'] == 29\n assert tbhdu.data[-1]['MNTSTA'] == 0\n hdul.writeto(self.temp('newtable.fits'))\n hdul.close()\n hdul = fits.open(self.temp('newtable.fits'))\n tbhdu = hdul[2]\n\n # Verify that the 
previous tests still hold after writing\n assert 'ORBPARM' in tbhdu.columns.names\n assert 'ORBPARM' in tbhdu.data.names\n assert 'ORBPARM' in tbhdu.data.dtype.names\n assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'\n assert comparefloats(\n tbhdu.data[0]['STABXYZ'],\n np.array([499.85566663, -1317.99231554, -735.18866164],\n dtype=np.float64))\n assert tbhdu.data[0]['NOSTA'] == 1\n assert tbhdu.data[0]['MNTSTA'] == 0\n assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'\n assert comparefloats(\n tbhdu.data[-1]['STABXYZ'],\n np.array([0.0, 0.0, 0.0], dtype=np.float64))\n assert tbhdu.data[-1]['NOSTA'] == 29\n assert tbhdu.data[-1]['MNTSTA'] == 0\n hdul.close()\n\n def test_string_column_padding(self):\n a = ['img1', 'img2', 'img3a', 'p']\n s = 'img1\\x00\\x00\\x00\\x00\\x00\\x00' \\\n 'img2\\x00\\x00\\x00\\x00\\x00\\x00' \\\n 'img3a\\x00\\x00\\x00\\x00\\x00' \\\n 'p\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n acol = fits.Column(name='MEMNAME', format='A10',\n array=chararray.array(a))\n ahdu = fits.BinTableHDU.from_columns([acol])\n assert ahdu.data.tostring().decode('raw-unicode-escape') == s\n ahdu.writeto(self.temp('newtable.fits'))\n with fits.open(self.temp('newtable.fits')) as hdul:\n assert hdul[1].data.tostring().decode('raw-unicode-escape') == s\n assert (hdul[1].data['MEMNAME'] == a).all()\n del hdul\n\n ahdu = fits.TableHDU.from_columns([acol])\n with ignore_warnings():\n ahdu.writeto(self.temp('newtable.fits'), overwrite=True)\n\n with fits.open(self.temp('newtable.fits')) as hdul:\n assert (hdul[1].data.tostring().decode('raw-unicode-escape') ==\n s.replace('\\x00', ' '))\n assert (hdul[1].data['MEMNAME'] == a).all()\n ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())\n del hdul\n\n # Now serialize once more as a binary table; padding bytes should\n # revert to zeroes\n ahdu.writeto(self.temp('newtable.fits'), overwrite=True)\n with fits.open(self.temp('newtable.fits')) as hdul:\n assert hdul[1].data.tostring().decode('raw-unicode-escape') == s\n 
assert (hdul[1].data['MEMNAME'] == a).all()\n\n def test_multi_dimensional_columns(self):\n \"\"\"\n Tests the multidimensional column implementation with both numeric\n arrays and string arrays.\n \"\"\"\n\n data = np.rec.array(\n [([0, 1, 2, 3, 4, 5], 'row1' * 2),\n ([6, 7, 8, 9, 0, 1], 'row2' * 2),\n ([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')\n\n thdu = fits.BinTableHDU.from_columns(data)\n # Modify the TDIM fields to my own specification\n thdu.header['TDIM1'] = '(2,3)'\n thdu.header['TDIM2'] = '(4,2)'\n\n thdu.writeto(self.temp('newtable.fits'))\n\n with fits.open(self.temp('newtable.fits')) as hdul:\n thdu = hdul[1]\n\n c1 = thdu.data.field(0)\n c2 = thdu.data.field(1)\n\n assert c1.shape == (3, 3, 2)\n assert c2.shape == (3, 2)\n assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],\n [[6, 7], [8, 9], [0, 1]],\n [[2, 3], [4, 5], [6, 7]]])).all()\n assert (c2 == np.array([['row1', 'row1'],\n ['row2', 'row2'],\n ['row3', 'row3']])).all()\n del c1\n del c2\n del thdu\n del hdul\n\n # Test setting the TDIMn header based on the column data\n data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])\n data['x'] = 1, 2, 3\n data['s'] = 'ok'\n with ignore_warnings():\n fits.writeto(self.temp('newtable.fits'), data, overwrite=True)\n\n t = fits.getdata(self.temp('newtable.fits'))\n\n assert t.field(1).dtype.str[-1] == '5'\n assert t.field(1).shape == (3, 4)\n\n # Like the previous test, but with an extra dimension (a bit more\n # complicated)\n data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])\n data['x'] = 1, 2, 3\n data['s'] = 'ok'\n\n del t\n\n with ignore_warnings():\n fits.writeto(self.temp('newtable.fits'), data, overwrite=True)\n\n t = fits.getdata(self.temp('newtable.fits'))\n\n assert t.field(1).dtype.str[-1] == '5'\n assert t.field(1).shape == (3, 4, 3)\n\n def test_bin_table_init_from_string_array_column(self):\n \"\"\"\n Tests two ways of creating a new `BinTableHDU` from a column of\n string arrays.\n\n This tests for a couple different 
regressions, and ensures that
        both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
        equivalently.

        Some of this is redundant with the following test, but checks some
        subtly different cases.
        """

        data = [[b'abcd', b'efgh'],
                [b'ijkl', b'mnop'],
                [b'qrst', b'uvwx']]

        arr = np.array([(data,), (data,), (data,), (data,), (data,)],
                       dtype=[('S', '(3, 2)S4')])

        with catch_warnings() as w:
            tbhdu1 = fits.BinTableHDU(data=arr)

        assert len(w) == 0

        def test_dims_and_roundtrip(tbhdu):
            assert tbhdu.data['S'].shape == (5, 3, 2)
            assert tbhdu.data['S'].dtype.str.endswith('U4')

            tbhdu.writeto(self.temp('test.fits'), overwrite=True)

            with fits.open(self.temp('test.fits')) as hdul:
                tbhdu2 = hdul[1]
                assert tbhdu2.header['TDIM1'] == '(4,2,3)'
                assert tbhdu2.data['S'].shape == (5, 3, 2)
                assert tbhdu.data['S'].dtype.str.endswith('U4')
                assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])

        test_dims_and_roundtrip(tbhdu1)

        tbhdu2 = fits.BinTableHDU.from_columns(arr)
        test_dims_and_roundtrip(tbhdu2)

    def test_columns_with_truncating_tdim(self):
        """
        According to the FITS standard (section 7.3.2):

        If the number of elements in the array implied by the TDIMn is less
        than the allocated size of the array in the FITS file, then the
        unused trailing elements should be interpreted as containing
        undefined fill values.

        *deep sigh* What this means is if a column has a repeat count larger
        than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
        but TFORM1 = 6I), then instead of this being an outright error we are
        to take the first 4 elements as implied by the TDIM and ignore the
        additional two trailing elements.
        """

        # It's hard to even successfully create a table like this.  I think
        # it *should* be difficult, but once created it should at least be
        # possible to read.
        arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
        arr2 = [1, 2, 3, 4, 5]

        arr = np.array([(arr1, arr2), (arr1, arr2)],
                       dtype=[('a', '(3, 2)S2'), ('b', '5i8')])

        tbhdu = fits.BinTableHDU(data=arr)
        tbhdu.writeto(self.temp('test.fits'))

        with open(self.temp('test.fits'), 'rb') as f:
            raw_bytes = f.read()

        # Artificially truncate TDIM in the header; this seems to be the
        # easiest way to do this while getting around Astropy's insistence on the
        # data and header matching perfectly; again, we have no interest in
        # making it possible to write files in this format, only read them
        with open(self.temp('test.fits'), 'wb') as f:
            f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))

        with fits.open(self.temp('test.fits')) as hdul:
            tbhdu2 = hdul[1]
            assert tbhdu2.header['TDIM1'] == '(2,2,2)'
            assert tbhdu2.header['TFORM1'] == '12A'
            for row in tbhdu2.data:
                assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
                assert np.all(row['b'] == [1, 2, 3, 4, 5])

    def test_string_array_round_trip(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""

        data = [['abc', 'def', 'ghi'],
                ['jkl', 'mno', 'pqr'],
                ['stu', 'vwx', 'yz ']]

        recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])

        t = fits.BinTableHDU(data=recarr)
        t.writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits')) as h:
            assert 'TDIM1' in h[1].header
            assert h[1].header['TDIM1'] == '(3,3,3)'
            assert len(h[1].data) == 2
            assert len(h[1].data[0]) == 1
            assert (h[1].data.field(0)[0] ==
                    np.char.decode(recarr.field(0)[0], 'ascii')).all()

        with fits.open(self.temp('test.fits')) as h:
            # Access the data; I think this is necessary to exhibit the bug
            # reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
            h[1].data[:]
            h.writeto(self.temp('test2.fits'))

        with 
fits.open(self.temp('test2.fits')) as h:\n assert 'TDIM1' in h[1].header\n assert h[1].header['TDIM1'] == '(3,3,3)'\n assert len(h[1].data) == 2\n assert len(h[1].data[0]) == 1\n assert (h[1].data.field(0)[0] ==\n np.char.decode(recarr.field(0)[0], 'ascii')).all()\n\n def test_new_table_with_nd_column(self):\n \"\"\"Regression test for\n https://github.com/spacetelescope/PyFITS/issues/3\n \"\"\"\n\n arra = np.array(['a', 'b'], dtype='|S1')\n arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')\n arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])\n\n cols = [\n fits.Column(name='str', format='1A', array=arra),\n fits.Column(name='strarray', format='4A', dim='(2,2)',\n array=arrb),\n fits.Column(name='intarray', format='4I', dim='(2, 2)',\n array=arrc)\n ]\n\n hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))\n hdu.writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as h:\n # Need to force string arrays to byte arrays in order to compare\n # correctly on Python 3\n assert (h[1].data['str'].encode('ascii') == arra).all()\n assert (h[1].data['strarray'].encode('ascii') == arrb).all()\n assert (h[1].data['intarray'] == arrc).all()\n\n def test_mismatched_tform_and_tdim(self):\n \"\"\"Normally the product of the dimensions listed in a TDIMn keyword\n must be less than or equal to the repeat count in the TFORMn keyword.\n\n This tests that this works if less than (treating the trailing bytes\n as unspecified fill values per the FITS standard) and fails if the\n dimensions specified by TDIMn are greater than the repeat count.\n \"\"\"\n\n arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])\n arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])\n\n cols = [fits.Column(name='a', format='20I', dim='(2,2)',\n array=arra),\n fits.Column(name='b', format='4I', dim='(2,2)',\n array=arrb)]\n\n # The first column has the mismatched repeat count\n hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))\n 
hdu.writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as h:\n assert h[1].header['TFORM1'] == '20I'\n assert h[1].header['TFORM2'] == '4I'\n assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'\n assert (h[1].data['a'] == arra).all()\n assert (h[1].data['b'] == arrb).all()\n assert h[1].data.itemsize == 48 # 16-bits times 24\n\n # If dims is more than the repeat count in the format specifier raise\n # an error\n pytest.raises(VerifyError, fits.Column, name='a', format='2I',\n dim='(2,2)', array=arra)\n\n def test_tdim_of_size_one(self):\n \"\"\"Regression test for https://github.com/astropy/astropy/pull/3580\"\"\"\n\n with fits.open(self.data('tdim.fits')) as hdulist:\n assert hdulist[1].data['V_mag'].shape == (3, 1, 1)\n\n def test_slicing(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52\"\"\"\n\n with fits.open(self.data('table.fits')) as f:\n data = f[1].data\n targets = data.field('target')\n s = data[:]\n assert (s.field('target') == targets).all()\n for n in range(len(targets) + 2):\n s = data[:n]\n assert (s.field('target') == targets[:n]).all()\n s = data[n:]\n assert (s.field('target') == targets[n:]).all()\n s = data[::2]\n assert (s.field('target') == targets[::2]).all()\n s = data[::-1]\n assert (s.field('target') == targets[::-1]).all()\n\n def test_array_slicing(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55\"\"\"\n\n with fits.open(self.data('table.fits')) as f:\n data = f[1].data\n s1 = data[data['target'] == 'NGC1001']\n s2 = data[np.where(data['target'] == 'NGC1001')]\n s3 = data[[0]]\n s4 = data[:1]\n for s in [s1, s2, s3, s4]:\n assert isinstance(s, fits.FITS_rec)\n\n assert comparerecords(s1, s2)\n assert comparerecords(s2, s3)\n assert comparerecords(s3, s4)\n\n def test_array_broadcasting(self):\n \"\"\"\n Regression test for https://github.com/spacetelescope/PyFITS/pull/48\n \"\"\"\n\n with fits.open(self.data('table.fits')) as hdu:\n 
data = hdu[1].data\n data['V_mag'] = 0\n assert np.all(data['V_mag'] == 0)\n\n data['V_mag'] = 1\n assert np.all(data['V_mag'] == 1)\n\n for container in (list, tuple, np.array):\n data['V_mag'] = container([1, 2, 3])\n assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))\n\n def test_array_slicing_readonly(self):\n \"\"\"\n Like test_array_slicing but with the file opened in 'readonly' mode.\n Regression test for a crash when slicing readonly memmap'd tables.\n \"\"\"\n\n with fits.open(self.data('table.fits'), mode='readonly') as f:\n data = f[1].data\n s1 = data[data['target'] == 'NGC1001']\n s2 = data[np.where(data['target'] == 'NGC1001')]\n s3 = data[[0]]\n s4 = data[:1]\n for s in [s1, s2, s3, s4]:\n assert isinstance(s, fits.FITS_rec)\n assert comparerecords(s1, s2)\n assert comparerecords(s2, s3)\n assert comparerecords(s3, s4)\n\n def test_dump_load_round_trip(self):\n \"\"\"\n A simple test of the dump/load methods; dump the data, column, and\n header files and try to reload the table from them.\n \"\"\"\n\n hdul = fits.open(self.data('table.fits'))\n tbhdu = hdul[1]\n datafile = self.temp('data.txt')\n cdfile = self.temp('coldefs.txt')\n hfile = self.temp('header.txt')\n\n tbhdu.dump(datafile, cdfile, hfile)\n\n new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)\n\n assert comparerecords(tbhdu.data, new_tbhdu.data)\n\n # Double check that the headers are equivalent\n assert str(tbhdu.header) == str(new_tbhdu.header)\n\n hdul.close()\n\n def test_dump_load_array_colums(self):\n \"\"\"\n Regression test for https://github.com/spacetelescope/PyFITS/issues/22\n\n Ensures that a table containing a multi-value array column can be\n dumped and loaded successfully.\n \"\"\"\n\n data = np.rec.array([('a', [1, 2, 3, 4], 0.1),\n ('b', [5, 6, 7, 8], 0.2)],\n formats='a1,4i4,f8')\n tbhdu = fits.BinTableHDU.from_columns(data)\n datafile = self.temp('data.txt')\n cdfile = self.temp('coldefs.txt')\n hfile = self.temp('header.txt')\n\n tbhdu.dump(datafile, 
cdfile, hfile)
        new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
        assert comparerecords(tbhdu.data, new_tbhdu.data)
        assert str(tbhdu.header) == str(new_tbhdu.header)

    def test_load_guess_format(self):
        """
        Tests loading a table dump with no supplied coldefs or header, so that
        the table format has to be guessed at. There is of course no exact
        science to this; the table that's produced simply uses sensible guesses
        for that format. Ideally this should never have to be used.
        """

        # Create a table containing a variety of data types.
        a0 = np.array([False, True, False], dtype=bool)
        c0 = fits.Column(name='c0', format='L', array=a0)

        # Format X currently not supported by the format
        # a1 = np.array([[0], [1], [0]], dtype=np.uint8)
        # c1 = fits.Column(name='c1', format='X', array=a1)

        a2 = np.array([1, 128, 255], dtype=np.uint8)
        c2 = fits.Column(name='c2', format='B', array=a2)
        a3 = np.array([-30000, 1, 256], dtype=np.int16)
        c3 = fits.Column(name='c3', format='I', array=a3)
        a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
        c4 = fits.Column(name='c4', format='J', array=a4)
        a5 = np.array(['a', 'abc', 'ab'])
        c5 = fits.Column(name='c5', format='A3', array=a5)
        a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
        c6 = fits.Column(name='c6', format='D', array=a6)
        a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
                      dtype=np.complex128)
        c7 = fits.Column(name='c7', format='M', array=a7)
        a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
        c8 = fits.Column(name='c8', format='PJ()', array=a8)

        tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])

        datafile = self.temp('data.txt')
        tbhdu.dump(datafile)

        new_tbhdu = fits.BinTableHDU.load(datafile)

        # In this particular case the record data at least should be equivalent
        assert comparerecords(tbhdu.data, new_tbhdu.data)

    def test_attribute_field_shadowing(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86

        Numpy recarray objects have a poorly-considered feature of allowing
        field access by attribute lookup. However, if a field name coincides
        with an existing attribute/method of the array, the existing name takes
        precedence (making the attribute-based field lookup completely unreliable
        in general cases).

        This ensures that any FITS_rec attributes still work correctly even
        when there is a field with the same name as that attribute.
        """

        c1 = fits.Column(name='names', format='I', array=[1])
        c2 = fits.Column(name='formats', format='I', array=[2])
        c3 = fits.Column(name='other', format='I', array=[3])

        t = fits.BinTableHDU.from_columns([c1, c2, c3])
        assert t.data.names == ['names', 'formats', 'other']
        assert t.data.formats == ['I'] * 3
        assert (t.data['names'] == [1]).all()
        assert (t.data['formats'] == [2]).all()
        assert (t.data.other == [3]).all()

    def test_table_from_bool_fields(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113

        Tests creating a table from a recarray containing numpy.bool columns.
        """

        array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
        thdu = fits.BinTableHDU.from_columns(array)
        assert thdu.columns.formats == ['L', 'L']
        assert comparerecords(thdu.data, array)

        # Test round trip
        thdu.writeto(self.temp('table.fits'))
        data = fits.getdata(self.temp('table.fits'), ext=1)
        assert thdu.columns.formats == ['L', 'L']
        assert comparerecords(data, array)

    def test_table_from_bool_fields2(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215

        Tests the case where a multi-field ndarray (not a recarray) containing
        a bool field is used to initialize a `BinTableHDU`.
        """

        arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
        hdu = fits.BinTableHDU(data=arr)
        assert (hdu.data['a'] == arr['a']).all()

    def 
test_bool_column_update(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139\"\"\"\n\n c1 = fits.Column('F1', 'L', array=[True, False])\n c2 = fits.Column('F2', 'L', array=[False, True])\n thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))\n thdu.writeto(self.temp('table.fits'))\n\n with fits.open(self.temp('table.fits'), mode='update') as hdul:\n hdul[1].data['F1'][1] = True\n hdul[1].data['F2'][0] = True\n\n with fits.open(self.temp('table.fits')) as hdul:\n assert (hdul[1].data['F1'] == [True, True]).all()\n assert (hdul[1].data['F2'] == [True, True]).all()\n\n def test_missing_tnull(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197\"\"\"\n\n c = fits.Column('F1', 'A3', null='---',\n array=np.array(['1.0', '2.0', '---', '3.0']),\n ascii=True)\n table = fits.TableHDU.from_columns([c])\n table.writeto(self.temp('test.fits'))\n\n # Now let's delete the TNULL1 keyword, making this essentially\n # unreadable\n with fits.open(self.temp('test.fits'), mode='update') as h:\n h[1].header['TFORM1'] = 'E3'\n del h[1].header['TNULL1']\n\n with fits.open(self.temp('test.fits')) as h:\n pytest.raises(ValueError, lambda: h[1].data['F1'])\n\n try:\n with fits.open(self.temp('test.fits')) as h:\n h[1].data['F1']\n except ValueError as e:\n assert str(e).endswith(\n \"the header may be missing the necessary TNULL1 \"\n \"keyword or the table contains invalid data\")\n\n def test_blank_field_zero(self):\n \"\"\"Regression test for https://github.com/astropy/astropy/issues/5134\n\n Blank values in numerical columns of ASCII tables should be replaced\n with zeros, so they can be loaded into numpy arrays.\n\n When a TNULL value is set and there are blank fields not equal to that\n value, they should be replaced with zeros.\n \"\"\"\n\n # Test an integer column with blank string as null\n nullval1 = u' '\n\n c1 = fits.Column('F1', format='I8', null=nullval1,\n array=np.array([0, 1, 2, 3, 4]),\n ascii=True)\n 
table = fits.TableHDU.from_columns([c1])\n table.writeto(self.temp('ascii_null.fits'))\n\n # Replace the 1st col, 3rd row, with a null field.\n with open(self.temp('ascii_null.fits'), mode='r+') as h:\n nulled = h.read().replace(u'2 ', u' ')\n h.seek(0)\n h.write(nulled)\n\n with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:\n assert f[1].data[2][0] == 0\n\n # Test a float column with a null value set and blank fields.\n nullval2 = 'NaN'\n c2 = fits.Column('F1', format='F12.8', null=nullval2,\n array=np.array([1.0, 2.0, 3.0, 4.0]),\n ascii=True)\n table = fits.TableHDU.from_columns([c2])\n table.writeto(self.temp('ascii_null2.fits'))\n\n # Replace the 1st col, 3rd row, with a null field.\n with open(self.temp('ascii_null2.fits'), mode='r+') as h:\n nulled = h.read().replace(u'3.00000000', u' ')\n h.seek(0)\n h.write(nulled)\n\n with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:\n # (Currently it should evaluate to 0.0, but if a TODO in fitsrec is\n # completed, then it should evaluate to NaN.)\n assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])\n\n def test_column_array_type_mismatch(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218\"\"\"\n\n arr = [-99] * 20\n col = fits.Column('mag', format='E', array=arr)\n assert (arr == col.array).all()\n\n def test_table_none(self):\n \"\"\"Regression test\n for https://github.com/spacetelescope/PyFITS/issues/27\n \"\"\"\n\n with fits.open(self.data('tb.fits')) as h:\n h[1].data\n h[1].data = None\n assert isinstance(h[1].data, fits.FITS_rec)\n assert len(h[1].data) == 0\n h[1].writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as h:\n assert h[1].header['NAXIS'] == 2\n assert h[1].header['NAXIS1'] == 12\n assert h[1].header['NAXIS2'] == 0\n assert isinstance(h[1].data, fits.FITS_rec)\n assert len(h[1].data) == 0\n\n def test_unncessary_table_load(self):\n \"\"\"Test unnecessary parsing and processing of FITS tables when writing\n 
direclty from one FITS file to a new file without first reading the\n data for user manipulation.\n\n In other words, it should be possible to do a direct copy of the raw\n data without unecessary processing of the data.\n \"\"\"\n\n with fits.open(self.data('table.fits')) as h:\n h[1].writeto(self.temp('test.fits'))\n\n # Since this was a direct copy the h[1].data attribute should not have\n # even been accessed (since this means the data was read and parsed)\n assert 'data' not in h[1].__dict__\n\n with fits.open(self.data('table.fits')) as h1:\n with fits.open(self.temp('test.fits')) as h2:\n assert str(h1[1].header) == str(h2[1].header)\n assert comparerecords(h1[1].data, h2[1].data)\n\n def test_table_from_columns_of_other_table(self):\n \"\"\"Tests a rare corner case where the columns of an existing table\n are used to create a new table with the new_table function. In this\n specific case, however, the existing table's data has not been read\n yet, so new_table has to get at it through the Delayed proxy.\n\n Note: Although this previously tested new_table it now uses\n BinTableHDU.from_columns directly, around which new_table is a mere\n wrapper.\n \"\"\"\n\n hdul = fits.open(self.data('table.fits'))\n\n # Make sure the column array is in fact delayed...\n assert isinstance(hdul[1].columns._arrays[0], Delayed)\n\n # Create a new table...\n t = fits.BinTableHDU.from_columns(hdul[1].columns)\n\n # The original columns should no longer be delayed...\n assert not isinstance(hdul[1].columns._arrays[0], Delayed)\n\n t.writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as hdul2:\n assert comparerecords(hdul[1].data, hdul2[1].data)\n\n hdul.close()\n\n def test_bintable_to_asciitable(self):\n \"\"\"Tests initializing a TableHDU with the data from a BinTableHDU.\"\"\"\n\n with fits.open(self.data('tb.fits')) as hdul:\n tbdata = hdul[1].data\n tbhdu = fits.TableHDU(data=tbdata)\n with ignore_warnings():\n tbhdu.writeto(self.temp('test.fits'), 
overwrite=True)\n with fits.open(self.temp('test.fits')) as hdul2:\n tbdata2 = hdul2[1].data\n assert np.all(tbdata['c1'] == tbdata2['c1'])\n assert np.all(tbdata['c2'] == tbdata2['c2'])\n # c3 gets converted from float32 to float64 when writing\n # test.fits, so cast to float32 before testing that the correct\n # value is retrieved\n assert np.all(tbdata['c3'].astype(np.float32) ==\n tbdata2['c3'].astype(np.float32))\n # c4 is a boolean column in the original table; we want ASCII\n # columns to convert these to columns of 'T'/'F' strings\n assert np.all(np.where(tbdata['c4'], 'T', 'F') ==\n tbdata2['c4'])\n\n def test_pickle(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/1597\n\n Tests for pickling FITS_rec objects\n \"\"\"\n\n # open existing FITS tables (images pickle by default, no test needed):\n with fits.open(self.data('tb.fits')) as btb:\n # Test column array is delayed and can pickle\n assert isinstance(btb[1].columns._arrays[0], Delayed)\n\n btb_pd = pickle.dumps(btb[1].data)\n btb_pl = pickle.loads(btb_pd)\n\n # It should not be delayed any more\n assert not isinstance(btb[1].columns._arrays[0], Delayed)\n\n assert comparerecords(btb_pl, btb[1].data)\n\n with fits.open(self.data('ascii.fits')) as asc:\n asc_pd = pickle.dumps(asc[1].data)\n asc_pl = pickle.loads(asc_pd)\n assert comparerecords(asc_pl, asc[1].data)\n\n with fits.open(self.data('random_groups.fits')) as rgr:\n rgr_pd = pickle.dumps(rgr[0].data)\n rgr_pl = pickle.loads(rgr_pd)\n assert comparerecords(rgr_pl, rgr[0].data)\n\n with fits.open(self.data('zerowidth.fits')) as zwc:\n # Doesn't pickle zero-width (_phanotm) column 'ORBPARM'\n with ignore_warnings():\n zwc_pd = pickle.dumps(zwc[2].data)\n zwc_pl = pickle.loads(zwc_pd)\n assert comparerecords(zwc_pl, zwc[2].data)\n\n def test_zero_length_table(self):\n array = np.array([], dtype=[\n ('a', 'i8'),\n ('b', 'S64'),\n ('c', ('i4', (3, 2)))])\n hdu = fits.BinTableHDU(array)\n assert hdu.header['NAXIS1'] == 
96\n assert hdu.header['NAXIS2'] == 0\n assert hdu.header['TDIM3'] == '(2,3)'\n\n field = hdu.data.field(1)\n assert field.shape == (0,)\n\n def test_dim_column_byte_order_mismatch(self):\n \"\"\"\n When creating a table column with non-trivial TDIMn, and\n big-endian array data read from an existing FITS file, the data\n should not be unnecessarily byteswapped.\n\n Regression test for https://github.com/astropy/astropy/issues/3561\n \"\"\"\n\n data = fits.getdata(self.data('random_groups.fits'))['DATA']\n col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',\n format='1152E')\n thdu = fits.BinTableHDU.from_columns([col])\n thdu.writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as hdul:\n assert np.all(hdul[1].data['TEST'] == data)\n\n def test_fits_rec_from_existing(self):\n \"\"\"\n Tests creating a `FITS_rec` object with `FITS_rec.from_columns`\n from an existing `FITS_rec` object read from a FITS file.\n\n This ensures that the per-column arrays are updated properly.\n\n Regression test for https://github.com/spacetelescope/PyFITS/issues/99\n \"\"\"\n\n # The use case that revealed this problem was trying to create a new\n # table from an existing table, but with additional rows so that we can\n # append data from a second table (with the same column structure)\n\n data1 = fits.getdata(self.data('tb.fits'))\n data2 = fits.getdata(self.data('tb.fits'))\n nrows = len(data1) + len(data2)\n\n merged = fits.FITS_rec.from_columns(data1, nrows=nrows)\n merged[len(data1):] = data2\n mask = merged['c1'] > 1\n masked = merged[mask]\n\n # The test table only has two rows, only the second of which is > 1 for\n # the 'c1' column\n assert comparerecords(data1[1:], masked[:1])\n assert comparerecords(data1[1:], masked[1:])\n\n # Double check that the original data1 table hasn't been affected by\n # its use in creating the \"merged\" table\n assert comparerecords(data1, fits.getdata(self.data('tb.fits')))\n\n def 
test_update_string_column_inplace(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/4452\n\n Ensure that changes to values in a string column are saved when\n a file is opened in ``mode='update'``.\n \"\"\"\n\n data = np.array([('abc',)], dtype=[('a', 'S3')])\n fits.writeto(self.temp('test.fits'), data)\n\n with fits.open(self.temp('test.fits'), mode='update') as hdul:\n hdul[1].data['a'][0] = 'XYZ'\n assert hdul[1].data['a'][0] == 'XYZ'\n\n with fits.open(self.temp('test.fits')) as hdul:\n assert hdul[1].data['a'][0] == 'XYZ'\n\n # Test update but with a non-trivial TDIMn\n data = np.array([([['abc', 'def', 'geh'],\n ['ijk', 'lmn', 'opq']],)],\n dtype=[('a', ('S3', (2, 3)))])\n\n fits.writeto(self.temp('test2.fits'), data)\n\n expected = [['abc', 'def', 'geh'],\n ['ijk', 'XYZ', 'opq']]\n\n with fits.open(self.temp('test2.fits'), mode='update') as hdul:\n assert hdul[1].header['TDIM1'] == '(3,3,2)'\n # Note: Previously I wrote data['a'][0][1, 1] to address\n # the single row. 
However, this is broken for chararray because\n # data['a'][0] does *not* return a view of the original array--this\n # is a bug in chararray though and not a bug in any FITS-specific\n # code so we'll roll with it for now...\n # (by the way the bug in question is fixed in newer Numpy versions)\n hdul[1].data['a'][0, 1, 1] = 'XYZ'\n assert np.all(hdul[1].data['a'][0] == expected)\n\n with fits.open(self.temp('test2.fits')) as hdul:\n assert hdul[1].header['TDIM1'] == '(3,3,2)'\n assert np.all(hdul[1].data['a'][0] == expected)\n\n @pytest.mark.skipif(str('not HAVE_OBJGRAPH'))\n def test_reference_leak(self):\n \"\"\"Regression test for https://github.com/astropy/astropy/pull/520\"\"\"\n\n def readfile(filename):\n with fits.open(filename) as hdul:\n data = hdul[1].data.copy()\n\n for colname in data.dtype.names:\n data[colname]\n\n with _refcounting('FITS_rec'):\n readfile(self.data('memtest.fits'))\n\n @pytest.mark.skipif(str('not HAVE_OBJGRAPH'))\n def test_reference_leak2(self, tmpdir):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/pull/4539\n\n This actually re-runs a small set of tests that I found, during\n careful testing, exhibited the reference leaks fixed by #4539, but\n now with reference counting around each test to ensure that the\n leaks are fixed.\n \"\"\"\n\n from .test_core import TestCore\n from .test_connect import TestMultipleHDU\n\n t1 = TestCore()\n t1.setup()\n try:\n with _refcounting('FITS_rec'):\n t1.test_add_del_columns2()\n finally:\n t1.teardown()\n del t1\n\n t2 = self.__class__()\n for test_name in ['test_recarray_to_bintablehdu',\n 'test_numpy_ndarray_to_bintablehdu',\n 'test_new_table_from_recarray',\n 'test_new_fitsrec']:\n t2.setup()\n try:\n with _refcounting('FITS_rec'):\n getattr(t2, test_name)()\n finally:\n t2.teardown()\n del t2\n\n t3 = TestMultipleHDU()\n t3.setup_class()\n try:\n with _refcounting('FITS_rec'):\n t3.test_read(tmpdir)\n finally:\n t3.teardown_class()\n del t3\n\n def 
test_dump_clobber_vs_overwrite(self):\n with fits.open(self.data('table.fits')) as hdul:\n tbhdu = hdul[1]\n datafile = self.temp('data.txt')\n cdfile = self.temp('coldefs.txt')\n hfile = self.temp('header.txt')\n tbhdu.dump(datafile, cdfile, hfile)\n tbhdu.dump(datafile, cdfile, hfile, overwrite=True)\n with catch_warnings(AstropyDeprecationWarning) as warning_lines:\n tbhdu.dump(datafile, cdfile, hfile, clobber=True)\n assert warning_lines[0].category == AstropyDeprecationWarning\n assert (str(warning_lines[0].message) == '\"clobber\" was '\n 'deprecated in version 2.0 and will be removed in a '\n 'future version. Use argument \"overwrite\" instead.')\n\n def test_pseudo_unsigned_ints(self):\n \"\"\"\n Tests updating a table column containing pseudo-unsigned ints.\n \"\"\"\n\n data = np.array([1, 2, 3], dtype=np.uint32)\n col = fits.Column(name='A', format='1J', bzero=2**31, array=data)\n thdu = fits.BinTableHDU.from_columns([col])\n thdu.writeto(self.temp('test.fits'))\n\n # Test that the file wrote out correctly\n with fits.open(self.temp('test.fits'), uint=True) as hdul:\n hdu = hdul[1]\n assert 'TZERO1' in hdu.header\n assert hdu.header['TZERO1'] == 2**31\n assert hdu.data['A'].dtype == np.dtype('uint32')\n assert np.all(hdu.data['A'] == data)\n\n # Test updating the unsigned int data\n hdu.data['A'][0] = 99\n hdu.writeto(self.temp('test2.fits'))\n\n with fits.open(self.temp('test2.fits'), uint=True) as hdul:\n hdu = hdul[1]\n assert 'TZERO1' in hdu.header\n assert hdu.header['TZERO1'] == 2**31\n assert hdu.data['A'].dtype == np.dtype('uint32')\n assert np.all(hdu.data['A'] == [99, 2, 3])\n\n def test_column_with_scaling(self):\n \"\"\"Check that a scaled column if correctly saved once it is modified.\n Regression test for https://github.com/astropy/astropy/issues/6887\n \"\"\"\n c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),\n format='1I', bscale=1, bzero=32768)\n S = fits.HDUList([fits.PrimaryHDU(),\n fits.BinTableHDU.from_columns([c1])])\n\n 
# Change value in memory\n S[1].data['c1'][0] = 2\n S.writeto(self.temp(\"a.fits\"))\n assert S[1].data['c1'] == 2\n\n # Read and change value in memory\n with fits.open(self.temp(\"a.fits\")) as X:\n X[1].data['c1'][0] = 10\n assert X[1].data['c1'][0] == 10\n\n # Write back to file\n X.writeto(self.temp(\"b.fits\"))\n\n # Now check the file\n with fits.open(self.temp(\"b.fits\")) as hdul:\n assert hdul[1].data['c1'][0] == 10\n\n\n@contextlib.contextmanager\ndef _refcounting(type_):\n \"\"\"\n Perform the body of a with statement with reference counting for the\n given type (given by class name)--raises an assertion error if there\n are more unfreed objects of the given type than when we entered the\n with statement.\n \"\"\"\n\n gc.collect()\n refcount = len(objgraph.by_type(type_))\n yield refcount\n gc.collect()\n assert len(objgraph.by_type(type_)) <= refcount, \\\n \"More {0!r} objects still in memory than before.\"\n\n\nclass TestVLATables(FitsTestCase):\n \"\"\"Tests specific to tables containing variable-length arrays.\"\"\"\n\n def test_variable_length_columns(self):\n def test(format_code):\n col = fits.Column(name='QUAL_SPE', format=format_code,\n array=[[0] * 1571] * 225)\n tb_hdu = fits.BinTableHDU.from_columns([col])\n pri_hdu = fits.PrimaryHDU()\n hdu_list = fits.HDUList([pri_hdu, tb_hdu])\n with ignore_warnings():\n hdu_list.writeto(self.temp('toto.fits'), overwrite=True)\n\n with fits.open(self.temp('toto.fits')) as toto:\n q = toto[1].data.field('QUAL_SPE')\n assert (q[0][4:8] ==\n np.array([0, 0, 0, 0], dtype=np.uint8)).all()\n assert toto[1].columns[0].format.endswith('J(1571)')\n\n for code in ('PJ()', 'QJ()'):\n test(code)\n\n def test_extend_variable_length_array(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54\"\"\"\n\n def test(format_code):\n arr = [[1] * 10] * 10\n col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)\n col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)\n tb_hdu = 
fits.BinTableHDU.from_columns([col1, col2], nrows=15)\n # This asserts that the normal 'scalar' column's length was extended\n assert len(tb_hdu.data['TESTSCA']) == 15\n # And this asserts that the VLF column was extended in the same manner\n assert len(tb_hdu.data['TESTVLF']) == 15\n # We can't compare the whole array since the _VLF is an array of\n # objects, but comparing just the edge case rows should suffice\n assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()\n assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()\n assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()\n assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()\n\n for code in ('PJ()', 'QJ()'):\n test(code)\n\n def test_variable_length_table_format_pd_from_object_array(self):\n def test(format_code):\n a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),\n np.array([0.0])], 'O')\n acol = fits.Column(name='testa', format=format_code, array=a)\n tbhdu = fits.BinTableHDU.from_columns([acol])\n with ignore_warnings():\n tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)\n with fits.open(self.temp('newtable.fits')) as tbhdu1:\n assert tbhdu1[1].columns[0].format.endswith('D(2)')\n for j in range(3):\n for i in range(len(a[j])):\n assert tbhdu1[1].data.field(0)[j][i] == a[j][i]\n\n for code in ('PD()', 'QD()'):\n test(code)\n\n def test_variable_length_table_format_pd_from_list(self):\n def test(format_code):\n a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),\n np.array([0.0])]\n acol = fits.Column(name='testa', format=format_code, array=a)\n tbhdu = fits.BinTableHDU.from_columns([acol])\n with ignore_warnings():\n tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)\n\n with fits.open(self.temp('newtable.fits')) as tbhdu1:\n assert tbhdu1[1].columns[0].format.endswith('D(2)')\n for j in range(3):\n for i in range(len(a[j])):\n assert tbhdu1[1].data.field(0)[j][i] == a[j][i]\n\n for code in ('PD()', 'QD()'):\n test(code)\n\n def 
test_variable_length_table_format_pa_from_object_array(self):\n def test(format_code):\n a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),\n np.array(['f'])], 'O')\n acol = fits.Column(name='testa', format=format_code, array=a)\n tbhdu = fits.BinTableHDU.from_columns([acol])\n with ignore_warnings():\n tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)\n\n with fits.open(self.temp('newtable.fits')) as hdul:\n assert hdul[1].columns[0].format.endswith('A(3)')\n for j in range(3):\n for i in range(len(a[j])):\n assert hdul[1].data.field(0)[j][i] == a[j][i]\n\n for code in ('PA()', 'QA()'):\n test(code)\n\n def test_variable_length_table_format_pa_from_list(self):\n def test(format_code):\n a = ['a', 'ab', 'abc']\n acol = fits.Column(name='testa', format=format_code, array=a)\n tbhdu = fits.BinTableHDU.from_columns([acol])\n with ignore_warnings():\n tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)\n\n with fits.open(self.temp('newtable.fits')) as hdul:\n assert hdul[1].columns[0].format.endswith('A(3)')\n for j in range(3):\n for i in range(len(a[j])):\n assert hdul[1].data.field(0)[j][i] == a[j][i]\n\n for code in ('PA()', 'QA()'):\n test(code)\n\n def test_getdata_vla(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200\"\"\"\n\n def test(format_code):\n col = fits.Column(name='QUAL_SPE', format=format_code,\n array=[np.arange(1572)] * 225)\n tb_hdu = fits.BinTableHDU.from_columns([col])\n pri_hdu = fits.PrimaryHDU()\n hdu_list = fits.HDUList([pri_hdu, tb_hdu])\n with ignore_warnings():\n hdu_list.writeto(self.temp('toto.fits'), overwrite=True)\n\n data = fits.getdata(self.temp('toto.fits'))\n\n # Need to compare to the original data row by row since the FITS_rec\n # returns an array of _VLA objects\n for row_a, row_b in zip(data['QUAL_SPE'], col.array):\n assert (row_a == row_b).all()\n\n for code in ('PJ()', 'QJ()'):\n test(code)\n\n def test_copy_vla(self):\n \"\"\"\n Regression test for 
https://github.com/spacetelescope/PyFITS/issues/47\n \"\"\"\n\n # Make a file containing a couple of VLA tables\n arr1 = [np.arange(n + 1) for n in range(255)]\n arr2 = [np.arange(255, 256 + n) for n in range(255)]\n\n # A dummy non-VLA column needed to reproduce issue #47\n c = fits.Column('test', format='J', array=np.arange(255))\n c1 = fits.Column('A', format='PJ', array=arr1)\n c2 = fits.Column('B', format='PJ', array=arr2)\n t1 = fits.BinTableHDU.from_columns([c, c1])\n t2 = fits.BinTableHDU.from_columns([c, c2])\n\n hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])\n hdul.writeto(self.temp('test.fits'), overwrite=True)\n\n # Just test that the test file wrote out correctly\n with fits.open(self.temp('test.fits')) as h:\n assert h[1].header['TFORM2'] == 'PJ(255)'\n assert h[2].header['TFORM2'] == 'PJ(255)'\n assert comparerecords(h[1].data, t1.data)\n assert comparerecords(h[2].data, t2.data)\n\n # Try copying the second VLA and writing to a new file\n with fits.open(self.temp('test.fits')) as h:\n new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)\n new_hdu.writeto(self.temp('test3.fits'))\n\n with fits.open(self.temp('test3.fits')) as h2:\n assert comparerecords(h2[1].data, t2.data)\n\n new_hdul = fits.HDUList([fits.PrimaryHDU()])\n new_hdul.writeto(self.temp('test2.fits'))\n\n # Open several copies of the test file and append copies of the second\n # VLA table\n with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:\n for _ in range(2):\n with fits.open(self.temp('test.fits')) as h:\n new_hdul.append(h[2])\n new_hdul.flush()\n\n # Test that all the VLA copies wrote correctly\n with fits.open(self.temp('test2.fits')) as new_hdul:\n for idx in range(1, 3):\n assert comparerecords(new_hdul[idx].data, t2.data)\n\n\n# These are tests that solely test the Column and ColDefs interfaces and\n# related functionality without directly involving full tables; currently there\n# are few of these but I expect there to be more as I improve the test 
coverage\nclass TestColumnFunctions(FitsTestCase):\n def test_column_format_interpretation(self):\n \"\"\"\n Test to ensure that when Numpy-style record formats are passed in to\n the Column constructor for the format argument, they are recognized so\n long as it's unambiguous (where \"unambiguous\" here is questionable\n since Numpy is case insensitive when parsing the format codes. But\n their \"proper\" case is lower-case, so we can accept that. Basically,\n actually, any key in the NUMPY2FITS dict should be accepted.\n \"\"\"\n\n for recformat, fitsformat in NUMPY2FITS.items():\n c = fits.Column('TEST', np.dtype(recformat))\n c.format == fitsformat\n c = fits.Column('TEST', recformat)\n c.format == fitsformat\n c = fits.Column('TEST', fitsformat)\n c.format == fitsformat\n\n # Test a few cases that are ambiguous in that they *are* valid binary\n # table formats though not ones that are likely to be used, but are\n # also valid common ASCII table formats\n c = fits.Column('TEST', 'I4')\n assert c.format == 'I4'\n assert c.format.format == 'I'\n assert c.format.width == 4\n\n c = fits.Column('TEST', 'F15.8')\n assert c.format == 'F15.8'\n assert c.format.format == 'F'\n assert c.format.width == 15\n assert c.format.precision == 8\n\n c = fits.Column('TEST', 'E15.8')\n assert c.format.format == 'E'\n assert c.format.width == 15\n assert c.format.precision == 8\n\n c = fits.Column('TEST', 'D15.8')\n assert c.format.format == 'D'\n assert c.format.width == 15\n assert c.format.precision == 8\n\n # zero-precision should be allowed as well, for float types\n # https://github.com/astropy/astropy/issues/3422\n c = fits.Column('TEST', 'F10.0')\n assert c.format.format == 'F'\n assert c.format.width == 10\n assert c.format.precision == 0\n\n c = fits.Column('TEST', 'E10.0')\n assert c.format.format == 'E'\n assert c.format.width == 10\n assert c.format.precision == 0\n\n c = fits.Column('TEST', 'D10.0')\n assert c.format.format == 'D'\n assert c.format.width == 10\n 
assert c.format.precision == 0\n\n # These are a couple cases where the format code is a valid binary\n # table format, and is not strictly a valid ASCII table format but\n # could be *interpreted* as one by appending a default width. This\n # will only happen either when creating an ASCII table or when\n # explicitly specifying ascii=True when the column is created\n c = fits.Column('TEST', 'I')\n assert c.format == 'I'\n assert c.format.recformat == 'i2'\n c = fits.Column('TEST', 'I', ascii=True)\n assert c.format == 'I10'\n\n c = fits.Column('TEST', 'E')\n assert c.format == 'E'\n assert c.format.recformat == 'f4'\n c = fits.Column('TEST', 'E', ascii=True)\n assert c.format == 'E15.7'\n\n # F is not a valid binary table format so it should be unambiguously\n # treated as an ASCII column\n c = fits.Column('TEST', 'F')\n assert c.format == 'F16.7'\n\n c = fits.Column('TEST', 'D')\n assert c.format == 'D'\n assert c.format.recformat == 'f8'\n c = fits.Column('TEST', 'D', ascii=True)\n assert c.format == 'D25.17'\n\n def test_zero_precision_float_column(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/3422\n \"\"\"\n\n c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])\n # The decimal places will be clipped\n t = fits.TableHDU.from_columns([c])\n t.writeto(self.temp('test.fits'))\n\n with fits.open(self.temp('test.fits')) as hdul:\n assert hdul[1].header['TFORM1'] == 'F5.0'\n assert hdul[1].data['TEST'].dtype == np.dtype('float64')\n assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])\n\n # Check how the raw data looks\n raw = np.rec.recarray.field(hdul[1].data, 'TEST')\n assert raw.tostring() == b' 1. 2. 
3.'\n\n def test_column_array_type_mismatch(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218\"\"\"\n\n arr = [-99] * 20\n col = fits.Column('mag', format='E', array=arr)\n assert (arr == col.array).all()\n\n def test_new_coldefs_with_invalid_seqence(self):\n \"\"\"Test that a TypeError is raised when a ColDefs is instantiated with\n a sequence of non-Column objects.\n \"\"\"\n\n pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])\n\n def test_coldefs_init_from_array(self):\n \"\"\"Test that ColDefs._init_from_array works with single element data-\n types as well as multi-element data-types\n \"\"\"\n nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])\n col_defs = fits.column.ColDefs(nd_array)\n assert 2**31 == col_defs['A'].bzero\n assert 2**15 == col_defs['B'].bzero\n\n def test_pickle(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/1597\n\n Tests for pickling FITS_rec objects\n \"\"\"\n\n # open existing FITS tables (images pickle by default, no test needed):\n with fits.open(self.data('tb.fits')) as btb:\n # Test column array is delayed and can pickle\n assert isinstance(btb[1].columns._arrays[0], Delayed)\n\n btb_pd = pickle.dumps(btb[1].data)\n btb_pl = pickle.loads(btb_pd)\n\n # It should not be delayed any more\n assert not isinstance(btb[1].columns._arrays[0], Delayed)\n\n assert comparerecords(btb_pl, btb[1].data)\n\n with fits.open(self.data('ascii.fits')) as asc:\n asc_pd = pickle.dumps(asc[1].data)\n asc_pl = pickle.loads(asc_pd)\n assert comparerecords(asc_pl, asc[1].data)\n\n with fits.open(self.data('random_groups.fits')) as rgr:\n rgr_pd = pickle.dumps(rgr[0].data)\n rgr_pl = pickle.loads(rgr_pd)\n assert comparerecords(rgr_pl, rgr[0].data)\n\n with fits.open(self.data('zerowidth.fits')) as zwc:\n # Doesn't pickle zero-width (_phanotm) column 'ORBPARM'\n zwc_pd = pickle.dumps(zwc[2].data)\n zwc_pl = pickle.loads(zwc_pd)\n with pytest.warns(UserWarning, match='Field 
2 has a repeat count '\n 'of 0 in its format code'):\n assert comparerecords(zwc_pl, zwc[2].data)\n\n def test_column_lookup_by_name(self):\n \"\"\"Tests that a `ColDefs` can be indexed by column name.\"\"\"\n\n a = fits.Column(name='a', format='D')\n b = fits.Column(name='b', format='D')\n\n cols = fits.ColDefs([a, b])\n\n assert cols['a'] == cols[0]\n assert cols['b'] == cols[1]\n\n def test_column_attribute_change_after_removal(self):\n \"\"\"\n This is a test of the column attribute change notification system.\n\n After a column has been removed from a table (but other references\n are kept to that same column) changes to that column's attributes\n should not trigger a notification on the table it was removed from.\n \"\"\"\n\n # One way we can check this is to ensure there are no further changes\n # to the header\n table = fits.BinTableHDU.from_columns([\n fits.Column('a', format='D'),\n fits.Column('b', format='D')])\n\n b = table.columns['b']\n\n table.columns.del_col('b')\n assert table.data.dtype.names == ('a',)\n\n b.name = 'HELLO'\n\n assert b.name == 'HELLO'\n assert 'TTYPE2' not in table.header\n assert table.header['TTYPE1'] == 'a'\n assert table.columns.names == ['a']\n\n with pytest.raises(KeyError):\n table.columns['b']\n\n # Make sure updates to the remaining column still work\n table.columns.change_name('a', 'GOODBYE')\n with pytest.raises(KeyError):\n table.columns['a']\n\n assert table.columns['GOODBYE'].name == 'GOODBYE'\n assert table.data.dtype.names == ('GOODBYE',)\n assert table.columns.names == ['GOODBYE']\n assert table.data.columns.names == ['GOODBYE']\n\n table.columns['GOODBYE'].name = 'foo'\n with pytest.raises(KeyError):\n table.columns['GOODBYE']\n\n assert table.columns['foo'].name == 'foo'\n assert table.data.dtype.names == ('foo',)\n assert table.columns.names == ['foo']\n assert table.data.columns.names == ['foo']\n\n def test_x_column_deepcopy(self):\n \"\"\"\n Regression test for 
https://github.com/astropy/astropy/pull/4514\n\n Tests that columns with the X (bit array) format can be deep-copied.\n \"\"\"\n\n c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])\n c2 = copy.deepcopy(c)\n assert c2.name == c.name\n assert c2.format == c.format\n assert np.all(c2.array == c.array)\n\n def test_p_column_deepcopy(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/pull/4514\n\n Tests that columns with the P/Q formats (variable length arrays) can be\n deep-copied.\n \"\"\"\n\n c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])\n c2 = copy.deepcopy(c)\n assert c2.name == c.name\n assert c2.format == c.format\n assert np.all(c2.array[0] == c.array[0])\n assert np.all(c2.array[1] == c.array[1])\n\n c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])\n c4 = copy.deepcopy(c3)\n assert c4.name == c3.name\n assert c4.format == c3.format\n assert np.all(c4.array[0] == c3.array[0])\n assert np.all(c4.array[1] == c3.array[1])\n\n def test_column_verify_keywords(self):\n \"\"\"\n Test that the keyword arguments used to initialize a Column, specifically\n those that typically read from a FITS header (so excluding array),\n are verified to have a valid value.\n \"\"\"\n\n with pytest.raises(AssertionError) as err:\n c = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])\n assert 'Column name must be a string able to fit' in str(err.value)\n\n with pytest.raises(VerifyError) as err:\n c = fits.Column('col', format='I', null='Nan', disp=1, coord_type=1,\n coord_unit=2, coord_ref_point='1', coord_ref_value='1',\n coord_inc='1', time_ref_pos=1)\n err_msgs = ['keyword arguments to Column were invalid', 'TNULL', 'TDISP',\n 'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS']\n for msg in err_msgs:\n assert msg in str(err.value)\n\n def test_column_verify_start(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/pull/6359\n\n Test the validation of the column start position option (ASCII table 
only),\n corresponding to ``TBCOL`` keyword.\n Test whether the VerifyError message generated is the one with highest priority,\n i.e. the order of error messages to be displayed is maintained.\n \"\"\"\n\n with pytest.raises(VerifyError) as err:\n c = fits.Column('a', format='B', start='a', array=[1, 2, 3])\n assert \"start option (TBCOLn) is not allowed for binary table columns\" in str(err.value)\n\n with pytest.raises(VerifyError) as err:\n c = fits.Column('a', format='I', start='a', array=[1, 2, 3])\n assert \"start option (TBCOLn) must be a positive integer (got 'a').\" in str(err.value)\n\n with pytest.raises(VerifyError) as err:\n c = fits.Column('a', format='I', start='-56', array=[1, 2, 3])\n assert \"start option (TBCOLn) must be a positive integer (got -56).\" in str(err.value)\n\ndef test_regression_5383():\n\n # Regression test for an undefined variable\n\n x = np.array([1, 2, 3])\n col = fits.Column(name='a', array=x, format='E')\n hdu = fits.BinTableHDU.from_columns([col])\n del hdu._header['TTYPE1']\n hdu.columns[0].name = 'b'\n\n\ndef test_table_to_hdu():\n from astropy.table import Table\n table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],\n names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])\n table['a'].unit = 'm/s'\n table['b'].unit = 'not-a-unit'\n table.meta['foo'] = 'bar'\n\n with catch_warnings() as w:\n hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))\n assert len(w) == 1\n assert str(w[0].message).startswith(\"'not-a-unit' did not parse as\"\n \" fits unit\")\n\n for name in 'abc':\n assert np.array_equal(table[name], hdu.data[name])\n\n # Check that TUNITn cards appear in the correct order\n # (https://github.com/astropy/astropy/pull/5720)\n assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')\n\n assert hdu.header['FOO'] == 'bar'\n assert hdu.header['TEST'] == 1\n\n\ndef test_regression_scalar_indexing():\n # Indexing a FITS_rec with a tuple that returns a scalar record\n # should work\n x = np.array([(1.0, 
2), (3.0, 4)],\n dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)\n x1a = x[1]\n # this should succeed.\n x1b = x[(1,)]\n # FITS_record does not define __eq__; so test elements.\n assert all(a == b for a, b in zip(x1a, x1b))\n\n\ndef test_new_column_attributes_preserved(tmpdir):\n\n # Regression test for https://github.com/astropy/astropy/issues/7145\n # This makes sure that for now we don't clear away keywords that have\n # newly been recognized (in Astropy 3.0) as special column attributes but\n # instead just warn that we might do so in future. The new keywords are:\n # TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS\n\n col = []\n col.append(fits.Column(name=\"TIME\", format=\"1E\", unit=\"s\"))\n col.append(fits.Column(name=\"RAWX\", format=\"1I\", unit=\"pixel\"))\n col.append(fits.Column(name=\"RAWY\", format=\"1I\"))\n cd = fits.ColDefs(col)\n\n hdr = fits.Header()\n\n # Keywords that will get ignored in favor of these in the data\n hdr['TUNIT1'] = 'pixel'\n hdr['TUNIT2'] = 'm'\n hdr['TUNIT3'] = 'm'\n\n # Keywords that were added in Astropy 3.0 that should eventually be\n # ignored and set on the data instead\n hdr['TCTYP2'] = 'RA---TAN'\n hdr['TCTYP3'] = 'ANGLE'\n hdr['TCRVL2'] = -999.0\n hdr['TCRVL3'] = -999.0\n hdr['TCRPX2'] = 1.0\n hdr['TCRPX3'] = 1.0\n hdr['TALEN2'] = 16384\n hdr['TALEN3'] = 1024\n hdr['TCUNI2'] = 'angstrom'\n hdr['TCUNI3'] = 'deg'\n\n # Other non-relevant keywords\n hdr['RA'] = 1.5\n hdr['DEC'] = 3.0\n\n with pytest.warns(AstropyDeprecationWarning) as warning_list:\n hdu = fits.BinTableHDU.from_columns(cd, hdr)\n assert str(warning_list[0].message).startswith(\"The following keywords are now recognized as special\")\n\n # First, check that special keywords such as TUNIT are ignored in the header\n # We may want to change that behavior in future, but this is the way it's\n # been for a while now.\n\n assert hdu.columns[0].unit == 's'\n assert hdu.columns[1].unit == 'pixel'\n assert hdu.columns[2].unit is None\n\n assert 
hdu.header['TUNIT1'] == 's'\n assert hdu.header['TUNIT2'] == 'pixel'\n assert 'TUNIT3' not in hdu.header # TUNIT3 was removed\n\n # Now, check that the new special keywords are actually still there\n # but weren't used to set the attributes on the data\n\n assert hdu.columns[0].coord_type is None\n assert hdu.columns[1].coord_type is None\n assert hdu.columns[2].coord_type is None\n\n assert 'TCTYP1' not in hdu.header\n assert hdu.header['TCTYP2'] == 'RA---TAN'\n assert hdu.header['TCTYP3'] == 'ANGLE'\n\n # Make sure that other keywords are still there\n\n assert hdu.header['RA'] == 1.5\n assert hdu.header['DEC'] == 3.0\n\n # Now we can write this HDU to a file and re-load. Re-loading *should*\n # cause the special column attribtues to be picked up (it's just that when a\n # header is manually specified, these values are ignored)\n\n filename = tmpdir.join('test.fits').strpath\n\n hdu.writeto(filename)\n\n # Make sure we don't emit a warning in this case\n with pytest.warns(None) as warning_list:\n with fits.open(filename) as hdul:\n hdu2 = hdul[1]\n assert len(warning_list) == 0\n\n # Check that column attributes are now correctly set\n\n assert hdu2.columns[0].unit == 's'\n assert hdu2.columns[1].unit == 'pixel'\n assert hdu2.columns[2].unit is None\n\n assert hdu2.header['TUNIT1'] == 's'\n assert hdu2.header['TUNIT2'] == 'pixel'\n assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed\n\n # Now, check that the new special keywords are actually still there\n # but weren't used to set the attributes on the data\n\n assert hdu2.columns[0].coord_type is None\n assert hdu2.columns[1].coord_type == 'RA---TAN'\n assert hdu2.columns[2].coord_type == 'ANGLE'\n\n assert 'TCTYP1' not in hdu2.header\n assert hdu2.header['TCTYP2'] == 'RA---TAN'\n assert hdu2.header['TCTYP3'] == 'ANGLE'\n\n # Make sure that other keywords are still there\n\n assert hdu2.header['RA'] == 1.5\n assert hdu2.header['DEC'] == 3.0\n\n\ndef test_empty_table(tmpdir):\n ofile = 
str(tmpdir.join('emptytable.fits'))\n hdu = fits.BinTableHDU(header=None, data=None, name='TEST')\n hdu.writeto(ofile)\n\n with fits.open(ofile) as hdul:\n assert hdul['TEST'].data.size == 0\n\n ofile = str(tmpdir.join('emptytable.fits.gz'))\n hdu = fits.BinTableHDU(header=None, data=None, name='TEST')\n hdu.writeto(ofile, overwrite=True)\n\n with fits.open(ofile) as hdul:\n assert hdul['TEST'].data.size == 0\n",
"\nimport pytest\nimport numpy as np\nfrom urllib.error import HTTPError\n\nfrom astropy.time import Time\nfrom astropy import units as u\nfrom astropy.constants import c\nfrom astropy.coordinates.builtin_frames import GCRS\nfrom astropy.coordinates.earth import EarthLocation\nfrom astropy.coordinates.sky_coordinate import SkyCoord\nfrom astropy.coordinates.solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC,\n _apparent_position_in_true_coordinates,\n get_body_barycentric, get_body_barycentric_posvel)\nfrom astropy.coordinates.funcs import get_sun\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.units import allclose as quantity_allclose\nfrom astropy.utils.data import download_file\n\ntry:\n import jplephem # pylint: disable=W0611\nexcept ImportError:\n HAS_JPLEPHEM = False\nelse:\n HAS_JPLEPHEM = True\n\ntry:\n from skyfield.api import load # pylint: disable=W0611\nexcept ImportError:\n HAS_SKYFIELD = False\nelse:\n HAS_SKYFIELD = True\n\nde432s_separation_tolerance_planets = 5*u.arcsec\nde432s_separation_tolerance_moon = 5*u.arcsec\nde432s_distance_tolerance = 20*u.km\n\nskyfield_angular_separation_tolerance = 1*u.arcsec\nskyfield_separation_tolerance = 10*u.km\n\n\n@pytest.mark.remote_data\n@pytest.mark.skipif(str('not HAS_SKYFIELD'))\ndef test_positions_skyfield():\n \"\"\"\n Test positions against those generated by skyfield.\n \"\"\"\n\n t = Time('1980-03-25 00:00')\n location = None\n\n # skyfield ephemeris\n planets = load('de421.bsp')\n ts = load.timescale()\n mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon']\n earth = planets['earth']\n\n skyfield_t = ts.from_astropy(t)\n\n if location is not None:\n earth = earth.topos(latitude_degrees=location.lat.to_value(u.deg),\n longitude_degrees=location.lon.to_value(u.deg),\n elevation_m=location.height.to_value(u.m))\n\n skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()\n skyfield_jupiter = 
earth.at(skyfield_t).observe(jupiter).apparent()\n skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()\n\n if location is not None:\n obsgeoloc, obsgeovel = location.get_gcrs_posvel(t)\n frame = GCRS(obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)\n else:\n frame = GCRS(obstime=t)\n\n ra, dec, dist = skyfield_mercury.radec(epoch='date')\n skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),\n frame=frame)\n ra, dec, dist = skyfield_jupiter.radec(epoch='date')\n skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),\n frame=frame)\n ra, dec, dist = skyfield_moon.radec(epoch='date')\n skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),\n frame=frame)\n\n moon_astropy = get_moon(t, location, ephemeris='de430')\n mercury_astropy = get_body('mercury', t, location, ephemeris='de430')\n jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430')\n\n # convert to true equator and equinox\n jupiter_astropy = _apparent_position_in_true_coordinates(jupiter_astropy)\n mercury_astropy = _apparent_position_in_true_coordinates(mercury_astropy)\n moon_astropy = _apparent_position_in_true_coordinates(moon_astropy)\n\n assert (moon_astropy.separation(skyfield_moon) <\n skyfield_angular_separation_tolerance)\n assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance)\n\n assert (jupiter_astropy.separation(skyfield_jupiter) <\n skyfield_angular_separation_tolerance)\n assert (jupiter_astropy.separation_3d(skyfield_jupiter) <\n skyfield_separation_tolerance)\n\n assert (mercury_astropy.separation(skyfield_mercury) <\n skyfield_angular_separation_tolerance)\n assert (mercury_astropy.separation_3d(skyfield_mercury) <\n skyfield_separation_tolerance)\n\n\nclass TestPositionsGeocentric:\n \"\"\"\n Test positions against those generated by JPL Horizons accessed on\n 2016-03-28, with refraction turned on.\n \"\"\"\n\n def setup(self):\n self.t = Time('1980-03-25 
00:00')\n self.frame = GCRS(obstime=self.t)\n # Results returned by JPL Horizons web interface\n self.horizons = {\n 'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s',\n distance=c*6.323037*u.min, frame=self.frame),\n 'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s',\n distance=c*0.021921*u.min, frame=self.frame),\n 'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s',\n distance=c*37.694557*u.min, frame=self.frame),\n 'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s',\n distance=c*8.294858*u.min, frame=self.frame)}\n\n @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),\n (('mercury', 7.*u.arcsec, 1000*u.km),\n ('jupiter', 78.*u.arcsec, 76000*u.km),\n ('moon', 20.*u.arcsec, 80*u.km),\n ('sun', 5.*u.arcsec, 11.*u.km)))\n def test_erfa_planet(self, body, sep_tol, dist_tol):\n \"\"\"Test predictions using erfa/plan94.\n\n Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and\n Mercury, and that quoted in Meeus \"Astronomical Algorithms\" (1998) for the Moon.\n \"\"\"\n astropy = get_body(body, self.t, ephemeris='builtin')\n horizons = self.horizons[body]\n\n # convert to true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert astropy.separation(horizons) < sep_tol\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, horizons.distance,\n atol=dist_tol)\n\n @pytest.mark.remote_data\n @pytest.mark.skipif('not HAS_JPLEPHEM')\n @pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun'))\n def test_de432s_planet(self, body):\n astropy = get_body(body, self.t, ephemeris='de432s')\n horizons = self.horizons[body]\n\n # convert to true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert (astropy.separation(horizons) <\n de432s_separation_tolerance_planets)\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, 
horizons.distance,\n atol=de432s_distance_tolerance)\n\n @pytest.mark.remote_data\n @pytest.mark.skipif('not HAS_JPLEPHEM')\n def test_de432s_moon(self):\n astropy = get_moon(self.t, ephemeris='de432s')\n horizons = self.horizons['moon']\n\n # convert to true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert (astropy.separation(horizons) <\n de432s_separation_tolerance_moon)\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, horizons.distance,\n atol=de432s_distance_tolerance)\n\n\n@pytest.mark.remote_data\nclass TestPositionKittPeak:\n \"\"\"\n Test positions against those generated by JPL Horizons accessed on\n 2016-03-28, with refraction turned on.\n \"\"\"\n\n def setup(self):\n kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg,\n lat=31.963333333333342*u.deg,\n height=2120*u.m)\n self.t = Time('2014-09-25T00:00', location=kitt_peak)\n obsgeoloc, obsgeovel = kitt_peak.get_gcrs_posvel(self.t)\n self.frame = GCRS(obstime=self.t,\n obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)\n # Results returned by JPL Horizons web interface\n self.horizons = {\n 'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s',\n distance=c*7.699020*u.min, frame=self.frame),\n 'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s',\n distance=c*0.022054*u.min, frame=self.frame),\n 'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s',\n distance=c*49.244937*u.min, frame=self.frame)}\n\n @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),\n (('mercury', 7.*u.arcsec, 500*u.km),\n ('jupiter', 78.*u.arcsec, 82000*u.km)))\n def test_erfa_planet(self, body, sep_tol, dist_tol):\n \"\"\"Test predictions using erfa/plan94.\n\n Accuracies are maximum deviations listed in erfa/plan94.c.\n \"\"\"\n # Add uncertainty in position of Earth\n dist_tol = dist_tol + 1300 * u.km\n\n astropy = get_body(body, self.t, ephemeris='builtin')\n horizons = self.horizons[body]\n\n # convert to 
true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert astropy.separation(horizons) < sep_tol\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, horizons.distance,\n atol=dist_tol)\n\n @pytest.mark.remote_data\n @pytest.mark.skipif('not HAS_JPLEPHEM')\n @pytest.mark.parametrize('body', ('mercury', 'jupiter'))\n def test_de432s_planet(self, body):\n astropy = get_body(body, self.t, ephemeris='de432s')\n horizons = self.horizons[body]\n\n # convert to true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert (astropy.separation(horizons) <\n de432s_separation_tolerance_planets)\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, horizons.distance,\n atol=de432s_distance_tolerance)\n\n @pytest.mark.remote_data\n @pytest.mark.skipif('not HAS_JPLEPHEM')\n def test_de432s_moon(self):\n astropy = get_moon(self.t, ephemeris='de432s')\n horizons = self.horizons['moon']\n\n # convert to true equator and equinox\n astropy = _apparent_position_in_true_coordinates(astropy)\n\n # Assert sky coordinates are close.\n assert (astropy.separation(horizons) <\n de432s_separation_tolerance_moon)\n\n # Assert distances are close.\n assert_quantity_allclose(astropy.distance, horizons.distance,\n atol=de432s_distance_tolerance)\n\n @pytest.mark.remote_data\n @pytest.mark.skipif('not HAS_JPLEPHEM')\n @pytest.mark.parametrize('bodyname', ('mercury', 'jupiter'))\n def test_custom_kernel_spec_body(self, bodyname):\n \"\"\"\n Checks that giving a kernel specifier instead of a body name works\n \"\"\"\n coord_by_name = get_body(bodyname, self.t, ephemeris='de432s')\n kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]\n coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s')\n\n assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)\n assert_quantity_allclose(coord_by_name.dec, 
coord_by_kspec.dec)\n assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)\n\n\n@pytest.mark.remote_data\n@pytest.mark.skipif('not HAS_JPLEPHEM')\n@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),\n Time('1980-03-25 00:00'),\n Time('2010-10-13 00:00')))\ndef test_get_sun_consistency(time):\n \"\"\"\n Test that the sun from JPL and the builtin get_sun match\n \"\"\"\n sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s')\n builtin_get_sun = get_sun(time)\n sep = builtin_get_sun.separation(sun_jpl_gcrs)\n assert sep < 0.1*u.arcsec\n\n\ndef test_get_moon_nonscalar_regression():\n \"\"\"\n Test that the builtin ephemeris works with non-scalar times.\n\n See Issue #5069.\n \"\"\"\n times = Time([\"2015-08-28 03:30\", \"2015-09-05 10:30\"])\n # the following line will raise an Exception if the bug recurs.\n get_moon(times, ephemeris='builtin')\n\n\ndef test_barycentric_pos_posvel_same():\n # Check that the two routines give identical results.\n ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00'))\n ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))\n assert np.all(ep1.xyz == ep2.xyz)\n\n\ndef test_earth_barycentric_velocity_rough():\n # Check that a time near the equinox gives roughly the right result.\n ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))\n assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU)\n expected = u.Quantity([0.*u.one,\n np.cos(23.5*u.deg),\n np.sin(23.5*u.deg)]) * -30. 
* u.km / u.s\n assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s)\n\n\ndef test_earth_barycentric_velocity_multi_d():\n # Might as well test it with a multidimensional array too.\n t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.\n ep, ev = get_body_barycentric_posvel('earth', t)\n # note: assert_quantity_allclose doesn't like the shape mismatch.\n # this is a problem with np.testing.assert_allclose.\n assert quantity_allclose(ep.get_xyz(xyz_axis=-1),\n [[-1., 0., 0.], [+1., 0., 0.]]*u.AU,\n atol=0.06*u.AU)\n expected = u.Quantity([0.*u.one,\n np.cos(23.5*u.deg),\n np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)\n assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,\n atol=2.*u.km/u.s)\n\n\n@pytest.mark.remote_data\n@pytest.mark.skipif('not HAS_JPLEPHEM')\n@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),\n (('mercury', 1000.*u.km, 1.*u.km/u.s),\n ('jupiter', 100000.*u.km, 2.*u.km/u.s),\n ('earth', 10*u.km, 10*u.mm/u.s)))\ndef test_barycentric_velocity_consistency(body, pos_tol, vel_tol):\n # Tolerances are about 1.5 times the rms listed for plan94 and epv00,\n # except for Mercury (which nominally is 334 km rms)\n t = Time('2016-03-20T12:30:00')\n ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')\n dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')\n assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)\n assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)\n # Might as well test it with a multidimensional array too.\n t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.\n ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')\n dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')\n assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)\n assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)\n\n\n@pytest.mark.remote_data\n@pytest.mark.skipif('not HAS_JPLEPHEM')\n@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),\n 
Time('1980-03-25 00:00'),\n Time('2010-10-13 00:00')))\ndef test_url_or_file_ephemeris(time):\n # URL for ephemeris de432s used for testing:\n url = 'http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp'\n\n # Pass the ephemeris directly as a URL.\n coord_by_url = get_body('earth', time, ephemeris=url)\n\n # Translate the URL to the cached location on the filesystem.\n # Since we just used the url above, it should already have been downloaded.\n filepath = download_file(url, cache=True)\n\n # Get the coordinates using the file path directly:\n coord_by_filepath = get_body('earth', time, ephemeris=filepath)\n\n # Using the URL or filepath should give exactly the same results:\n assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)\n assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)\n assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)\n\n\n@pytest.mark.remote_data\n@pytest.mark.skipif('not HAS_JPLEPHEM')\ndef test_url_ephemeris_wrong_input():\n # Try loading a non-existing URL:\n time = Time('1960-01-12 00:00')\n with pytest.raises(HTTPError):\n get_body('earth', time, ephemeris='http://data.astropy.org/path/to/nonexisting/file.bsp')\n\n\n@pytest.mark.skipif('not HAS_JPLEPHEM')\ndef test_file_ephemeris_wrong_input():\n time = Time('1960-01-12 00:00')\n # Try loading a non-existing file:\n with pytest.raises(ValueError):\n get_body('earth', time, ephemeris='/path/to/nonexisting/file.bsp')\n # Try loading a file that does exist, but is not an ephemeris file:\n with pytest.raises(ValueError):\n get_body('earth', time, ephemeris=__file__)\n"
] |
[
[
"numpy.append",
"numpy.array",
"numpy.isnan",
"numpy.array_equal",
"numpy.zeros",
"numpy.rec.recarray.field",
"numpy.any",
"numpy.ndarray",
"numpy.rec.array",
"numpy.arange",
"numpy.char.array",
"numpy.where",
"numpy.absolute",
"numpy.all",
"numpy.dtype"
],
[
"numpy.all",
"numpy.sin",
"numpy.arange",
"numpy.cos"
]
] |
wms2537/incubator-mxnet
|
[
"b7d7e02705deb1dc4942bf39efc19f133e2181f7"
] |
[
"tests/python/unittest/test_gluon.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport gc\n\nimport mxnet as mx\nfrom mxnet import gluon\nfrom mxnet import init\nfrom mxnet.gluon import nn\nfrom mxnet.base import py_str, MXNetError\nfrom mxnet.test_utils import assert_almost_equal, default_context, assert_allclose\nfrom mxnet.util import is_np_array\nfrom mxnet.ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID\nfrom mxnet.test_utils import use_np\nfrom common import assertRaises, assert_raises_cudnn_not_satisfied, \\\n xfail_when_nonstandard_decimal_separator, environment\nimport numpy as onp\nfrom numpy.testing import assert_array_equal\nimport pytest\nfrom copy import deepcopy\nimport warnings\nimport json\nimport random\nimport tempfile\n\nmx.npx.reset_np()\n\ndef test_parameter():\n p = gluon.Parameter('weight', shape=(10, 10))\n p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])\n assert len(p.list_data()) == 2\n assert len(p.list_grad()) == 2\n assert p.data(mx.cpu(1)).context == mx.cpu(1)\n assert p.data(mx.cpu(0)).shape == (10, 10)\n assert p.grad(mx.cpu(0)).stype == 'default'\n assert p.data(mx.cpu(0)).stype == 'default'\n\n p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])\n assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]\n\ndef 
test_invalid_parameter_stype():\n with pytest.raises(AssertionError):\n p = gluon.Parameter('weight', shape=(10, 10), stype='invalid')\n\ndef test_invalid_parameter_grad_stype():\n with pytest.raises(AssertionError):\n p = gluon.Parameter('weight', shape=(10, 10), grad_stype='invalid')\n\ndef test_sparse_parameter():\n p = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse')\n p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])\n row_id = mx.np.arange(0, 10, ctx=mx.cpu(1))\n assert len(p.list_grad()) == 2\n # getting row_sparse data without trainer throws an exception\n assertRaises(RuntimeError, p.list_row_sparse_data, row_id)\n trainer = mx.gluon.Trainer([p], 'sgd')\n assert len(p.list_row_sparse_data(row_id)) == 2\n weight = p.row_sparse_data(row_id)\n assert weight.context == mx.cpu(1)\n assert weight.shape == (10, 10)\n assert weight.stype == 'row_sparse'\n assert p.var().attr('__storage_type__') == str(_STORAGE_TYPE_STR_TO_ID['row_sparse'])\n assert p.grad(mx.cpu(0)).stype == 'row_sparse'\n\n p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])\n assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]\n\ndef test_parameter_invalid_access():\n # cannot call data on row_sparse parameters\n p0 = gluon.Parameter('weight', shape=(10, 10), stype='row_sparse', grad_stype='row_sparse')\n p0.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])\n assertRaises(RuntimeError, p0.data)\n assertRaises(RuntimeError, p0.list_data)\n row_id = mx.np.arange(0, 10)\n # cannot call row_sparse_data on dense parameters\n p1 = gluon.Parameter('weight', shape=(10, 10))\n p1.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])\n assertRaises(RuntimeError, p1.row_sparse_data, row_id.copyto(mx.cpu(0)))\n assertRaises(RuntimeError, p1.list_row_sparse_data, row_id)\n\n\ndef test_parameter_row_sparse_data():\n ctx0 = mx.cpu(1)\n ctx1 = mx.cpu(2)\n dim0 = 4\n x = gluon.Parameter('x', shape=(dim0, 2), stype='row_sparse')\n x.initialize(init='xavier', ctx=[ctx0, ctx1])\n 
trainer = gluon.Trainer([x], 'sgd')\n x_param = x._data[0].copy()\n assert x_param.stype == 'row_sparse'\n row_id_0 = mx.nd.array([0,1], ctx=ctx0)\n retained_0 = x.row_sparse_data(row_id_0)\n retained_target_0 = mx.nd.sparse.retain(x_param, row_id_0.as_in_context(ctx0))\n mx.test_utils.assert_almost_equal(retained_0.asnumpy(), retained_target_0.asnumpy())\n assert retained_0.context == ctx0\n row_id_1 = mx.nd.arange(0, dim0, ctx=ctx1)\n retained_1 = x.row_sparse_data(row_id_1)\n retained_target_1 = x_param\n mx.test_utils.assert_almost_equal(retained_1.asnumpy(), retained_target_1.asnumpy())\n assert retained_1.context == ctx1\n row_id_2 = mx.nd.array([0,1,2])\n retained_2 = x.list_row_sparse_data(row_id_2)\n retained_target_2 = mx.nd.sparse.retain(x_param, row_id_2.as_in_context(ctx0))\n mx.test_utils.assert_almost_equal(retained_2[0].asnumpy(), retained_target_2.asnumpy())\n\n\n@use_np\ndef test_constant():\n class Test(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Test, self).__init__(**kwargs)\n self.value = onp.asarray([[1,2], [3,4]])\n self.const = gluon.Constant(self.value)\n\n def forward(self, x):\n return x + self.const.data()\n\n test = Test()\n test.initialize()\n trainer = gluon.Trainer(test.collect_params(), 'sgd',\n {'learning_rate': 1.0, 'momentum': 0.5})\n\n with mx.autograd.record():\n x = mx.np.ones((2,2))\n x.attach_grad()\n y = test(x)\n y.backward()\n\n trainer.step(1)\n\n assert (test.const.data().asnumpy() == test.value).all()\n assert (x.grad.asnumpy() == 1).all()\n\n\n@use_np\ndef test_parameter_sharing():\n class Net(gluon.Block):\n def __init__(self, in_units=0, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.dense0 = nn.Dense(5, in_units=in_units)\n self.dense1 = nn.Dense(5, in_units=in_units)\n\n def forward(self, x):\n return self.dense1(self.dense0(x))\n\n net1 = Net(in_units=5)\n net2 = Net().share_parameters(net1.collect_params())\n net1.initialize()\n net2(mx.np.zeros((3, 5)))\n\n 
net1.save_parameters('net1.params')\n\n net3 = Net()\n net3.load_parameters('net1.params', mx.cpu())\n\n net4 = Net()\n net5 = Net(in_units=5).share_parameters(net4.collect_params())\n net4.initialize()\n net5(mx.np.zeros((3, 5)))\n\n net4.save_parameters('net4.params')\n\n net6 = Net()\n net6.load_parameters('net4.params', mx.cpu())\n\n\ndef test_parameter_str():\n class Net(gluon.Block):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.dense0 = nn.Dense(10, in_units=5, use_bias=False)\n\n net = Net()\n lines = str(net.collect_params()).splitlines()\n\n assert 'dense0.weight' in lines[0]\n assert '(10, 5)' in lines[0]\n assert 'float32' in lines[0]\n\n\ndef test_collect_parameters():\n net = nn.HybridSequential()\n net.add(nn.Conv2D(10, 3))\n net.add(nn.Dense(10, activation='relu'))\n assert set(net.collect_params().keys()) == \\\n set(['0.weight', '0.bias','1.weight','1.bias'])\n assert set(net.collect_params('.*weight').keys()) == \\\n set(['0.weight', '1.weight'])\n assert set(net.collect_params('0.bias|1.bias').keys()) == \\\n set(['0.bias', '1.bias'])\n\n@use_np\ndef test_basic():\n model = nn.Sequential()\n model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))\n model.add(nn.Dropout(0.5))\n model.add(nn.Dense(64, activation='tanh', in_units=256),\n nn.Dense(32, in_units=64))\n model.add(nn.Activation('relu'))\n\n # ndarray\n model.initialize(mx.init.Xavier(magnitude=2.24))\n x = model(mx.np.zeros((32, 2, 10)))\n assert x.shape == (32, 32)\n x.wait_to_read()\n\n model.setattr('grad_req', 'null')\n assert list(model.collect_params().values())[0]._grad is None\n model.setattr('grad_req', 'write')\n assert list(model.collect_params().values())[0]._grad is not None\n\n\ndef test_sparse_symbol_block():\n data = mx.sym.var('data')\n weight = mx.sym.var('weight', stype='row_sparse')\n bias = mx.sym.var('bias')\n out = mx.sym.broadcast_add(mx.sym.dot(data, weight), bias)\n with pytest.raises(AssertionError):\n # an 
exception is expected when creating a SparseBlock w/ sparse param\n net = gluon.SymbolBlock(out, data)\n\ndef test_sparse_hybrid_block():\n params = {}\n params['weight'] = gluon.Parameter('weight', shape=(5,5), stype='row_sparse', dtype='float32')\n params['bias'] = gluon.Parameter('bias', shape=(5), dtype='float32')\n net = gluon.nn.Dense(5).share_parameters(params)\n net.initialize()\n x = mx.np.ones((2,5))\n with pytest.raises(RuntimeError):\n # an exception is expected when forwarding a HybridBlock w/ sparse param\n y = net(x)\n\n\n@use_np\ndef test_hybrid_block_none_args():\n class Foo(gluon.HybridBlock):\n def forward(self, a, b):\n if a is None and b is not None:\n return b\n elif b is None and a is not None:\n return a\n elif a is not None and b is not None:\n return a + b\n else:\n raise NotImplementedError\n\n class FooDefault(gluon.HybridBlock):\n def forward(self, a, b=None):\n if a is None and b is not None:\n return b\n elif b is None and a is not None:\n return a\n elif a is not None and b is not None:\n return a + b\n else:\n raise NotImplementedError\n\n\n class FooNested(gluon.HybridBlock):\n def __init__(self):\n super(FooNested, self).__init__()\n self.f1 = Foo()\n self.f2 = Foo()\n self.f3 = Foo()\n\n def forward(self, a, b):\n data = self.f1(a, b)\n data = self.f2(a, data)\n data = self.f3(data, b)\n return data\n\n for arg_inputs in [(None, mx.np.ones((10,))),\n (mx.np.ones((10,)), mx.np.ones((10,))),\n (mx.np.ones((10,)), None)]:\n foo1 = FooNested()\n foo1.hybridize()\n foo2 = FooNested()\n for _ in range(2): # Loop for 2 times to trigger forwarding of the cached version\n out1 = foo1(*arg_inputs)\n out2 = foo2(*arg_inputs)\n if isinstance(out1, tuple):\n for lhs, rhs in zip(out1, out2):\n assert_almost_equal(lhs.asnumpy(), rhs.asnumpy())\n else:\n assert_almost_equal(out1.asnumpy(), out2.asnumpy())\n\n for do_hybridize in [True, False]:\n foo = FooNested()\n if do_hybridize:\n foo.hybridize()\n pytest.raises(ValueError, foo, None, 
None)\n\n # Make sure the ValueError is correctly raised\n foo = FooNested()\n foo.hybridize()\n foo(None, mx.np.ones((10,))) # Pass for the first time to initialize the cached op\n pytest.raises(ValueError, lambda: foo(mx.np.ones((10,)), mx.np.ones((10,))))\n foo = FooNested()\n pytest.raises(TypeError, lambda: foo(mx.np.ones((10,)), mx.sym.var('a')))\n foo = FooNested()\n pytest.raises(TypeError, lambda: foo(mx.sym.var('a'), mx.np.ones((10,))))\n\n # Test the case of the default values\n foo1 = FooDefault()\n foo1.hybridize()\n foo2 = FooDefault()\n out1 = foo1(mx.np.ones((10,)))\n out2 = foo2(mx.np.ones((10,)))\n out3 = foo1(mx.np.ones((10,)), None)\n out4 = foo2(mx.np.ones((10,)), None)\n assert_almost_equal(out1.asnumpy(), out2.asnumpy())\n assert_almost_equal(out1.asnumpy(), out3.asnumpy())\n assert_almost_equal(out1.asnumpy(), out4.asnumpy())\n foo1 = FooDefault()\n foo1.hybridize()\n out1 = foo1(mx.np.ones((10,)), None)\n out2 = foo1(mx.np.ones((10,)))\n assert_almost_equal(out1.asnumpy(), out2.asnumpy())\n pytest.raises(ValueError, lambda: foo1(mx.np.ones((10,)), mx.np.ones((10,))))\n\n\n@use_np\ndef test_hybrid_block_hybrid_no_hybrid():\n class FooHybrid(gluon.HybridBlock):\n def forward(self, a, b):\n if isinstance(a, (list, tuple)):\n a = sum(a)\n if isinstance(b, (list, tuple)):\n b = sum(b)\n return a + b\n\n class Foo(gluon.Block):\n def forward(self, a, b):\n if isinstance(a, (list, tuple)):\n a = sum(a)\n if isinstance(b, (list, tuple)):\n b = sum(b)\n return a + b\n # When hybridize is not called, HybridBlock acts the same as Block\n foo_hybrid = FooHybrid()\n foo = Foo()\n for a, b in [(mx.np.ones((10,)), 1),\n (mx.np.ones((20,)), 2),\n ([mx.np.ones((10,)), mx.np.ones((10,))],\n [mx.np.ones((10)), mx.np.ones((10,)), mx.np.ones((10,))]),\n ([mx.np.ones((10,)), mx.np.ones((10,))], 3)]:\n hybrid_block_out = foo_hybrid(a, b)\n block_out = foo(a, b)\n assert_almost_equal(hybrid_block_out.asnumpy(), block_out.asnumpy())\n # When hybridize is called, we 
need to make sure that the model raises for the unsupported cases\n # 1. Scalar values in the input\n # 2. No sym in the input\n # 3. No mixing of cpu ndarray and gpu ndarray (Tested in gpu/test_gluon_gpu.py)\n # 4. Allow mixing of cpu_pinned and cpu\n foo_hybrid = FooHybrid()\n foo_hybrid.hybridize()\n pytest.raises(ValueError, lambda: foo_hybrid(mx.np.ones((10,)), 1))\n foo_hybrid = FooHybrid()\n foo_hybrid.hybridize()\n pytest.raises(TypeError, lambda: foo_hybrid(mx.np.ones((10,)), mx.sym.var('a')))\n foo_hybrid = FooHybrid()\n foo_hybrid.hybridize()\n pytest.raises(ValueError, lambda: foo_hybrid(mx.np.ones((10,), ctx=mx.cpu(1)),\n mx.np.ones((10,), ctx=mx.cpu(2))))\n\n\ndef check_layer_forward(layer, dshape):\n print(\"checking layer {}\\nshape: {}.\".format(layer, dshape))\n layer.initialize()\n x = mx.np.ones(shape=dshape)\n x.attach_grad()\n with mx.autograd.record():\n out = layer(x)\n out.backward()\n\n np_out = out.asnumpy()\n np_dx = x.grad.asnumpy()\n\n layer.hybridize()\n\n x = mx.np.ones(shape=dshape)\n x.attach_grad()\n with mx.autograd.record():\n out = layer(x)\n out.backward()\n\n mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6)\n mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6)\n\n@pytest.mark.parametrize('layer,shape', [\n (nn.Conv1D(16, 3, in_channels=4), (1, 4, 10)),\n (nn.Conv1D(16, 3, groups=2, in_channels=4), (1, 4, 10)),\n (nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4), (1, 4, 10)),\n (nn.Conv2D(16, (3, 4), in_channels=4), (1, 4, 20, 20)),\n (nn.Conv2D(16, (5, 4), in_channels=4), (1, 4, 20, 20)),\n (nn.Conv2D(16, (3, 4), groups=2, in_channels=4), (1, 4, 20, 20)),\n (nn.Conv2D(16, (3, 4), strides=4, in_channels=4), (1, 4, 20, 20)),\n (nn.Conv2D(16, (3, 4), dilation=4, in_channels=4), (1, 4, 20, 20)),\n (nn.Conv2D(16, (3, 4), padding=4, in_channels=4), (1, 4, 20, 20)),\n (nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'), (1, 4, 10, 10, 10)),\n (nn.Conv3D(16, (5, 
4, 3), in_channels=4), (1, 4, 10, 10, 10)),\n (nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4), (1, 4, 10, 10, 10)),\n (nn.Conv3D(16, 4, strides=4, in_channels=4), (1, 4, 10, 10, 10)),\n (nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4), (1, 4, 10, 10, 10)),\n])\ndef test_conv(layer, shape):\n check_layer_forward(layer, shape)\n\n@pytest.mark.parametrize('layer,shape', [\n (nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4), (1, 10, 10, 4)),\n # (nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4), (1, 10, 10, 10, 4)),\n])\n@pytest.mark.skipif(mx.context.current_context().device_type!='gpu' or\n not mx.runtime.Features().is_enabled('CUDNN'),\n reason='nhwc/ndhwc layout is only supported with CUDNN.')\ndef test_conv_nhwc(layer, shape):\n check_layer_forward(layer, shape)\n\n\ndef test_deconv():\n # layers1d = [\n # nn.Conv1DTranspose(16, 3, in_channels=4),\n # nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),\n # nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),\n # ]\n # for layer in layers1d:\n # check_layer_forward(layer, (1, 4, 10))\n\n\n layers2d = [\n nn.Conv2DTranspose(16, (3, 4), in_channels=4),\n nn.Conv2DTranspose(16, (5, 4), in_channels=4),\n nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),\n nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),\n nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),\n # nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),\n nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),\n ]\n for layer in layers2d:\n check_layer_forward(layer, (1, 4, 20, 20))\n\n\n # layers3d = [\n # nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),\n # nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),\n # nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),\n # nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),\n # nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),\n # ]\n # for layer in layers3d:\n # check_layer_forward(layer, (1, 4, 10, 10, 10))\n #\n #\n # 
def test_pool():
    """Forward/backward check for max/avg/global pooling of all ranks, in both
    channel-first and channel-last layouts, plus a ceil_mode shape check."""
    # transpose shape to bring feature dimension 'c' from 2nd position to last
    def transpose(shape):
        return (shape[0],) + shape[2:] + (shape[1],)

    for layout in ['NCW', 'NWC']:
        shape1d = (1, 2, 10)
        if layout == 'NWC':
            shape1d = transpose(shape1d)
        layers1d = [
            nn.MaxPool1D(layout=layout),
            nn.MaxPool1D(3, layout=layout),
            nn.MaxPool1D(3, 2, layout=layout),
            nn.AvgPool1D(layout=layout),
            nn.AvgPool1D(count_include_pad=False, layout=layout),
            nn.GlobalAvgPool1D(layout=layout),
            ]
        for layer in layers1d:
            check_layer_forward(layer, shape1d)


    for layout in ['NCHW', 'NHWC']:
        shape2d = (1, 2, 10, 10)
        if layout == 'NHWC':
            shape2d = transpose(shape2d)
        layers2d = [
            nn.MaxPool2D(layout=layout),
            nn.MaxPool2D((3, 3), layout=layout),
            nn.MaxPool2D(3, 2, layout=layout),
            nn.AvgPool2D(layout=layout),
            nn.AvgPool2D(count_include_pad=False, layout=layout),
            nn.GlobalAvgPool2D(layout=layout),
            ]
        for layer in layers2d:
            check_layer_forward(layer, shape2d)

    for layout in ['NCDHW', 'NDHWC']:
        shape3d = (1, 2, 10, 10, 10)
        if layout == 'NDHWC':
            shape3d = transpose(shape3d)
        layers3d = [
            nn.MaxPool3D(layout=layout),
            nn.MaxPool3D((3, 3, 3), layout=layout),
            nn.MaxPool3D(3, 2, layout=layout),
            nn.AvgPool3D(layout=layout),
            nn.AvgPool3D(count_include_pad=False, layout=layout),
            nn.GlobalAvgPool3D(layout=layout),
            ]
        for layer in layers3d:
            check_layer_forward(layer, shape3d)

    # test ceil_mode: with a 10x10 input and 3x3 window, floor gives 3 output
    # positions per axis, ceil gives 4.
    for layout in ['NCHW', 'NHWC']:
        xshape = (2, 2, 10, 10)
        noceil_out_shape = (2, 2, 3, 3)
        ceil_out_shape = (2, 2, 4, 4)
        if layout == 'NHWC':
            xshape = transpose(xshape)
            noceil_out_shape = transpose(noceil_out_shape)
            ceil_out_shape = transpose(ceil_out_shape)

        x = mx.np.zeros(xshape)

        layer = nn.MaxPool2D(3, ceil_mode=False, layout=layout)
        layer.initialize()
        assert (layer(x).shape==noceil_out_shape)

        layer = nn.MaxPool2D(3, ceil_mode=True, layout=layout)
        layer.initialize()
        assert (layer(x).shape==ceil_out_shape)


@pytest.mark.parametrize('variable', ['running_var', 'running_mean'])
def test_batchnorm_backward_synchronization(variable):
    """
    Tests if synchronization of BatchNorm running variables is done correctly.
    If not, the test sometimes fails - depending on the timing.
    """
    ctx = mx.test_utils.default_context()

    for _ in range(20):
        layer = nn.BatchNorm()
        layer.initialize(ctx=ctx)
        for _ in range(3):
            data = mx.np.random.normal(loc=10, scale=2, size=(1, 3, 10, 10), ctx=ctx)
            with mx.autograd.record():
                out = layer(data)
            out.backward()

        # check if each read give the same value
        var1 = getattr(layer, variable).data().asnumpy()
        for _ in range(10):
            var2 = getattr(layer, variable).data().asnumpy()
            if (var1 != var2).any():
                raise AssertionError("Two consecutive reads of " + variable + " give different results")


def test_batchnorm():
    layer = nn.BatchNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))


@use_np
@xfail_when_nonstandard_decimal_separator
def test_sync_batchnorm():
    """Compare SyncBatchNorm (split across devices) against plain BatchNorm and
    against a NumPy-style reference computation of the normalized output and
    the updated running statistics."""
    def _check_batchnorm_result(input, num_devices=1, cuda=False):
        from mxnet.gluon.utils import split_and_load

        def _find_bn(module):
            if isinstance(module, (mx.gluon.nn.BatchNorm, mx.gluon.nn.SyncBatchNorm)):
                return module
            elif isinstance(module.module, (mx.gluon.nn.BatchNorm, mx.gluon.nn.SyncBatchNorm)):
                return module.module

            raise RuntimeError('BN not found')

        def _syncParameters(bn1, bn2, ctx):
            ctx = input.context
            bn2.gamma.set_data(bn1.gamma.data(ctx))
            bn2.beta.set_data(bn1.beta.data(ctx))
            bn2.running_mean.set_data(bn1.running_mean.data(ctx))
            bn2.running_var.set_data(bn1.running_var.data(ctx))

        input1 = input.copy()
        input2 = input.copy()

        if cuda:
            input1 = input.as_in_context(mx.gpu(0))
            ctx_list = [mx.gpu(i) for i in range(num_devices)]
        else:
            ctx_list = [mx.cpu(0) for _ in range(num_devices)]

        nch = input.shape[1] if input.ndim > 1 else 1
        bn1 = mx.gluon.nn.BatchNorm(in_channels=nch)
        bn2 = mx.gluon.nn.SyncBatchNorm(
            in_channels=nch, num_devices=num_devices)

        bn1.initialize(ctx=ctx_list[0])
        bn2.initialize(ctx=ctx_list)

        # using the same values for gamma and beta
        #_syncParameters(_find_bn(bn1), _find_bn(bn2), ctx_list[0])

        input1.attach_grad()
        inputs2 = split_and_load(input2, ctx_list, batch_axis=0)
        for xi in inputs2:
            xi.attach_grad()

        with mx.autograd.record():
            output1 = bn1(input1)
            output2 = [bn2(xi) for xi in inputs2]
            loss1 = (output1 ** 2).sum()
            loss2 = [(output ** 2).sum() for output in output2]
            mx.autograd.backward(loss1)
            mx.autograd.backward(loss2)

        # BUGFIX: the per-device chunks were split along batch_axis=0, so they
        # must be re-assembled along axis 0 as well (the gradient concat below
        # already does this); axis=1 produced a wrong shape for num_devices > 1.
        output2 = mx.np.concatenate([output.as_in_context(input.context)
                                     for output in output2], axis=0)
        # check bn1

        momentum = 0.9
        epsilon = 1e-5
        axis = 1
        data = input1
        running_mean = mx.np.zeros(nch, ctx=data.context)
        running_var = mx.np.ones(nch, ctx=data.context)

        # reference: normalize over all axes except the channel axis
        axes = list(range(data.ndim))
        del axes[axis]
        data_mean = data.mean(axis=axes, keepdims=True)
        data_var = mx.np.square(data - data_mean).mean(axis=axes, keepdims=True)

        target_output = (data - data_mean) / mx.np.sqrt(data_var + epsilon)

        # squeeze data_mean and data_var
        data_mean_flat = data_mean.squeeze()
        data_var_flat = data_var.squeeze()

        running_mean = running_mean * momentum + \
            data_mean_flat * (1 - momentum)
        running_var = running_var * momentum + \
            data_var_flat * (1 - momentum)

        atol = 1e-2
        rtol = 1e-2
        assert_almost_equal(output1.asnumpy(), target_output.asnumpy(),
                            atol=atol, rtol=rtol)
        assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),
                            running_mean.asnumpy(),
                            atol=atol, rtol=rtol)
        assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),
                            running_var.asnumpy(),
                            atol=atol, rtol=rtol)
        # assert forwarding
        assert_almost_equal(input1.asnumpy(), input2.asnumpy(),
                            atol=atol, rtol=rtol)
        assert_almost_equal(output1.asnumpy(),
                            output2.asnumpy(), atol=atol, rtol=rtol)
        assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),
                            _find_bn(bn2).running_mean.data(ctx_list[0]).asnumpy(),
                            atol=atol, rtol=rtol)
        assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),
                            _find_bn(bn2).running_var.data(ctx_list[0]).asnumpy(),
                            atol=atol, rtol=rtol)
        input2grad = mx.np.concatenate(
            [output.grad.as_in_context(input.ctx) for output in inputs2], axis=0)
        assert_almost_equal(input1.grad.asnumpy(),
                            input2grad.asnumpy(), atol=atol, rtol=rtol)

    cfgs = [(1, False)]
    num_gpus = 0 if default_context().device_type != 'gpu' else mx.context.num_gpus()
    batch_size = 24
    # only device counts that divide the batch evenly are usable
    for i in range(1, num_gpus + 1):
        if batch_size % i == 0:
            cfgs.append((i, True))
    for ndev, cuda in cfgs:
        # check with unsync version
        for shape in [(batch_size, 2), (batch_size, 3, 4), (batch_size, 4, 4, 4), (batch_size, 5, 6, 4, 4)]:
            print(str((ndev, cuda, shape)))
            for _ in range(10):
                _check_batchnorm_result(mx.np.random.uniform(size=shape,
                                                             ctx=mx.cpu(0)),
                                        num_devices=ndev, cuda=cuda)


def test_instancenorm():
    layer = nn.InstanceNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))

def test_layernorm():
    layer = nn.LayerNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))
    # Check for the case of error raising: in_channels=10 must reject width-11 input
    for hybridize in [False, True]:
        layer = nn.LayerNorm(in_channels=10)
        layer.initialize()
        if hybridize:
            layer.hybridize()
        pytest.raises(AssertionError, lambda: layer(mx.np.ones((2, 11))))

def test_groupnorm():
    layer = nn.GroupNorm()
    check_layer_forward(layer, (2, 10, 10, 10))
    layer = nn.GroupNorm(num_groups=2)
    check_layer_forward(layer, (2, 10, 10, 10))
    layer = nn.GroupNorm(num_groups=5)
    check_layer_forward(layer, (2, 10, 10, 10))
def test_reflectionpad():
    layer = nn.ReflectionPad2D(3)
    check_layer_forward(layer, (2, 3, 24, 24))


def test_reshape():
    """Backward must work through a reshape of a conv output."""
    x = mx.np.ones((2, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x.reshape((-1,))
        x = x + 10
    x.backward()


def test_slice():
    """Backward must work through a batch slice of a conv output."""
    x = mx.np.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1:3]
        x = x + 10
    x.backward()


def test_at():
    """Backward must work through single-element indexing of a conv output."""
    x = mx.np.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1]
        x = x + 10
    x.backward()


def test_deferred_init():
    # in_channels omitted: weight shape is inferred on the first forward call
    x = mx.np.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2)
    layer.initialize()
    layer(x)



@use_np
def check_split_data(x, num_slice, batch_axis, **kwargs):
    """Split ``x`` into ``num_slice`` chunks along ``batch_axis`` and verify
    the chunks re-concatenate to ``x`` and match numpy's array_split."""
    res = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
    assert len(res) == num_slice
    mx.test_utils.assert_almost_equal(mx.np.concatenate(res, axis=batch_axis).asnumpy(),
                                      x.asnumpy())
    np_res = onp.array_split(x.asnumpy(), num_slice, axis=batch_axis)
    res_asnp = [s.asnumpy() for s in res]
    for r1, r2 in zip(np_res, res_asnp):
        assert all(r1.reshape(-1) == r2.reshape(-1))


@use_np
def test_split_data_np():
    x = mx.np.random.uniform(size=(128, 33, 64))
    check_split_data(x, 8, 0)
    check_split_data(x, 3, 1)
    check_split_data(x, 4, 1, even_split=False)
    check_split_data(x, 15, 1, even_split=False)
    # 33 is not divisible by 4, so even_split (the default) must raise
    try:
        check_split_data(x, 4, 1)
    except ValueError:
        return
    assert False, "Should have failed"

def test_split_data():
    x = mx.np.random.uniform(size=(128, 33, 64))
    check_split_data(x, 8, 0)
    check_split_data(x, 3, 1)
    check_split_data(x, 4, 1, even_split=False)
    check_split_data(x, 15, 1, even_split=False)
    # 33 is not divisible by 4, so even_split (the default) must raise
    try:
        check_split_data(x, 4, 1)
    except ValueError:
        return
    assert False, "Should have failed"

def test_flatten():
    # Flatten keeps the batch axis and collapses the rest; 1-D input gains a
    # trailing axis of size 1.
    flatten = nn.Flatten()
    x = mx.np.zeros((3,4,5,6))
    assert flatten(x).shape == (3, 4*5*6)
    x = mx.np.zeros((3,6))
    assert flatten(x).shape == (3, 6)
    x = mx.np.zeros((3,))
    assert flatten(x).shape == (3, 1)

def test_block_attr_hidden():
    b = gluon.Block()

    # regular attributes can change types
    b.a = None
    b.a = 1


def test_block_attr_block():
    b = gluon.Block()

    with pytest.raises(TypeError):
        # regular variables can't change types
        b.b = gluon.Block()
        b.b = (2,)


def test_block_attr_param():
    b = gluon.Block()

    with pytest.raises(TypeError):
        # regular variables can't change types
        b.b = gluon.Parameter()
        b.b = (2,)


def test_block_attr_regular():
    b = gluon.Block()

    # set block attribute also sets a weakref in _children
    b.c = gluon.Block()
    c2 = gluon.Block()
    b.c = c2
    assert b.c is c2 and list(b._children.values())[0]() is c2


def test_block_attr_list_of_block():
    """Children hidden in plain lists/dicts are invisible to collect_params()
    and must trigger a warning; registered containers must not."""
    class Model1(gluon.Block):
        def __init__(self, **kwargs):
            super(Model1, self).__init__(**kwargs)
            self.layers = [nn.Dense(i * 10) for i in range(6)]

    class Model2(gluon.Block):
        def __init__(self, **kwargs):
            super(Model2, self).__init__(**kwargs)
            self.layers = dict()
            self.layers['a'] = [nn.Dense(10), nn.Dense(10)]

    class Model3(gluon.Block):
        def __init__(self, **kwargs):
            super(Model3, self).__init__(**kwargs)
            self.layers = nn.Sequential()
            self.layers.add(*[nn.Dense(i * 10) for i in range(6)])

    class Model4(gluon.Block):
        def __init__(self, **kwargs):
            super(Model4, self).__init__(**kwargs)
            self.data = {'a': '4', 'b': 123}

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model1()
        model.collect_params()
        assert len(w) > 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model2()
        model.collect_params()
        assert len(w) > 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model3()
        model.collect_params()
        assert len(w) == 0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        model = Model4()
        model.collect_params()
        assert len(w) == 0
def check_sequential(net):
    """Add three Dense layers to ``net`` and verify indexing/slicing semantics."""
    dense1 = gluon.nn.Dense(10)
    net.add(dense1)
    dense2 = gluon.nn.Dense(10)
    net.add(dense2)
    dense3 = gluon.nn.Dense(10)
    net.add(dense3)
    net.initialize()

    net(mx.np.zeros((10, 10)))
    net.hybridize()
    assert net[1] is dense2
    assert net[-1] is dense3
    slc = net[1:3]
    # slicing returns a container of the same type holding the same children
    assert len(slc) == 2 and slc[0] is dense2 and slc[1] is dense3
    assert isinstance(slc, type(net))

@use_np
def check_sequential_dc(net):
    """Same as check_sequential but with blocks that use deferred compute
    (a raw Parameter read inside forward)."""
    class MyBlock(mx.gluon.HybridBlock):
        def __init__(self):
            super().__init__()
            self.dense = mx.gluon.nn.Dense(units=10, in_units=10)
            self.weight = mx.gluon.Parameter('weight', shape=(10, ))

        def forward(self, x):
            return self.dense(x) + self.weight.data()

    dense1 = MyBlock()
    net.add(dense1)
    dense2 = MyBlock()
    net.add(dense2)
    dense3 = MyBlock()
    net.add(dense3)

    net.initialize()
    net.hybridize()
    net(mx.np.zeros((10, 10)))
    assert net[1] is dense2
    assert net[-1] is dense3
    slc = net[1:3]
    assert len(slc) == 2 and slc[0] is dense2 and slc[1] is dense3
    assert isinstance(slc, type(net))

@use_np
@pytest.mark.garbage_expected
def test_sequential():
    check_sequential(gluon.nn.Sequential())
    check_sequential(gluon.nn.HybridSequential())
    check_sequential_dc(gluon.nn.HybridSequential())

def test_sequential_warning():
    with warnings.catch_warnings(record=True) as w:
        # The following line permits the test to pass if run multiple times
        warnings.simplefilter('always')
        b = gluon.nn.Sequential()
        b.add(gluon.nn.Dense(20))
        # hybridizing a non-hybrid Sequential must warn
        b.hybridize()
        assert len(w) == 1


@use_np
def test_global_norm_clip():
    """clip_global_norm must scale arrays by total_norm/max_norm and warn on
    non-finite values only when check_isfinite is set."""
    def check_global_norm_clip(check_isfinite):
        x1 = mx.np.ones((3,3))
        x2 = mx.np.ones((4,4))
        # total norm of 25 ones is 5; max_norm=1.0 scales everything by 1/5
        norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite)
        assert norm == 5.0
        assert_almost_equal(x1.asnumpy(), onp.ones((3,3))/5)
        assert_almost_equal(x2.asnumpy(), onp.ones((4,4))/5)

        x3 = mx.np.array([1.0, 2.0, float('nan')])
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            gluon.utils.clip_global_norm([x1, x3], 2.0, check_isfinite=check_isfinite)
            # exactly one warning when checking, none otherwise
            assert len(w) == check_isfinite

    for check_isfinite in [True, False]:
        check_global_norm_clip(check_isfinite)


def test_embedding():
    def check_embedding():
        layer = gluon.nn.Embedding(10, 100)
        layer.initialize()
        x = mx.np.array([3,4,2,0,1])
        with mx.autograd.record():
            y = layer(x)
            y.backward()
        # rows 0-4 were looked up once each; rows 5-9 never
        assert (layer.weight.grad().asnumpy()[:5] == 1).all()
        assert (layer.weight.grad().asnumpy()[5:] == 0).all()

    def check_embedding_large_input():
        embedding = mx.gluon.nn.Embedding(10, 1)
        embedding.initialize()
        embedding.hybridize()
        shape = (20481,)
        with mx.autograd.record():
            emb_in = embedding(mx.np.ones(shape))
            loss = emb_in.sum()
        loss.backward()
        assert embedding.weight.grad().sum().item() == 20481

    check_embedding()
    check_embedding_large_input()

def test_export(tmpdir):
    tmpfile = os.path.join(str(tmpdir), 'gluon')
    ctx = mx.context.current_context()
    model = gluon.model_zoo.vision.resnet18_v1(
        ctx=ctx, pretrained=False)
    model.initialize()
    model.hybridize()
    data = mx.np.random.normal(size=(1, 3, 32, 32))
    out = model(data)

    symbol_filename, params_filename = model.export(tmpfile)
    assert symbol_filename == tmpfile+'-symbol.json'
    assert params_filename == tmpfile+'-0000.params'

@use_np
def test_import():
    """Round-trip a hybridized net through export/SymbolBlock.imports and
    check the outputs and repr survive."""
    ctx = mx.context.current_context()
    net1 = gluon.model_zoo.vision.resnet18_v1(
        ctx=ctx, pretrained=False)
    net1.initialize()
    net1.hybridize()
    data = mx.np.random.normal(size=(1, 3, 32, 32))
    out1 = net1(data)

    net1.export('net1', epoch=1)

    net2 = gluon.SymbolBlock.imports(
        'net1-symbol.json', ['data'], 'net1-0001.params', ctx)
    out2 = net2(data)
    lines = str(net2).splitlines()

    assert_almost_equal(out1.asnumpy(), out2.asnumpy())
    assert lines[0] == 'SymbolBlock('
    assert lines[1]
    assert lines[2] == ')'
def test_hybrid_stale_cache():
    """Mutating a hybridized net (adding/replacing children) must invalidate
    the cached graph so the next call reflects the new structure."""
    net = mx.gluon.nn.HybridSequential()
    net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))

    net.hybridize()
    net.initialize()
    net(mx.np.ones((2,3,5)))

    net.add(mx.gluon.nn.Flatten())
    assert net(mx.np.ones((2,3,5))).shape == (2, 30)

    net = mx.gluon.nn.HybridSequential()
    net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=False)
    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.np.ones((2,3,5)))

    # replacing a child (now with flatten=True) must change the output shape
    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.np.ones((2,3,5))).shape == (2, 10)


def test_lambda():
    """Lambda/HybridLambda wrappers (by-name and by-callable) must match the
    equivalent built-in activation blocks."""
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda x, *args: mx.npx.leaky_relu(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.npx.leaky_relu(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.np.random.uniform(size=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3)


@use_np
def test_fill_shape_deferred():
    """in_channels/in_units omitted: parameter shapes are inferred on the
    first forward call of a hybridized net."""
    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=2, padding=1),
            nn.BatchNorm(),
            nn.Dense(10))
    net  # no-op expression statement (leftover); harmless
    net.hybridize()
    net.initialize()
    net(mx.np.ones((2,3,5,7)))
    assert net[0].weight.shape[1] == 3, net[0].weight.shape[1]
    assert net[1].gamma.shape[0] == 64, net[1].gamma.shape[0]
    assert net[2].weight.shape[1] == 3072, net[2].weight.shape[1]


@use_np
def test_dtype():
    """cast('float64') must propagate through imperative and hybridized
    forward/backward, and per-layer dtype arguments must be honored."""
    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.cast('float64')
    with mx.autograd.record():
        y = net(mx.np.ones((16, 3, 32, 32), dtype='float64'))
        y.backward()

    net = mx.gluon.model_zoo.vision.resnet18_v1()
    net.initialize()
    net.hybridize()
    net(mx.np.ones((16, 3, 32, 32), dtype='float32'))

    net.cast('float64')
    net(mx.np.ones((16, 3, 32, 32), dtype='float64'))

    mx.npx.waitall()

    class Net(gluon.Block):
        def __init__(self, in_dim, output_dim):
            super(Net, self).__init__()
            self.embed = gluon.nn.Embedding(input_dim=in_dim, output_dim=output_dim,dtype=onp.float64)
            self.dense = gluon.nn.Dense(2, dtype=onp.float64)

        def forward(self, x):
            e = self.embed(x)
            assert(e.dtype == onp.float64)
            y = self.dense(e)
            assert(y.dtype == onp.float64)
            return y

    net = Net(5, 10)
    net.initialize()
    out = net(mx.np.ones((3,), dtype=onp.float64))
    mx.npx.waitall()

def test_fill_shape_load():
    """Shapes inferred at runtime must also be recoverable by a second net
    through save_parameters/load_parameters."""
    ctx = mx.context.current_context()
    net1 = nn.HybridSequential()
    net1.add(nn.Conv2D(64, kernel_size=2, padding=1),
             nn.BatchNorm(),
             nn.Dense(10))
    net1  # no-op expression statement (leftover); harmless
    net1.hybridize()
    net1.initialize(ctx=ctx)
    net1(mx.np.ones((2,3,5,7), ctx=ctx))
    net1.save_parameters('net_fill.params')

    net2 = nn.HybridSequential()
    net2.add(nn.Conv2D(64, kernel_size=2, padding=1),
             nn.BatchNorm(),
             nn.Dense(10))
    net2.hybridize()
    net2.initialize()
    net2.load_parameters('net_fill.params', ctx)
    assert net2[0].weight.shape[1] == 3, net2[0].weight.shape[1]
    assert net2[1].gamma.shape[0] == 64, net2[1].gamma.shape[0]
    assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1]


def test_inline():
    """inline_limit=0 disables subgraph inlining, which adds exactly two
    extra nodes to the recorded graph compared to inline_limit=3."""
    net = mx.gluon.nn.HybridSequential()
    net.add(mx.gluon.nn.Dense(10))
    net.add(mx.gluon.nn.Dense(10))
    net.add(mx.gluon.nn.Dense(10))

    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.np.zeros((1,10)))

    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.np.zeros((1,10)))

    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    assert len_1 == len_2 + 2
@xfail_when_nonstandard_decimal_separator
def test_activations():
    """Compare each built-in activation block against a hand-written
    elementwise reference on a fixed set of points."""
    point_to_validate = mx.np.array([-0.1, 0.1] * 3)

    swish = mx.gluon.nn.Swish()
    def swish_test(x):
        return x * mx.npx.sigmoid(x)

    for test_point, ref_point in zip(swish_test(point_to_validate), swish(point_to_validate)):
        assert test_point == ref_point

    silu = mx.gluon.nn.SiLU()
    def silu_test(x):
        return x * mx.npx.sigmoid(x)

    for test_point, ref_point in zip(silu_test(point_to_validate), silu(point_to_validate)):
        assert test_point == ref_point

    elu = mx.gluon.nn.ELU()
    def elu_test(x):
        def elu(x):
            return mx.np.expm1(x) if x <= 0.0 else x
        return [elu(x_i) for x_i in x]

    for test_point, ref_point in zip(elu_test(point_to_validate), elu(point_to_validate)):
        assert_almost_equal(test_point.asnumpy(), ref_point.asnumpy())

    selu = mx.gluon.nn.SELU()
    def selu_test(x):
        def selu(x):
            # canonical SELU constants
            scale, alpha = 1.0507009873554804934193349852946, 1.6732632423543772848170429916717
            return scale * x if x >= 0 else scale * alpha * mx.np.expm1(x)
        return [selu(x_i) for x_i in x]

    for test_point, ref_point in zip(selu_test(point_to_validate), selu(point_to_validate)):
        assert test_point == ref_point

    prelu = mx.gluon.nn.PReLU()
    prelu.initialize()
    x = point_to_validate.reshape((1, 3, 2))
    assert_almost_equal(prelu(x).asnumpy(), mx.np.where(x >= 0, x, 0.25 * x).asnumpy())

    # per-channel alpha: one slope per channel
    multichannel_init = mx.initializer.Constant(mx.np.array([0.1, 0.25, 0.5]))
    prelu_multichannel = mx.gluon.nn.PReLU(alpha_initializer=multichannel_init, in_channels=3)
    prelu_multichannel.initialize()
    assert_almost_equal(prelu_multichannel(x).asnumpy(), onp.array([[-0.01, 0.1], [-0.025, 0.1], [-0.05, 0.1]]))

    # https://github.com/apache/incubator-mxnet/issues/18381
    # gelu = mx.gluon.nn.GELU()
    # def gelu_test(x):
    #     CUBE_CONSTANT = 0.044715
    #     ROOT_TWO_OVER_PI = 0.7978845608028654
    #     def g(x):
    #         return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * x * x * x)
    #     def f(x):
    #         return 1.0 + mx.nd.tanh(g(x))
    #     def gelu(x):
    #         return 0.5 * x * f(x)
    #     return [gelu(x_i) for x_i in x]

    # for test_point, ref_point in zip(gelu_test(point_to_validate), gelu(point_to_validate)):
    #     assert test_point == ref_point


@use_np
def test_dropout():
    """axes= on Dropout broadcasts one mask over the given axes, so every
    slice along a masked axis must be identical."""
    def get_slice(x, axis, idx):
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]

    def check_dropout_axes(ratio, shape, axes):
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.np.random.uniform(size=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.gluon.nn.Dropout(rate=ratio, axes=axes)(broadcastx)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()

    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))

def test_req():
    """grad_req='add' must accumulate: two backward passes double the grad."""
    data = mx.np.random.uniform(size=(1,3,224,224))
    label = mx.np.random.uniform(size=(1))
    label[:] = 1
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    net = nn.HybridSequential()
    net1 = nn.HybridSequential()
    net1.add(nn.Dense(4))
    net2 = nn.HybridSequential()
    net2.add(nn.Dense(3))
    net2.add(nn.Dense(2))
    net.add(net1)
    net.add(net2)
    net.initialize()

    net.hybridize()

    for v in net.collect_params().values():
        v.grad_req = 'add'

    net.zero_grad()
    with mx.autograd.record():
        pred = net(data)
        l = loss(pred, label)
        l.backward()
        grad = net[0][0].weight.grad().mean().asnumpy()
        # run twice to check req = add
        pred = net(data)
        l = loss(pred, label)
        l.backward()

    grad_double = net[0][0].weight.grad().mean().asnumpy()
    assert_almost_equal(grad * 2, grad_double)


@use_np
def test_save_load(tmpdir):
    """save_parameters/load_parameters round-trip, including loading into a
    model whose output layer was replaced, and a hybridized LSTM network."""
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18, pretrained=False, root=str(tmpdir))
    net.initialize()
    net(mx.np.ones((1,3,224,224)))
    net.save_parameters(os.path.join(str(tmpdir), 'test_save_load.params'))

    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.output = mx.gluon.nn.Dense(1000)

    net.load_parameters(os.path.join(str(tmpdir), 'test_save_load.params'))

    class Network(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Network, self).__init__(**kwargs)
            self.encoders = gluon.nn.HybridSequential()
            for _ in range(2):
                lstm = mx.gluon.rnn.LSTM(200, 1, bidirectional=True)
                self.encoders.add(lstm)

        def forward(self, x):
            for i in range(2):
                x = self.encoders[i](x)
            return x
    net = Network()
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    net.hybridize()
    x = onp.random.rand(32, 10, 10)
    x = mx.np.array(x).as_in_context(mx.cpu())
    net(x)
    # _, param_path = tempfile.mkstemp(suffix='.params', dir=str(tmpdir))
    param_path = os.path.join(str(tmpdir), 'test_save_load_network.params')
    net.save_parameters(param_path)
    net2 = Network()
    net2.load_parameters(param_path)
@use_np
def test_save_load_deduplicate_with_shared_params(tmpdir):
    """deduplicate=True must save one copy of parameters shared between
    blocks; deduplicate=False (the default) saves both copies. Loading
    must work in both cases."""
    class B(mx.gluon.Block):
        def __init__(self):
            super(B, self).__init__()
            self.weight = gluon.Parameter('weight', shape=(10, 10))

    class C(mx.gluon.Block):
        def __init__(self, b1, b2):
            super(C, self).__init__()
            self.b1 = b1
            self.b2 = b2

    b1 = B()
    b2 = B().share_parameters(b1.collect_params())
    c = C(b1, b2)
    c.initialize()
    # _, param_path = tempfile.mkstemp(suffix='.params', dir=str(tmpdir))
    param_path = os.path.join(str(tmpdir), 'test_save_load_deduplicate_with_shared_params.params')
    c.save_parameters(param_path, deduplicate=True)

    params = mx.npx.load(param_path)
    assert len(params) == 1  # Only a single copy of the shared parameter is saved

    b1 = B()
    b2 = B().share_parameters(b1.collect_params())
    c = C(b1, b2)
    c.load_parameters(param_path)

    # Test default behavior
    c.save_parameters(param_path, deduplicate=False)

    params = mx.npx.load(param_path)
    assert len(params) == 2  # Without deduplication both copies of the shared parameter are saved

    b1 = B()
    b2 = B().share_parameters(b1.collect_params())
    c = C(b1, b2)
    c.load_parameters(param_path)


def test_hybrid_multi_context():
    # parameters replicated on two (CPU) contexts; forward on the first one
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.initialize(ctx=[mx.cpu(0), mx.cpu(1)])
    net.hybridize()
    net(mx.np.zeros((1, 3, 32, 32), ctx=mx.cpu(0))).asnumpy()

def test_zero_grad():
    """zero_grad() (and the underlying reset_arrays op) must zero gradients
    for dense and embedding parameters of various dtypes."""
    def _test_grad_reset(ctx, dtype='float32', sparse=False, embeddingType=None):
        data = mx.np.random.uniform(size=(3,3), dtype=dtype, ctx=ctx)
        if embeddingType is None:
            embeddingType = dtype
        net = nn.Embedding(3, 4, sparse_grad=sparse, dtype=embeddingType)
        net.initialize(ctx=ctx)
        with mx.autograd.record():
            l = net(data)
            l.backward()
        net.zero_grad()
        grad = net.collect_params()['weight'].grad()
        assert_almost_equal(grad.asnumpy(), grad.asnumpy() * 0)

    def _test_multi_reset(nArrays, dtype, ctx):
        # Construct the list of non-zeros arrays with random shapes
        arr = []
        for _ in range(nArrays):
            arrType = random.choice(dtype) if isinstance(dtype, list) else dtype
            shape = ()
            for _ in range(onp.random.randint(1, 5)):
                shape = shape + (onp.random.randint(1, 10),)
            arr.append(mx.nd.random.uniform(shape=shape, dtype=arrType, ctx=ctx))

        # Reset all arrays
        mx.nd.reset_arrays(*arr, num_arrays=len(arr))

        # Check results
        for i in range(nArrays):
            grad = arr[i].asnumpy()
            assert_almost_equal(grad, grad * 0)


    # Setting context for current test
    ctx = mx.context.current_context()

    # Launching _test_multi_reset 10 times with different types & randomly chosen nArrays
    # (first iteration passes the whole list for mixed-dtype resets;
    # NOTE: the loop variable shadows the builtin `type`)
    testedTypes = ['float16', 'float32', 'float64']
    for _ in range(10):
        for type in [testedTypes] + testedTypes:
            _test_multi_reset(onp.random.randint(1, 50), type, ctx)

    with environment('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
        for type in ['float16', 'float32', 'float64']:
            for embType in ['float32', 'float64']:
                _test_grad_reset(ctx, dtype=type, sparse=False, embeddingType=embType)


@pytest.mark.parametrize('static_alloc', [False, True])
@pytest.mark.parametrize('static_shape', [False, True])
def test_hybrid_static_memory(static_alloc, static_shape):
    """Outputs and gradients must match between a plain and a static-memory
    hybridized run of the same network."""
    if static_shape and not static_alloc:
        pytest.skip()
    x = mx.np.random.uniform(size=(2, 3, 32, 32))
    x.attach_grad()

    net = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=False, ctx=mx.context.current_context())
    net.initialize()
    net(x)

    def test(net, x):
        with mx.autograd.record():
            y = net(x) + net(x)
            y.backward()

        grads = {k: v.grad() for k, v in net.collect_params().items() if v.grad_req != 'null'}

        return y, grads

    y1, grads1 = test(net, x)
    net.hybridize(static_alloc=static_alloc, static_shape=static_shape)
    y2, grads2 = test(net, x)

    assert_almost_equal(y1.asnumpy(), y2.asnumpy(), rtol=1e-3, atol=1e-5)
    for key in grads1:
        assert_almost_equal(grads1[key].asnumpy(), grads2[key].asnumpy(), rtol=1e-3, atol=1e-4)
@pytest.mark.parametrize('static_alloc', [False, True])
@pytest.mark.parametrize('static_shape', [False, True])
def test_hybrid_static_memory_switching(static_alloc, static_shape):
    """A static-memory hybridized net must cope with changing batch sizes."""
    if static_shape and not static_alloc:
        pytest.skip()
    net = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=False, ctx=mx.context.current_context())
    net.initialize()
    net.hybridize(static_alloc=static_alloc, static_shape=static_shape)

    x = mx.np.random.uniform(size=(4, 3, 32, 32))
    net(x)
    with mx.autograd.record():
        y = net(x)
        y.backward()
    x = mx.np.random.uniform(size=(2, 3, 32, 32))
    net(x)
    with mx.autograd.record():
        y = net(x)
        y.backward()
    mx.npx.waitall()

def test_hook():
    """Forward and forward-pre hooks must fire per call and stop firing
    once their handles are detached."""
    global hook_call_count
    hook_call_count = 0
    global pre_hook_call_count
    pre_hook_call_count = 0

    def call_hook(block, x, y):
        global hook_call_count
        hook_call_count += 1

    def call_pre_hook(block, x):
        global pre_hook_call_count
        pre_hook_call_count += 1

    block = nn.Dense(10)
    block.initialize()
    handle = block.register_forward_hook(call_hook)
    pre_handle = block.register_forward_pre_hook(call_pre_hook)
    block(mx.np.ones((3, 5)))

    assert hook_call_count == 1
    assert pre_hook_call_count == 1

    handle.detach()
    block(mx.np.ones((3, 5)))

    assert hook_call_count == 1
    assert pre_hook_call_count == 2

    pre_handle.detach()
    block(mx.np.ones((3, 5)))
    assert hook_call_count == 1
    assert pre_hook_call_count == 2

@use_np
def test_op_hook_output_names():
    """register_op_hook callbacks must report the expected node/op names,
    optionally for all inputs when monitor_all=True."""
    def check_name(block, expected_names, inputs=None, expected_opr_names=None, monitor_all=False):
        opr_names = []
        output_names = []

        def mon_callback(node_name, opr_name, arr):
            output_names.append(node_name)
            opr_names.append(opr_name)
            assert isinstance(arr, mx.nd.NDArray)

        block.register_op_hook(mon_callback, monitor_all)
        if not inputs:
            block(mx.np.ones((2, 3, 4)))
        else:
            block(inputs)

        # the second name component is an unstable unique id; drop it before comparing
        for output_name, expected_name in zip(output_names, expected_names):
            output_name_list = output_name.split('_')
            output_name_list.pop(1)
            expected_name_list = expected_name.split('_')
            expected_name_list.pop(1)
            assert output_name_list == expected_name_list

        if expected_opr_names:
            for opr_name, expected_opr_name in zip(opr_names, expected_opr_names):
                assert opr_name == expected_opr_name

    # Test with Dense layer
    model = mx.gluon.nn.HybridSequential()
    model.add(mx.gluon.nn.Dense(2))
    model.initialize()
    model.hybridize()
    check_name(model, ["node_0_output"])

    # Test with Activation, FListInputNames not registered, input name will have _input appended
    model = mx.gluon.nn.HybridSequential()
    model.add(mx.gluon.nn.Activation("relu"))
    model.initialize()
    model.hybridize()
    check_name(model, ["node_1_output"])

    # Test with Pooling, monitor_all is set to True
    model = mx.gluon.nn.HybridSequential()
    model.add(mx.gluon.nn.AvgPool1D())
    model.initialize()
    model.hybridize()
    check_name(model, ['node_2_data', 'node_2_output'],
               expected_opr_names=["Pooling"], monitor_all=True)

    # stack two layers and test
    model = mx.gluon.nn.HybridSequential()
    model.add(mx.gluon.nn.Dense(2))
    model.add(mx.gluon.nn.Activation("relu"))
    model.initialize()
    model.hybridize()
    check_name(model,
               ['node_3_data', 'node_3_weight',
                'node_3_bias', 'node_3_output',
                'node_4_input0', 'node_4_output'], monitor_all=True)

    # check with different hybridize modes
    model.hybridize(static_alloc=True)
    check_name(model,
               ['node_5_data', 'node_5_weight',
                'node_5_bias', 'node_5_output',
                'node_6_input0', 'node_6_output'], monitor_all=True)

def test_apply():
    """Block.apply must visit children first, then the block itself."""
    global called_blocks
    called_blocks = []

    def record_name(block):
        global called_blocks
        called_blocks.append(type(block))

    block = nn.HybridSequential()
    block.add(nn.Dense(10))
    block.add(nn.Dropout(0.5))
    block.apply(record_name)

    assert called_blocks == [type(block[0]), type(block[1]), type(block)]


@use_np
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_summary():
    """Block.summary must work for CNNs, shared-parameter nets and RNNs with
    explicit begin_state, and must refuse hybridized networks."""
    net = gluon.model_zoo.vision.resnet50_v1()
    net.initialize()
    net.summary(mx.np.ones((32, 3, 224, 224)))

    net2 = nn.Sequential()
    net2.add(nn.Embedding(40, 30))
    net2.add(gluon.rnn.LSTM(30))
    net2.add(nn.Dense(40, flatten=False).share_parameters(net2[0].params))
    net2.initialize()
    with mx.util.np_shape(True), mx.util.np_array(True):
        net2.summary(mx.np.ones((80, 32)))

    net3 = gluon.rnn.LSTM(30)
    net3.initialize()
    begin_state = net3.begin_state(32)
    net3.summary(mx.np.ones((80, 32, 5)), begin_state)

    net.hybridize()
    pytest.raises(AssertionError, net.summary, mx.np.ones((32, 3, 224, 224)))

@use_np
@pytest.mark.skip(reason='Currently, sparse feature is not supported in Gluon2.0')
def test_sparse_hybrid_block_grad():
    class Embedding(mx.gluon.HybridBlock):
        def __init__(self, num_tokens, embedding_size):
            super(Embedding, self).__init__()
            self.num_tokens = num_tokens

            self.embedding = mx.gluon.nn.Embedding(
                num_tokens, embedding_size, sparse_grad=True)

        def forward(self, words):
            emb = self.embedding(words)
            return emb + mx.np.ones_like(emb)

    embedding = Embedding(20, 3)
    embedding.initialize()
    embedding.hybridize()

    with mx.autograd.record():
        emb0 = embedding(mx.np.arange(10)).sum()
        emb1 = embedding(mx.np.arange(10)).sum()
        loss = emb0 + emb1
    loss.backward()
    grad = embedding.embedding.weight.grad().asnumpy()
    # rows 0-9 looked up twice, rows 10-19 never
    assert (grad[:10] == 2).all()
    assert (grad[10:] == 0).all()

@use_np
@pytest.mark.skip(reason='Currently, sparse feature is not supported in Gluon2.0')
def test_sparse_hybrid_block():
    class Linear(mx.gluon.HybridBlock):
        def __init__(self, units):
            super(Linear, self).__init__()
            self.w = gluon.Parameter('w', shape=(units, units))

        def forward(self, x, w):
            return mx.np.dot(x, w)

    class SparseBlock(mx.gluon.HybridBlock):
        def __init__(self, units):
            super(SparseBlock, self).__init__()
            self.net = Linear(units)

        def forward(self, x):
            return self.net(x) * x

    block = SparseBlock(2)
    block.initialize()
    block.hybridize()
    x = mx.np.ones((2,2)).tostype('csr')
    with mx.autograd.record():
        z = block(x) + block(x)
    z.backward()
    assert (block.net.w.grad().asnumpy() == 4).all()
__init__(self, units):\n super(SparseBlock, self).__init__()\n self.net = Linear(units)\n\n def forward(self, x):\n return self.net(x) * x\n\n block = SparseBlock(2)\n block.initialize()\n block.hybridize()\n x = mx.np.ones((2,2)).tostype('csr')\n with mx.autograd.record():\n z = block(x) + block(x)\n z.backward()\n assert (block.net.w.grad().asnumpy() == 4).all()\n\ndef test_hybrid_static_memory_recording():\n net = gluon.model_zoo.vision.get_resnet(\n 1, 18, pretrained=False, ctx=mx.context.current_context())\n net.initialize()\n net.hybridize(static_alloc=True)\n\n x = mx.np.random.uniform(size=(1, 3, 32, 32))\n with mx.autograd.record(True):\n net(x)\n net(x)\n\n\n@use_np\ndef test_share_inputs_outputs():\n class TestIOBackward(gluon.HybridBlock):\n def __init__(self):\n super(TestIOBackward, self).__init__()\n\n def forward(self, in1, in2):\n return in1 + in2\n\n class TestIOForward(gluon.HybridBlock):\n def __init__(self):\n super(TestIOForward, self).__init__()\n\n def forward(self, in1):\n return in1\n\n d1 = mx.np.arange(10)\n d2 = mx.np.arange(10)\n\n params=[{'inline_limit':0},\n {'inline_limit':0, 'static_alloc':True},\n {'inline_limit':0, 'static_alloc':True, 'static_shape':True}]\n # Test the case that inputs and outputs of a forward graph share NDArrays.\n for param in params:\n t = TestIOForward()\n t.hybridize(**param)\n for _ in range(5):\n d1.attach_grad()\n out_grad = mx.np.random.uniform(size=(10))\n res = t(d1)\n assert_almost_equal(res.asnumpy(), d1.asnumpy())\n\n # Test the case that inputs and outputs of a backward graph share NDArrays.\n for param in params:\n t = TestIOBackward()\n t.hybridize(**param)\n for _ in range(5):\n d1.attach_grad()\n d2.attach_grad()\n out_grad = mx.np.random.uniform(size=(10))\n with mx.autograd.record():\n res = t(d1, d2)\n res.backward(out_grad=out_grad)\n assert_almost_equal(out_grad.asnumpy(), d1.grad.asnumpy())\n assert_almost_equal(out_grad.asnumpy(), d2.grad.asnumpy())\n\n\n@use_np\ndef 
test_grad_graph_change():\n class Model(mx.gluon.HybridBlock):\n def forward(self, array, index):\n row = array.take(index)\n return row, index\n array = mx.np.arange(3)\n index = mx.np.array([2])\n array.attach_grad()\n model = Model()\n model.hybridize(inline_limit=0)\n with mx.autograd.record(train_mode=True):\n row, _ = model(array, index)\n row.backward()\n\n\ndef check_layer_forward_withinput(net, x):\n x_hybrid = x.copy()\n x.attach_grad()\n x_hybrid.attach_grad()\n net.initialize()\n with mx.autograd.record():\n out1 = net(x_hybrid)\n out1.backward()\n net.hybridize()\n with mx.autograd.record():\n out2 = net(x)\n out2.backward()\n mx.test_utils.assert_almost_equal(x.grad.asnumpy(), x_hybrid.grad.asnumpy(), rtol=1e-5, atol=1e-6)\n mx.test_utils.assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-5, atol=1e-6)\n\n@use_np\n@pytest.mark.parametrize('chn_num', [16, 256])\n@pytest.mark.parametrize('kernel', [1, 3, 224])\ndef test_conv2d_16c(chn_num, kernel):\n batch_size = 4\n class Net(gluon.HybridBlock):\n def __init__(self,\n chn_num,\n kernel,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = gluon.nn.Conv2D(chn_num, (kernel, kernel))\n\n def forward(self, x):\n out = self.conv0(x)\n return out\n\n x = mx.np.random.uniform(-1.0, 1.0, size=(batch_size, 3, 224, 224))\n net = Net(chn_num, kernel)\n check_layer_forward_withinput(net, x)\n\n@use_np\n@pytest.mark.parametrize('grp', [16])\n@pytest.mark.parametrize('kernel_size', [1, 3])\ndef test_group_conv2d_16c(grp, kernel_size):\n input_size_list = onp.random.randint(low=3, high=65, size=10).tolist()\n batch_size = 4\n class Net(gluon.HybridBlock):\n def __init__(self,\n chn_num,\n kernel,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = gluon.nn.Conv2D(chn_num, (1, 1))\n self.conv1 = gluon.nn.Conv2D(chn_num, (kernel, kernel), groups=chn_num)\n\n def forward(self, x):\n y = self.conv0(x)\n out = self.conv1(y)\n return out\n\n for i in range(len(input_size_list)):\n x = 
mx.np.random.uniform(-1.0, 1.0, size=(batch_size, 3, input_size_list[i], input_size_list[i]))\n net = Net(grp, kernel_size)\n check_layer_forward_withinput(net, x)\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_deconv2d_16c():\n in_chn_list = [1024, 512, 256, 128, 64, 32, 16]\n out_chn_list = [512, 256, 128, 64, 32, 16, 3]\n kernel_list = [1, 3, 5, 7]\n in_shape = [4, 8, 16, 32, 64, 224]\n batch_size = 4\n class Net(gluon.HybridBlock):\n def __init__(self, chn_num, kernel, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.deconv0 = gluon.nn.Conv2DTranspose(chn_num, (kernel, kernel))\n\n def forward(self, x):\n out = self.deconv0(x)\n return out\n for i in range(len(in_shape)):\n x = mx.np.random.uniform(-1.0, 1.0, size=(batch_size, in_chn_list[i], in_shape[i], in_shape[i]))\n for j in range(len(kernel_list)):\n net = Net(out_chn_list[i], kernel_list[j])\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_batchnorm_16c():\n chn_list = [16, 1024]\n shape = onp.random.randint(low=1, high=300, size=10)\n shape_list = []\n for i in range(len(shape)):\n shape_list.append((shape[i], shape[i]))\n batch_size = 4\n class Net(gluon.HybridBlock):\n def __init__(self,\n chn_num,\n kernel,\n axis,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = gluon.nn.Conv2D(chn_num, (kernel, kernel))\n self.bn0 = gluon.nn.BatchNorm(axis=axis)\n\n def forward(self, x):\n conv = self.conv0(x)\n out = self.bn0(conv)\n return out\n\n for i in range(len(chn_list)):\n for j in range(len(shape_list)):\n shape = (batch_size, ) + (3,) + shape_list[j]\n x = mx.np.random.uniform(-1.0, 1.0, size=shape)\n net = Net(chn_list[i], 1, 1)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\ndef test_batchnorm_chnls():\n chn_list = [1024, 512, 256, 128, 64, 45, 32, 16, 
3]\n class Net(gluon.HybridBlock):\n def __init__(self,\n chn_num,\n norm_kwargs=None,\n in_channels=3,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.in_channels = in_channels\n self.conv1 = gluon.nn.Conv3D(\n in_channels=self.in_channels,\n channels=chn_num,\n kernel_size=(1, 7, 7),\n strides=(1, 2, 2),\n padding=(0, 3, 3),\n use_bias=False,\n )\n self.bn1 = gluon.nn.BatchNorm(in_channels=chn_num, **({} if norm_kwargs is None else norm_kwargs))\n\n def forward(self, x):\n \"\"\"Hybrid forward of R2+1D net\"\"\"\n conv = self.conv1(x)\n out = self.bn1(conv)\n return out\n\n for i in range(len(chn_list)):\n net = Net(chn_list[i])\n net.initialize(init=init.Constant(1))\n x = mx.np.zeros((1, 3, 8, 160, 160))\n net(x).asnumpy()\n\n\n@use_np\ndef test_concat():\n chn_list = [16, 64]\n shapes = [1, 3, 5]\n input_num = onp.random.randint(low=2, high=11)\n shape_list = []\n for i in range(len(shapes)):\n shape_list.append((shapes[i], shapes[i]))\n batch_size = 4\n class Net(gluon.HybridBlock):\n def __init__(self,\n check_dim,\n input_num,\n chn_num,\n kernel,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.concat = nn.HybridConcatenate(axis=check_dim)\n for _ in range(input_num):\n self.concat.add(gluon.nn.Conv2D(chn_num, (kernel, kernel)))\n\n def forward(self, x):\n return self.concat(x)\n\n for _ in range(len(shape_list)):\n shape = (batch_size,) + (3,) + shape_list[i]\n x = mx.np.random.uniform(-1.0, 1.0, size=shape)\n for i in range(len(chn_list)):\n for axis in range(4):\n net = Net(axis, input_num, chn_list[i], 1)\n check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_reshape_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(64, (3, 3))\n\n def forward(self, x):\n x_reshape = x.reshape((-1, 3, 128, 32))\n out = self.conv0(x_reshape)\n return out\n x = mx.np.random.uniform(size=(4, 3, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, 
x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_conv_reshape_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(64, (3, 3))\n self.conv1 = nn.Conv2D(128, (3, 3))\n\n def forward(self, x):\n x_reshape = x.reshape((0, 0, 128, 32))\n y = self.conv0(x_reshape)\n \"spatial shape of y is (62, 62)\"\n y_reshape = y.reshape((0, 0, 124, 31))\n out = self.conv1(y_reshape)\n return out\n x = mx.np.random.uniform(size=(4, 3, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_slice_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(16, (3, 3))\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=(0, 2, 0, 0), end=(4, 5, 32, 32))\n out = self.conv0(x_slice)\n return out\n x = mx.np.random.uniform(size=(8, 6, 32, 32))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n\n@use_np\ndef test_slice_conv_slice_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(32, (3, 3))\n self.conv1 = nn.Conv2D(16, (1, 1))\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=(0, 0, 0, 0), end=(4, 16, 16, 16))\n y = self.conv0(x_slice)\n \"shape of y is (4, 32, 14, 14)\"\n y_slice = mx.npx.slice(y, begin=(0, 0, 0, 0), end=(4, 16, 3, 3))\n out = self.conv1(y_slice)\n return out\n x = mx.np.random.uniform(size=(4, 32, 32, 32))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_slice_conv_reshape_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(64, (3, 3))\n self.conv1 = 
nn.Conv2D(128, (3, 3))\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=(0, 0, 1, 1), end=(4, 16, 33, 33))\n y = self.conv0(x_slice)\n \"shape of y is (4, 64, 30, 30)\"\n y_reshape = y.reshape((0, 0, 60, 15))\n out = self.conv1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_reshape_conv_slice_conv():\n \"\"\"\n This test will test gluon Conv2d computation with ndarray reshape and slice\n \"\"\"\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(16, (3, 3))\n self.conv1 = nn.Conv2D(32, (3, 3))\n\n def forward(self, x):\n x_reshape = x.reshape((-1, 3, 64, 16))\n y = self.conv0(x_reshape)\n \"shape of y is (4, 16, 62, 14)\"\n y_slice = mx.npx.slice(y, begin=(0, 0, 0, 0), end=(2, 16, 14, 14))\n out = self.conv1(y_slice)\n return out\n x = mx.np.random.uniform(size=(4, 3, 32, 32))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_reshape_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = onp.random.randint(1, 17)\n self.dense0 = nn.Dense(channel0)\n\n def forward(self, x):\n x_reshape = x.reshape((8, 64, 128, -1))\n out = self.dense0(x_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n\n@use_np\ndef test_slice_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = onp.random.randint(1, 17)\n self.dense0 = nn.Dense(channel0)\n self.slice = slice\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=tuple(self.slice[0]),\n end=tuple(self.slice[1]))\n out = self.dense0(x_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 32, 64, 64))\n slice = [[0, 16, 0, 0], [4, 32, 32, 32]]\n net = Net(slice)\n 
check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_slice_dense_slice_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = 32\n channel1 = onp.random.randint(1, 17)\n self.dense0 = nn.Dense(channel0)\n self.dense1 = nn.Dense(channel1)\n self.slice = slice\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=tuple(self.slice[0]), end=tuple(self.slice[1]))\n y = self.dense0(x_slice)\n y_slice = mx.npx.slice(y, begin=(1, 0), end=(3, 10))\n out = self.dense1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 32, 64, 64))\n slice = [[0, 16, 0, 0], [4, 32, 32, 32]]\n net = Net(slice)\n check_layer_forward_withinput(net, x)\n\n@use_np\ndef test_reshape_dense_reshape_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = onp.random.randint(1, 17)\n channel1 = onp.random.randint(1, 33)\n self.dense0 = nn.Dense(channel0)\n self.dense1 = nn.Dense(channel1)\n\n def forward(self, x):\n x_reshape = x.reshape((4, 16, 128, 32))\n y = self.dense0(x_reshape)\n y_reshape = y.reshape((1, -1))\n out = self.dense1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 16, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n\n@use_np\ndef test_slice_dense_reshape_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = onp.random.randint(1, 17)\n channel1 = onp.random.randint(1, 17)\n self.dense0 = nn.Dense(channel0)\n self.dense1 = nn.Dense(channel1)\n self.slice = slice\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=tuple(self.slice[0]), end=tuple(self.slice[1]))\n y = self.dense0(x_slice)\n y_reshape = y.reshape((1, -1))\n out = self.dense1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(16, 32, 64, 64))\n slice = [[0, 16, 0, 0], [4, 32, 32, 32]]\n net = Net(slice)\n 
check_layer_forward_withinput(net, x)\n\n\n@use_np\ndef test_reshape_dense_slice_dense():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n channel0 = 64\n channel1 = onp.random.randint(1, 17)\n self.dense0 = nn.Dense(channel0)\n self.dense1 = nn.Dense(channel1)\n\n def forward(self, x):\n x_reshape = x.reshape((4, 16, 128, 32))\n y = self.dense0(x_reshape)\n y_slice = mx.npx.slice(y, begin=(1, 32), end=(3, 64))\n out = self.dense1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(4, 16, 64, 64))\n net = Net()\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(96, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.reshape = shape\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_reshape = x_in.reshape(self.reshape)\n out = self.bn0(x_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 64, 64))\n shape = (4, 64, 64, -1)\n net = Net(shape)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_slice_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(128, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.slice = slice\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_slice = mx.npx.slice(x_in, begin=tuple(self.slice[0]),\n end=tuple(self.slice[1]))\n out = self.bn0(x_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n slice = [[0, 0, 0, 0], [4, 32, 32, 32]]\n net = Net(slice)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef 
test_slice_batchnorm_slice_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(128, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.bn1 = nn.BatchNorm()\n self.slice = slice\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_slice = mx.npx.slice(x_in, begin=tuple(self.slice[0][0]), end=tuple(self.slice[0][1]))\n y = self.bn0(x_slice)\n y_slice = mx.npx.slice(y, begin=tuple(self.slice[1][0]), end=tuple(self.slice[1][1]))\n out = self.bn1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n slice = [[[0, 0, 0, 0], [4, 32, 32, 32]], [[0, 0, 0, 0], [2, 64, 16, 16]]]\n net = Net(slice)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_batchnorm_reshape_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(128, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.bn1 = nn.BatchNorm()\n self.reshape = shape\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_reshape = x_in.reshape(self.reshape[0])\n y = self.bn0(x_reshape)\n y_reshape = y.reshape(self.reshape[1])\n out = self.bn1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 64, 64))\n shape = [(4, 64, 64, -1), (4, 128, -1, 32)]\n net = Net(shape)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_slice_batchnorm_reshape_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(128, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.bn1 = nn.BatchNorm()\n self.reshape = shape\n self.slice = slice\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_slice = mx.npx.slice(x_in, begin=tuple(self.slice[0]), end=tuple(self.slice[1]))\n y = 
self.bn0(x_slice)\n y_reshape = y.reshape(self.reshape)\n out = self.bn1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n slice = [[0, 0, 0, 0], [4, 32, 32, 32]]\n shape = (1, 128, 64, -1)\n net = Net(shape, slice)\n check_layer_forward_withinput(net, x)\n\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_batchnorm_slice_batchnorm():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(128, (1, 1))\n self.bn0 = nn.BatchNorm()\n self.bn1 = nn.BatchNorm()\n self.reshape = shape\n self.slice = slice\n\n def forward(self, x):\n x_in = self.conv0(x)\n x_reshape = x_in.reshape(self.reshape)\n y = self.bn0(x_reshape)\n y_slice = y.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))\n out = self.bn1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 64, 64))\n slice = [[0, 0, 0, 0], [2, 64, 32, 32]]\n shape = (4, 64, 64, -1)\n net = Net(shape, slice)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_pooling2d():\n max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))\n global_maxpooling = nn.GlobalMaxPool2D()\n global_avgpooling = nn.GlobalAvgPool2D()\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n shape,\n pooling_layer,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.pool0 = pooling_layer\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n out = self.pool0(x_reshape)\n return out\n\n x = mx.np.random.uniform(size=(4, 32, 32, 32))\n shape = (4, 64, 64, -1)\n for i in range(len(pooling_layers)):\n net = Net(shape, 
pooling_layers[i])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.serial\ndef test_slice_pooling2d():\n # transpose shape to bring feature dimension 'c' from 2nd position to last\n def transpose(shape):\n return (shape[0],) + shape[2:] + (shape[1],)\n\n for layout in ['NCHW', 'NHWC']:\n max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1), layout=layout)\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1), layout=layout)\n global_maxpooling = nn.GlobalMaxPool2D(layout=layout)\n global_avgpooling = nn.GlobalAvgPool2D(layout=layout)\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n slice,\n pooling_layer,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.pool0 = pooling_layer\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=self.slice[0], end=self.slice[1])\n out = self.pool0(x_slice)\n return out\n\n xshape = (16, 128, 256, 256)\n slice_shape = (4, 16, 32, 64)\n if layout == 'NHWC':\n xshape = transpose(xshape)\n slice_shape = transpose(slice_shape)\n x = mx.np.random.uniform(size=xshape)\n slice = [(0, 0, 0, 0), slice_shape]\n for i in range(len(pooling_layers)):\n net = Net(slice, pooling_layers[i])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef test_reshape_pooling2d_reshape_pooling2d():\n max_pooling = nn.MaxPool2D(strides=(2, 2), padding=(1, 1))\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))\n global_maxpooling = nn.GlobalMaxPool2D()\n global_avgpooling = nn.GlobalAvgPool2D()\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n shape,\n pooling_layer1,\n pooling_layer2,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.pool0 = pooling_layer1\n self.pool1 = 
pooling_layer2\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape[0])\n y = self.pool0(x_reshape)\n y_reshape = y.reshape(self.reshape[1])\n out = self.pool1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n shape = [(128, 256, 64, -1), (128, 256, 11, -1)]\n for i in range(len(pooling_layers)):\n for j in range(len(pooling_layers)):\n if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):\n shape[1] = (256, 128, 1, 1)\n net = Net(shape, pooling_layers[i], pooling_layers[j])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.serial\ndef test_slice_pooling2d_slice_pooling2d():\n max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))\n global_maxpooling = nn.GlobalMaxPool2D()\n global_avgpooling = nn.GlobalAvgPool2D()\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n slice,\n pooling_layer1,\n pooling_layer2,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.pool0 = pooling_layer1\n self.pool1 = pooling_layer2\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=self.slice[0][0], end=self.slice[0][1])\n y = self.pool0(x_slice)\n y_slice = mx.npx.slice(y, begin=self.slice[1][0], end=self.slice[1][1])\n out = self.pool1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n slice = [[(8, 0, 100, 50), (16, -1, -1, -1)], [(0, 64, 0, 50), (2, -1, -1, -1)]]\n for i in range(len(pooling_layers)):\n for j in range(len(pooling_layers)):\n if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):\n slice[1] = [(0, 64, 0, 0), (2, -1, 1, 1)]\n net = Net(slice, pooling_layers[i], pooling_layers[j])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\ndef 
test_slice_pooling2d_reshape_pooling2d():\n max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))\n global_maxpooling = nn.GlobalMaxPool2D()\n global_avgpooling = nn.GlobalAvgPool2D()\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n shape,\n slice,\n pooling_layer1,\n pooling_layer2,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.pool0 = pooling_layer1\n self.pool1 = pooling_layer2\n\n def forward(self, x):\n x_slice = x.slice(begin=self.slice[0], end=self.slice[1])\n y = self.pool0(x_slice)\n y_reshape = y.reshape(self.reshape)\n out = self.pool1(y_reshape)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n slice = [(8, 0, 100, 50), (16, 128, 256, 256)]\n shape = (32, -1, 0, 0)\n for i in range(len(pooling_layers)):\n for j in range(len(pooling_layers)):\n net = Net(shape, slice, pooling_layers[i], pooling_layers[j])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_reshape_pooling2d_slice_pooling2d():\n max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))\n avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))\n global_maxpooling = nn.GlobalMaxPool2D()\n global_avgpooling = nn.GlobalAvgPool2D()\n pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]\n class Net(gluon.HybridBlock):\n def __init__(self,\n shape,\n slice,\n pooling_layer1,\n pooling_layer2,\n **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.pool0 = pooling_layer1\n self.pool1 = pooling_layer2\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n y = self.pool0(x_reshape)\n y_slice = y.slice(begin=self.slice[0], end=self.slice[1])\n out 
= self.pool1(y_slice)\n return out\n\n x = mx.np.random.uniform(size=(16, 128, 256, 256))\n shape = (0, 512, 64, -1)\n slice = [(8, 256, 10, 20), (-1, -1, -1, 70)]\n for i in range(len(pooling_layers)):\n for j in range(len(pooling_layers)):\n if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):\n slice = [(8, 256, 0, 0), (-1, -1, 1, 1)]\n net = Net(shape, slice, pooling_layers[i], pooling_layers[j])\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_reshape_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.conv0 = nn.Conv2DTranspose(64, (3, 3))\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n out = self.conv0(x_reshape)\n return out\n x = mx.np.random.uniform(size=(4, 16, 32, 32))\n shape = (4, 16, 64, -1)\n net = Net(shape)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_slice_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.conv0 = nn.Conv2DTranspose(64, (3, 3))\n\n def forward(self, x):\n x_slice = x.slice(begin=self.slice[0], end=self.slice[1])\n out = self.conv0(x_slice)\n return out\n x = mx.np.random.uniform(size=(8, 32, 64, 64))\n slice = [(0, 16, 0, 0), (4, 32, 32, 32)]\n net = Net(slice)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_reshape_deconv_reshape_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n 
self.conv0 = nn.Conv2DTranspose(32, (3, 3))\n self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape[0])\n y = self.conv0(x_reshape)\n \"shape of y is (4, 32, 66, 18)\"\n y_reshape = y.reshape(self.reshape[1])\n out = self.conv1(y_reshape)\n return out\n x = mx.np.random.uniform(size=(4, 16, 32, 32))\n shape = [(4, 16, 64, -1), (4, 32, 33, -1)]\n net = Net(shape)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_slice_deconv_slice_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.conv0 = nn.Conv2DTranspose(32, (3, 3))\n self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))\n\n def forward(self, x):\n x_slice = x.slice(begin=self.slice[0][0], end=self.slice[0][1])\n y = self.conv0(x_slice)\n \"shape of y is (4, 32, 66, 18)\"\n y_slice = y.slice(begin=self.slice[1][0], end=self.slice[1][1])\n out = self.conv1(y_slice)\n return out\n x = mx.np.random.uniform(size=(8, 32, 64, 64))\n slice = [[(0, 0, 0, 0), (4, 16, 32, 32)], [(0, 0, 0, 0), (2, 16, 16, 16)]]\n net = Net(slice)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_reshape_deconv_slice_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.conv0 = nn.Conv2DTranspose(32, (3, 3))\n self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n y = self.conv0(x_reshape)\n \"shape of y is (4, 32, 66, 18)\"\n y_slice = y.slice(begin=self.slice[0], end=self.slice[1])\n out = 
self.conv1(y_slice)\n return out\n x = mx.np.random.uniform(size=(4, 16, 32, 32))\n shape = (4, 16, 64, -1)\n slice = [(0, 0, 0, 0), (2, 16, 16, 16)]\n net = Net(shape, slice)\n check_layer_forward_withinput(net, x)\n\n@pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/11164')\n@pytest.mark.serial\ndef test_slice_deconv_reshape_deconv():\n class Net(gluon.HybridBlock):\n def __init__(self, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.conv0 = nn.Conv2DTranspose(32, (3, 3))\n self.conv1 = nn.Conv2DTranspose(96, (3, 3), strides=(2, 2))\n\n def forward(self, x):\n x_slice = x.slice(begin=self.slice[0], end=self.slice[1])\n y = self.conv0(x_slice)\n \"shape of y is (4, 32, 34, 34)\"\n y_reshape = y.reshape(self.reshape)\n out = self.conv1(y_reshape)\n return out\n x = mx.np.random.uniform(size=(8, 32, 64, 64))\n shape = (4, 64, 34, -1)\n slice = [(4, 0, 0, 0), (8, 16, 32, 32)]\n net = Net(shape, slice)\n check_layer_forward_withinput(net, x)\n\n@use_np\n@pytest.mark.serial\ndef test_reshape_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.act = nn.Activation(act)\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n out = self.act(x_reshape)\n return out\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for act in acts:\n x = mx.np.random.uniform(-1, 1, size=(4, 16, 32, 32))\n shape = (4, 32, 32, -1)\n net = Net(act, shape)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_slice_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.act = nn.Activation(act)\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=self.slice[0], end=self.slice[1])\n out = 
self.act(x_slice)\n return out\n\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for act in acts:\n x = mx.np.random.uniform(-1, 1, size=(8, 32, 64, 64))\n slice = [(0, 16, 32, 32), (4, 32, 64, 64)]\n net = Net(act, slice)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_reshape_activation_reshape_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act0, act1, shape, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.act0 = nn.Activation(act0)\n self.act1 = nn.Activation(act1)\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape[0])\n y = self.act0(x_reshape)\n y_reshape = y.reshape(self.reshape[1])\n out = self.act1(y_reshape)\n return out\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for idx0, act0 in enumerate(acts):\n for idx1, act1 in enumerate(acts):\n if idx1 == idx0:\n continue\n x = mx.np.random.uniform(-1, 1, size=(4, 16, 32, 32))\n shape = [(4, 32, 32, -1), (4, 32, 16, -1)]\n net = Net(act0, act1, shape)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_slice_activation_slice_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act0, act1, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.slice = slice\n self.act0 = nn.Activation(act0)\n self.act1 = nn.Activation(act1)\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=self.slice[0][0], end=self.slice[0][1])\n y = self.act0(x_slice)\n y_slice = mx.npx.slice(y, begin=self.slice[1][0], end=self.slice[1][1])\n out = self.act1(y_slice)\n return out\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for idx0, act0 in enumerate(acts):\n for idx1, act1 in enumerate(acts):\n if idx1 == idx0:\n continue\n x = mx.np.random.uniform(-1, 1, size=(8, 32, 64, 64))\n slice = [[(0, 16, 32, 32), (4, 32, 64, 64)], [(2, 0, 16, 16), (4, 16, 32, 32)]]\n net = Net(act0, act1, slice)\n 
check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_reshape_activation_slice_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act0, act1, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.act0 = nn.Activation(act0)\n self.act1 = nn.Activation(act1)\n\n def forward(self, x):\n x_reshape = x.reshape(self.reshape)\n y = self.act0(x_reshape)\n y_slice = mx.npx.slice(y, begin=self.slice[0], end=self.slice[1])\n out = self.act1(y_slice)\n return out\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for idx0, act0 in enumerate(acts):\n for idx1, act1 in enumerate(acts):\n if idx1 == idx0:\n continue\n x = mx.np.random.uniform(-1, 1, size=(4, 16, 32, 32))\n shape = (4, 32, 32, -1)\n slice = [(0, 0, 0, 0), (2, 16, 16, 16)]\n net = Net(act0, act1, shape, slice)\n check_layer_forward_withinput(net, x)\n\n\n@use_np\n@pytest.mark.serial\ndef test_slice_activation_reshape_activation():\n class Net(gluon.HybridBlock):\n def __init__(self, act0, act1, shape, slice, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.reshape = shape\n self.slice = slice\n self.act0 = nn.Activation(act0)\n self.act1 = nn.Activation(act1)\n\n def forward(self, x):\n x_slice = mx.npx.slice(x, begin=self.slice[0], end=self.slice[1])\n y = self.act0(x_slice)\n y_reshape = y.reshape(self.reshape)\n out = self.act1(y_reshape)\n return out\n acts = [\"relu\", \"sigmoid\", \"tanh\", \"softrelu\", \"softsign\"]\n for idx0, act0 in enumerate(acts):\n for idx1, act1 in enumerate(acts):\n if idx1 == idx0:\n continue\n x = mx.np.random.uniform(-1, 1, size=(8, 32, 64, 64))\n slice = [(0, 16, 32, 32), (4, 32, 64, 64)]\n shape = (4, 32, 32, -1)\n net = Net(act0, act1, shape, slice)\n check_layer_forward_withinput(net, x)\n\n@use_np\n@pytest.mark.serial\ndef test_np_shape_parameters():\n class Foo(gluon.Block):\n def __init__(self, **kwargs):\n super(Foo, 
self).__init__(**kwargs)\n self.dense = gluon.nn.Dense(16)\n def forward(self, x):\n return self.dense(x)\n\n with mx.np_shape(True):\n z = mx.np.zeros((2,2016))\n print(z.shape)\n foo = Foo()\n foo.initialize()\n print(foo(z).shape)\n\ndef test_gluon_param_load():\n net = mx.gluon.nn.Dense(10, in_units=10)\n net.initialize()\n net.save_parameters('test_gluon_param_load.params')\n net.cast('float16')\n net.load_parameters('test_gluon_param_load.params', cast_dtype=True)\n mx.npx.waitall()\n\ndef test_gluon_param_load_dtype_source():\n net = mx.gluon.nn.Dense(10, in_units=10)\n net.initialize()\n net.cast('float16')\n net.save_parameters('test_gluon_param_load_dtype_source.params')\n net.cast('float32')\n net.load_parameters('test_gluon_param_load_dtype_source.params', cast_dtype=True, dtype_source=\"saved\")\n assert net.weight.dtype == onp.float16\n mx.npx.waitall()\n\n@use_np\ndef test_squeeze_consistency():\n class Foo(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Foo, self).__init__(**kwargs)\n\n def forward(self, x):\n return x.squeeze()\n\n block = Foo()\n block.hybridize()\n shape = (onp.random.randint(1, 10), onp.random.randint(1, 10), 1)\n block(mx.np.ones(shape))\n\ndef test_shared_parameters_with_non_default_initializer():\n class MyBlock(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(MyBlock, self).__init__(**kwargs)\n\n self.param = gluon.Parameter(shape=(1, ), init=mx.init.Constant(-10.0))\n\n bl = MyBlock()\n bl2 = MyBlock().share_parameters(bl.collect_params())\n assert bl.param is bl2.param\n bl3 = MyBlock()\n assert bl.param is not bl3.param\n assert bl.param.init == bl3.param.init\n\n@use_np\ndef test_reqs_switching_training_inference():\n class Foo(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Foo, self).__init__(**kwargs)\n\n def forward(self, x):\n y = 2 * x\n return mx.np.sqrt(x) + mx.np.sqrt(y)\n\n f = Foo()\n f.hybridize(static_alloc=True)\n x = mx.np.ones(shape=(10,10))\n x.attach_grad()\n x2 = 
mx.np.ones(shape=x.shape) * 2\n x2.attach_grad()\n\n # Call first in training mode\n with mx.autograd.record():\n y = f(x)\n y.backward()\n\n grad1 = x.grad.asnumpy()\n\n # Compute the gradient with some other input\n with mx.autograd.record():\n y = f(x2)\n y.backward()\n\n # Call inference mode\n y = f(x)\n\n # Call training mode again\n with mx.autograd.record():\n y = f(x)\n y.backward()\n\n grad2 = x.grad.asnumpy()\n\n mx.test_utils.assert_almost_equal(grad1, grad2)\n\n\n@pytest.mark.usefixtures(\"check_leak_ndarray\")\ndef test_no_memory_leak_in_gluon():\n class MyNet(mx.gluon.Block):\n def __init__(self):\n super().__init__()\n self.net = mx.gluon.nn.Dense(10, in_units=10)\n net = MyNet()\n net.initialize()\n\ndef test_DeformableConvolution():\n \"\"\"test of the deformable convolution layer with possible combinations of arguments,\n currently this layer only supports gpu\n \"\"\"\n try:\n ctx = mx.gpu()\n _ = mx.np.array([0], ctx=ctx)\n except mx.base.MXNetError:\n pytest.skip(\"deformable_convolution only supports GPU\")\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n net.initialize(force_reinit=True, ctx=ctx)\n 
net.hybridize()\n\n x = mx.np.random.uniform(size=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)\n y.backward()\n\ndef test_ModulatedDeformableConvolution():\n \"\"\"test of the deformable convolution layer with possible combinations of arguments,\n currently this layer only supports gpu\n \"\"\"\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(1, 1), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(5, 5), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 5), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(5, 1), strides=1, padding=0, num_deformable_group=2),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n ctx = default_context()\n net.initialize(force_reinit=True, ctx=ctx)\n net.hybridize()\n\n x = mx.np.random.uniform(size=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)\n\n\n@use_np\n@pytest.mark.parametrize('dc', [True, False])\n@pytest.mark.parametrize('hybridize', [True, False])\n@pytest.mark.garbage_expected\ndef test_concatenate(dc, hybridize):\n if dc:\n class MyBlock(mx.gluon.HybridBlock):\n def __init__(self, units, 
activation=None, in_units=0):\n super().__init__()\n self.dense = mx.gluon.nn.Dense(units, activation=activation, in_units=in_units)\n\n def forward(self, x):\n return self.dense(x)\n else:\n MyBlock = nn.Dense\n\n model = nn.HybridConcatenate(axis=1)\n model.add(MyBlock(128, activation='tanh', in_units=10))\n model.add(MyBlock(64, activation='tanh', in_units=10))\n model.add(MyBlock(32, in_units=10))\n model2 = nn.Concatenate(axis=1)\n model2.add(MyBlock(128, activation='tanh', in_units=10))\n model2.add(MyBlock(64, activation='tanh', in_units=10))\n model2.add(MyBlock(32, in_units=10))\n\n # ndarray\n model.initialize(mx.init.Xavier(magnitude=2.24))\n model2.initialize(mx.init.Xavier(magnitude=2.24))\n if hybridize:\n model.hybridize()\n model2.hybridize()\n x = model(mx.np.zeros((32, 10)))\n x2 = model2(mx.np.zeros((32, 10)))\n assert x.shape == (32, 224)\n assert x2.shape == (32, 224)\n x.wait_to_read()\n x2.wait_to_read()\n\ndef test_identity():\n model = nn.Identity()\n x = mx.np.random.uniform(size=(128, 33, 64))\n assert_almost_equal(model(x), x)\n\ndef test_pixelshuffle1d():\n nchan = 2\n up_x = 2\n nx = 3\n shape_before = (1, nchan * up_x, nx)\n shape_after = (1, nchan, nx * up_x)\n layer = nn.PixelShuffle1D(up_x)\n x = mx.np.arange(onp.prod(shape_before)).reshape(shape_before)\n y = layer(x)\n assert y.shape == shape_after\n assert_allclose(\n y,\n [[[0, 3, 1, 4, 2, 5],\n [6, 9, 7, 10, 8, 11]]]\n )\n\ndef test_pixelshuffle2d():\n nchan = 2\n up_x = 2\n up_y = 3\n nx = 2\n ny = 3\n shape_before = (1, nchan * up_x * up_y, nx, ny)\n shape_after = (1, nchan, nx * up_x, ny * up_y)\n layer = nn.PixelShuffle2D((up_x, up_y))\n x = mx.np.arange(onp.prod(shape_before)).reshape(shape_before)\n y = layer(x)\n assert y.shape == shape_after\n # - Channels are reshaped to form 2x3 blocks\n # - Within each block, the increment is `nx * ny` when increasing the column\n # index by 1\n # - Increasing the block index adds an offset of 1\n # - Increasing the channel index 
adds an offset of `nx * up_x * ny * up_y`\n assert_allclose(\n y,\n [[[[ 0, 6, 12, 1, 7, 13, 2, 8, 14],\n [18, 24, 30, 19, 25, 31, 20, 26, 32],\n [ 3, 9, 15, 4, 10, 16, 5, 11, 17],\n [21, 27, 33, 22, 28, 34, 23, 29, 35]],\n\n [[36, 42, 48, 37, 43, 49, 38, 44, 50],\n [54, 60, 66, 55, 61, 67, 56, 62, 68],\n [39, 45, 51, 40, 46, 52, 41, 47, 53],\n [57, 63, 69, 58, 64, 70, 59, 65, 71]]]]\n )\n\ndef test_pixelshuffle3d():\n nchan = 1\n up_x = 2\n up_y = 1\n up_z = 2\n nx = 2\n ny = 3\n nz = 4\n shape_before = (1, nchan * up_x * up_y * up_z, nx, ny, nz)\n shape_after = (1, nchan, nx * up_x, ny * up_y, nz * up_z)\n layer = nn.PixelShuffle3D((up_x, up_y, up_z))\n x = mx.np.arange(onp.prod(shape_before)).reshape(shape_before)\n y = layer(x)\n assert y.shape == shape_after\n # - Channels are reshaped to form 2x1x2 blocks\n # - Within each block, the increment is `nx * ny * nz` when increasing the\n # column index by 1, e.g. the block [[[ 0, 24]], [[48, 72]]]\n # - Increasing the block index adds an offset of 1\n assert_allclose(\n y,\n [[[[[ 0, 24, 1, 25, 2, 26, 3, 27],\n [ 4, 28, 5, 29, 6, 30, 7, 31],\n [ 8, 32, 9, 33, 10, 34, 11, 35]],\n\n [[48, 72, 49, 73, 50, 74, 51, 75],\n [52, 76, 53, 77, 54, 78, 55, 79],\n [56, 80, 57, 81, 58, 82, 59, 83]],\n\n [[12, 36, 13, 37, 14, 38, 15, 39],\n [16, 40, 17, 41, 18, 42, 19, 43],\n [20, 44, 21, 45, 22, 46, 23, 47]],\n\n [[60, 84, 61, 85, 62, 86, 63, 87],\n [64, 88, 65, 89, 66, 90, 67, 91],\n [68, 92, 69, 93, 70, 94, 71, 95]]]]]\n )\n"
] |
[
[
"numpy.array",
"numpy.random.rand",
"numpy.asarray",
"numpy.ones",
"numpy.prod",
"numpy.random.randint"
]
] |
voxie-viewer/voxie
|
[
"d2b5e6760519782e9ef2e51f5322a3baa0cb1198",
"d2b5e6760519782e9ef2e51f5322a3baa0cb1198"
] |
[
"filters/digitalvolumecorrelation/perftest/fftgputest/plot_timing.py",
"filters/tomopy_misc_phantom.py"
] |
[
"from pathlib import Path\n#\n# Copyright (c) 2014-2022 The Voxie Authors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nFFT_SIZE = 32\nSEP = '\\n'\nPATH_TEST = Path.cwd() / 'filters/digitalvolumecorrelation/perftest/fftgputest'\nPATH_TIMING_DATA = PATH_TEST / f'timing_data/{FFT_SIZE}'\nPATH_FIGURE = PATH_TEST / f'fft_runtime_plot_{FFT_SIZE}.eps'\n\nCOLORS = ('#7570b3', '#1b9e77', '#d95f02')\nMODULE_NAMES = ('gpu_reikna', 'gpu_gpyfft', 'cpu')\n\n\ndef main():\n matplotlib.rcParams['figure.figsize'] = [8, 8]\n\n datasets = []\n\n for module_name in MODULE_NAMES:\n\n test_name = module_name + '_test_'\n\n filepaths = sorted(PATH_TIMING_DATA.glob(f'profile_{module_name}*'),\n key=lambda p: p.name.split(test_name)[1]\n .split('_')[0].zfill(4))\n data = [np.fromfile(str(p), sep=SEP) for p in filepaths]\n datasets.append(data)\n\n # means = np.mean(datasets, axis=2)\n # for i in 
range(means.shape[1]):\n # reikna = means[0, i]\n # cpu = means[2, i]\n\n # print(2**i, cpu/reikna)\n\n axes = plt.subplot()\n\n sizes = [p.name.split(test_name)[1].split('_')[0]\n for p in filepaths]\n\n bp1 = axes.boxplot(datasets[0],\n positions=np.arange(len(datasets[0])) * 2.0 - 0.6,\n widths=0.6, sym='',\n showbox=False, showcaps=False, showfliers=False)\n bp2 = axes.boxplot(datasets[1],\n positions=np.arange(len(datasets[1])) * 2.0,\n widths=0.6, sym='',\n showbox=False, showcaps=False, showfliers=False)\n bp3 = axes.boxplot(datasets[2],\n positions=np.arange(len(datasets[2])) * 2.0 + 0.6,\n widths=0.6, sym='',\n showbox=False, showcaps=False, showfliers=False)\n\n def color_box_plot(bp, color):\n plt.setp(bp['boxes'], color=color)\n plt.setp(bp['whiskers'], color=color)\n plt.setp(bp['medians'], color=color)\n plt.setp(bp['caps'], color=color)\n\n for bp, color in zip((bp1, bp2, bp3), COLORS):\n color_box_plot(bp, color)\n\n axes.set_yscale('log')\n axes.set_yticks([0.00001, 0.001, 0.01, 0.1, 1, 10, 10])\n # axes.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\n axes.get_yaxis().set_major_formatter(\n matplotlib.ticker.StrMethodFormatter('{x:,.3f}'))\n axes.set_ylim([0.0001, 100.0])\n axes.set_ylabel('runtime in s')\n axes.set_xlabel('batch-size')\n axes.set_xticklabels(sizes)\n axes.set_xticks(range(0, len(sizes) * 2, 2))\n # axes.set_title(f'Batched 3D FFT of size {FFT_SIZE} runtime comparison')\n\n for module, color in zip(MODULE_NAMES, COLORS):\n axes.plot([], c=color, label=module.replace('_', '-'))\n\n axes.legend()\n\n plt.tight_layout()\n plt.savefig(str(PATH_FIGURE), transparent=True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file has been modified to remove some dependencies and functionality\n\n# #########################################################################\n# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. 
#\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. #\n# #########################################################################\n\n\"\"\"\nModule for generating synthetic phantoms.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\n\n\n__author__ = \"Doga Gursoy\"\n__copyright__ = \"Copyright (c) 2015, UChicago Argonne, LLC.\"\n__docformat__ = 'restructuredtext en'\n__all__ = ['shepp3d',\n 'phantom']\n\ndef _totuple(size, dim):\n \"\"\"\n Converts size to tuple.\n \"\"\"\n if not isinstance(size, tuple):\n if dim == 2:\n size = (size, size)\n elif dim == 3:\n size = (size, size, size)\n return size\n\n\ndef shepp3d(size=128, dtype='float32'):\n \"\"\"\n Load 3D Shepp-Logan image array.\n\n Parameters\n ----------\n size : int or tuple, optional\n Size of the 3D data.\n dtype : str, optional\n The desired data-type for the array.\n\n Returns\n -------\n ndarray\n Output 3D test image.\n \"\"\"\n size = _totuple(size, 3)\n shepp_params = _array_to_params(_get_shepp_array())\n return phantom(size, shepp_params, dtype).clip(0, np.inf)\n\n\ndef phantom(size, params, dtype='float32'):\n \"\"\"\n Generate a cube of given size using a list of 
ellipsoid parameters.\n\n Parameters\n ----------\n size: tuple of int\n Size of the output cube.\n params: list of dict\n List of dictionaries with the parameters defining the ellipsoids\n to include in the cube.\n dtype: str, optional\n Data type of the output ndarray.\n\n Returns\n -------\n ndarray\n 3D object filled with the specified ellipsoids.\n \"\"\"\n # instantiate ndarray cube\n obj = np.zeros(size, dtype=dtype)\n\n # define coords\n coords = _define_coords(size)\n\n # recursively add ellipsoids to cube\n for param in params:\n _ellipsoid(param, out=obj, coords=coords)\n return obj\n\n\ndef _ellipsoid(params, shape=None, out=None, coords=None):\n \"\"\"\n Generate a cube containing an ellipsoid defined by its parameters.\n If out is given, fills the given cube instead of creating a new one.\n \"\"\"\n # handle inputs\n if shape is None and out is None:\n raise ValueError(\"You need to set shape or out\")\n if out is None:\n out = np.zeros(shape)\n if shape is None:\n shape = out.shape\n if len(shape) == 1:\n shape = shape, shape, shape\n elif len(shape) == 2:\n shape = shape[0], shape[1], 1\n elif len(shape) > 3:\n raise ValueError(\"input shape must be lower or equal to 3\")\n if coords is None:\n coords = _define_coords(shape)\n\n # rotate coords\n coords = _transform(coords, params)\n\n # recast as ndarray\n coords = np.asarray(coords)\n np.square(coords, out=coords)\n ellip_mask = coords.sum(axis=0) <= 1.\n ellip_mask.resize(shape)\n\n # fill ellipsoid with value\n out[ ellip_mask ] += params['A']\n return out\n\n\ndef _rotation_matrix(p):\n \"\"\"\n Defines an Euler rotation matrix from angles phi, theta and psi.\n \"\"\"\n cphi = np.cos(np.radians(p['phi']))\n sphi = np.sin(np.radians(p['phi']))\n ctheta = np.cos(np.radians(p['theta']))\n stheta = np.sin(np.radians(p['theta']))\n cpsi = np.cos(np.radians(p['psi']))\n spsi = np.sin(np.radians(p['psi']))\n alpha = [[cpsi * cphi - ctheta * sphi * spsi,\n cpsi * sphi + ctheta * cphi * spsi,\n spsi * 
stheta],\n [-spsi * cphi - ctheta * sphi * cpsi,\n -spsi * sphi + ctheta * cphi * cpsi,\n cpsi * stheta],\n [stheta * sphi,\n -stheta * cphi,\n ctheta]]\n return np.asarray(alpha)\n\n\ndef _define_coords(shape):\n \"\"\"\n Generate a tuple of coords in 3D with a given shape.\n \"\"\"\n mgrid = np.lib.index_tricks.nd_grid()\n cshape = np.asarray(1j) * shape\n x, y, z = mgrid[-1:1:cshape[0], -1:1:cshape[1], -1:1:cshape[2]]\n return x, y, z\n\n\ndef _transform(coords, p):\n \"\"\"\n Apply rotation, translation and rescaling to a 3-tuple of coords.\n \"\"\"\n alpha = _rotation_matrix(p)\n out_coords = np.tensordot(alpha, coords, axes=1)\n _shape = (3,) + (1,) * ( out_coords.ndim - 1 )\n _dt = out_coords.dtype\n M0 = np.array([p['x0'], p['y0'], p['z0']], dtype=_dt).reshape(_shape)\n sc = np.array([p['a'], p['b'], p['c']], dtype=_dt).reshape(_shape)\n out_coords -= M0\n out_coords /= sc\n return out_coords\n\n\ndef _get_shepp_array():\n \"\"\"\n Returns the parameters for generating modified Shepp-Logan phantom.\n \"\"\"\n shepp_array = [\n [1., .6900, .920, .810, 0., 0., 0., 90., 90., 90.],\n [-.8, .6624, .874, .780, 0., -.0184, 0., 90., 90., 90.],\n [-.2, .1100, .310, .220, .22, 0., 0., -108., 90., 100.],\n [-.2, .1600, .410, .280, -.22, 0., 0., 108., 90., 100.],\n [.1, .2100, .250, .410, 0., .35, -.15, 90., 90., 90.],\n [.1, .0460, .046, .050, 0., .1, .25, 90., 90., 90.],\n [.1, .0460, .046, .050, 0., -.1, .25, 90., 90., 90.],\n [.1, .0460, .023, .050, -.08, -.605, 0., 90., 90., 90.],\n [.1, .0230, .023, .020, 0., -.606, 0., 90., 90., 90.],\n [.1, .0230, .046, .020, .06, -.605, 0., 90., 90., 90.]]\n return shepp_array\n\n\ndef _array_to_params(array):\n \"\"\"\n Converts list to a dictionary.\n \"\"\"\n # mandatory parameters to define an ellipsoid\n params_tuple = [\n 'A',\n 'a', 'b', 'c',\n 'x0', 'y0', 'z0',\n 'phi', 'theta', 'psi']\n\n array = np.asarray(array)\n out = []\n for i in range(array.shape[0]):\n tmp = dict()\n for k, j in zip(params_tuple, 
list(range(array.shape[1]))):\n tmp[k] = array[i, j]\n out.append(tmp)\n return out\n"
] |
[
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.ticker.StrMethodFormatter"
],
[
"numpy.square",
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.tensordot",
"numpy.radians",
"numpy.lib.index_tricks.nd_grid"
]
] |
lusi1990/ImageProcessing100Wen
|
[
"3682e1d68a1b8472818402df2cc75ded82bb1805",
"3682e1d68a1b8472818402df2cc75ded82bb1805",
"3682e1d68a1b8472818402df2cc75ded82bb1805",
"3682e1d68a1b8472818402df2cc75ded82bb1805",
"3682e1d68a1b8472818402df2cc75ded82bb1805"
] |
[
"Question_41_50/answers_py/answer_44.py",
"Question_81_90/answers/answer_82.py",
"Question_31_40/answers_py/answer_40.py",
"Question_31_40/answers_py/_answer_40.py",
"Question_91_100/answers/answer_91.py"
] |
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef Canny(img):\n\n\t# Gray scale\n\tdef BGR2GRAY(img):\n\t\tb = img[:, :, 0].copy()\n\t\tg = img[:, :, 1].copy()\n\t\tr = img[:, :, 2].copy()\n\n\t\t# Gray scale\n\t\tout = 0.2126 * r + 0.7152 * g + 0.0722 * b\n\t\tout = out.astype(np.uint8)\n\n\t\treturn out\n\n\n\t# Gaussian filter for grayscale\n\tdef gaussian_filter(img, K_size=3, sigma=1.3):\n\n\t\tif len(img.shape) == 3:\n\t\t\tH, W, C = img.shape\n\t\t\tgray = False\n\t\telse:\n\t\t\timg = np.expand_dims(img, axis=-1)\n\t\t\tH, W, C = img.shape\n\t\t\tgray = True\n\n\t\t## Zero padding\n\t\tpad = K_size // 2\n\t\tout = np.zeros([H + pad * 2, W + pad * 2, C], dtype=np.float)\n\t\tout[pad : pad + H, pad : pad + W] = img.copy().astype(np.float)\n\n\t\t## prepare Kernel\n\t\tK = np.zeros((K_size, K_size), dtype=np.float)\n\t\tfor x in range(-pad, -pad + K_size):\n\t\t\tfor y in range(-pad, -pad + K_size):\n\t\t\t\tK[y + pad, x + pad] = np.exp( - (x ** 2 + y ** 2) / (2 * sigma * sigma))\n\t\t#K /= (sigma * np.sqrt(2 * np.pi))\n\t\tK /= (2 * np.pi * sigma * sigma)\n\t\tK /= K.sum()\n\n\t\ttmp = out.copy()\n\n\t\t# filtering\n\t\tfor y in range(H):\n\t\t\tfor x in range(W):\n\t\t\t\tfor c in range(C):\n\t\t\t\t\tout[pad + y, pad + x, c] = np.sum(K * tmp[y : y + K_size, x : x + K_size, c])\n\n\t\tout = np.clip(out, 0, 255)\n\t\tout = out[pad : pad + H, pad : pad + W]\n\t\tout = out.astype(np.uint8)\n\n\t\tif gray:\n\t\t\tout = out[..., 0]\n\n\t\treturn out\n\n\n\t# sobel filter\n\tdef sobel_filter(img, K_size=3):\n\t\tif len(img.shape) == 3:\n\t\t\tH, W, C = img.shape\n\t\telse:\n\t\t\tH, W = img.shape\n\n\t\t# Zero padding\n\t\tpad = K_size // 2\n\t\tout = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)\n\t\tout[pad : pad + H, pad : pad + W] = img.copy().astype(np.float)\n\t\ttmp = out.copy()\n\n\t\tout_v = out.copy()\n\t\tout_h = out.copy()\n\n\t\t## Sobel vertical\n\t\tKv = [[1., 2., 1.],[0., 0., 0.], [-1., -2., -1.]]\n\t\t## Sobel 
horizontal\n\t\tKh = [[1., 0., -1.],[2., 0., -2.],[1., 0., -1.]]\n\n\t\t# filtering\n\t\tfor y in range(H):\n\t\t\tfor x in range(W):\n\t\t\t\tout_v[pad + y, pad + x] = np.sum(Kv * (tmp[y : y + K_size, x : x + K_size]))\n\t\t\t\tout_h[pad + y, pad + x] = np.sum(Kh * (tmp[y : y + K_size, x : x + K_size]))\n\n\t\tout_v = np.clip(out_v, 0, 255)\n\t\tout_h = np.clip(out_h, 0, 255)\n\n\t\tout_v = out_v[pad : pad + H, pad : pad + W]\n\t\tout_v = out_v.astype(np.uint8)\n\t\tout_h = out_h[pad : pad + H, pad : pad + W]\n\t\tout_h = out_h.astype(np.uint8)\n\n\t\treturn out_v, out_h\n\n\n\tdef get_edge_angle(fx, fy):\n\t\t# get edge strength\n\t\tedge = np.sqrt(np.power(fx.astype(np.float32), 2) + np.power(fy.astype(np.float32), 2))\n\t\tedge = np.clip(edge, 0, 255)\n\n\t\tfx = np.maximum(fx, 1e-10)\n\t\t#fx[np.abs(fx) <= 1e-5] = 1e-5\n\n\t\t# get edge angle\n\t\tangle = np.arctan(fy / fx)\n\n\t\treturn edge, angle\n\n\n\tdef angle_quantization(angle):\n\t\tangle = angle / np.pi * 180\n\t\tangle[angle < -22.5] = 180 + angle[angle < -22.5]\n\t\t_angle = np.zeros_like(angle, dtype=np.uint8)\n\t\t_angle[np.where(angle <= 22.5)] = 0\n\t\t_angle[np.where((angle > 22.5) & (angle <= 67.5))] = 45\n\t\t_angle[np.where((angle > 67.5) & (angle <= 112.5))] = 90\n\t\t_angle[np.where((angle > 112.5) & (angle <= 157.5))] = 135\n\n\t\treturn _angle\n\n\n\tdef non_maximum_suppression(angle, edge):\n\t\tH, W = angle.shape\n\t\t_edge = edge.copy()\n\t\t\n\t\tfor y in range(H):\n\t\t\tfor x in range(W):\n\t\t\t\t\tif angle[y, x] == 0:\n\t\t\t\t\t\t\tdx1, dy1, dx2, dy2 = -1, 0, 1, 0\n\t\t\t\t\telif angle[y, x] == 45:\n\t\t\t\t\t\t\tdx1, dy1, dx2, dy2 = -1, 1, 1, -1\n\t\t\t\t\telif angle[y, x] == 90:\n\t\t\t\t\t\t\tdx1, dy1, dx2, dy2 = 0, -1, 0, 1\n\t\t\t\t\telif angle[y, x] == 135:\n\t\t\t\t\t\t\tdx1, dy1, dx2, dy2 = -1, -1, 1, 1\n\t\t\t\t\tif x == 0:\n\t\t\t\t\t\t\tdx1 = max(dx1, 0)\n\t\t\t\t\t\t\tdx2 = max(dx2, 0)\n\t\t\t\t\tif x == W-1:\n\t\t\t\t\t\t\tdx1 = min(dx1, 0)\n\t\t\t\t\t\t\tdx2 = 
min(dx2, 0)\n\t\t\t\t\tif y == 0:\n\t\t\t\t\t\t\tdy1 = max(dy1, 0)\n\t\t\t\t\t\t\tdy2 = max(dy2, 0)\n\t\t\t\t\tif y == H-1:\n\t\t\t\t\t\t\tdy1 = min(dy1, 0)\n\t\t\t\t\t\t\tdy2 = min(dy2, 0)\n\t\t\t\t\tif max(max(edge[y, x], edge[y + dy1, x + dx1]), edge[y + dy2, x + dx2]) != edge[y, x]:\n\t\t\t\t\t\t\t_edge[y, x] = 0\n\n\t\treturn _edge\n\n\tdef hysterisis(edge, HT=100, LT=30):\n\t\tH, W = edge.shape\n\n\t\t# Histeresis threshold\n\t\tedge[edge >= HT] = 255\n\t\tedge[edge <= LT] = 0\n\n\t\t_edge = np.zeros((H + 2, W + 2), dtype=np.float32)\n\t\t_edge[1 : H + 1, 1 : W + 1] = edge\n\n\t\t## 8 - Nearest neighbor\n\t\tnn = np.array(((1., 1., 1.), (1., 0., 1.), (1., 1., 1.)), dtype=np.float32)\n\n\t\tfor y in range(1, H+2):\n\t\t\t\tfor x in range(1, W+2):\n\t\t\t\t\t\tif _edge[y, x] < LT or _edge[y, x] > HT:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif np.max(_edge[y-1:y+2, x-1:x+2] * nn) >= HT:\n\t\t\t\t\t\t\t\t_edge[y, x] = 255\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t_edge[y, x] = 0\n\n\t\tedge = _edge[1:H+1, 1:W+1]\n\t\t\t\t\t\t\t\t\n\t\treturn edge\n\n\t# grayscale\n\tgray = BGR2GRAY(img)\n\n\t# gaussian filtering\n\tgaussian = gaussian_filter(gray, K_size=5, sigma=1.4)\n\n\t# sobel filtering\n\tfy, fx = sobel_filter(gaussian, K_size=3)\n\n\t# get edge strength, angle\n\tedge, angle = get_edge_angle(fx, fy)\n\n\t# angle quantization\n\tangle = angle_quantization(angle)\n\n\t# non maximum suppression\n\tedge = non_maximum_suppression(angle, edge)\n\n\t# hysterisis threshold\n\tout = hysterisis(edge, 100, 30)\n\n\treturn out\n\n\ndef Hough_Line_step1(edge):\n\t## Voting\n\tdef voting(edge):\n\t\tH, W = edge.shape\n\t\tdrho = 1\n\t\tdtheta = 1\n\n\t\t# get rho max length\n\t\trho_max = np.ceil(np.sqrt(H ** 2 + W ** 2)).astype(np.int)\n\n\t\t# hough table\n\t\though = np.zeros((rho_max * 2, 180), dtype=np.int)\n\n\t\t# get index of edge\n\t\tind = np.where(edge == 255)\n\n\t\t## hough transformation\n\t\tfor y, x in zip(ind[0], ind[1]):\n\t\t\t\tfor theta in range(0, 180, 
dtheta):\n\t\t\t\t\t\t# get polar coordinat4s\n\t\t\t\t\t\tt = np.pi / 180 * theta\n\t\t\t\t\t\trho = int(x * np.cos(t) + y * np.sin(t))\n\n\t\t\t\t\t\t# vote\n\t\t\t\t\t\though[rho + rho_max, theta] += 1\n\t\t\t\t\t\t\t\n\t\tout = hough.astype(np.uint8)\n\n\t\treturn out\n\n\t# voting\n\tout = voting(edge)\n\n\treturn out\n\n\n# Read image\nimg = cv2.imread(\"thorino.jpg\").astype(np.float32)\n\n# Canny\nedge = Canny(img)\n\n# Hough\nout = Hough_Line_step1(edge)\n\nout = out.astype(np.uint8)\n\n# Save result\n#cv2.imwrite(\"out.jpg\", out)\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Harris corner detection\ndef Harris_corner_step1(img):\n\n\t## Grayscale\n\tdef BGR2GRAY(img):\n\t\tgray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]\n\t\tgray = gray.astype(np.uint8)\n\t\treturn gray\n\n\t## Sobel\n\tdef Sobel_filtering(gray):\n\t\t# get shape\n\t\tH, W = gray.shape\n\n\t\t# sobel kernel\n\t\tsobely = np.array(((1, 2, 1),\n\t\t\t\t\t\t(0, 0, 0),\n\t\t\t\t\t\t(-1, -2, -1)), dtype=np.float32)\n\n\t\tsobelx = np.array(((1, 0, -1),\n\t\t\t\t\t\t(2, 0, -2),\n\t\t\t\t\t\t(1, 0, -1)), dtype=np.float32)\n\n\t\t# padding\n\t\ttmp = np.pad(gray, (1, 1), 'edge')\n\n\t\t# prepare\n\t\tIx = np.zeros_like(gray, dtype=np.float32)\n\t\tIy = np.zeros_like(gray, dtype=np.float32)\n\n\t\t# get differential\n\t\tfor y in range(H):\n\t\t\tfor x in range(W):\n\t\t\t\tIx[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobelx)\n\t\t\t\tIy[y, x] = np.mean(tmp[y : y + 3, x : x + 3] * sobely)\n\t\t\t\n\t\tIx2 = Ix ** 2\n\t\tIy2 = Iy ** 2\n\t\tIxy = Ix * Iy\n\n\t\treturn Ix2, Iy2, Ixy\n\n\n\t# gaussian filtering\n\tdef gaussian_filtering(I, K_size=3, sigma=3):\n\t\t# get shape\n\t\tH, W = I.shape\n\n\t\t## gaussian\n\t\tI_t = np.pad(I, (K_size // 2, K_size // 2), 'edge')\n\n\t\t# gaussian kernel\n\t\tK = np.zeros((K_size, K_size), dtype=np.float)\n\t\tfor x in range(K_size):\n\t\t\tfor y in range(K_size):\n\t\t\t\t_x = x - K_size // 2\n\t\t\t\t_y = y - K_size // 2\n\t\t\t\tK[y, x] = np.exp( -(_x ** 2 + _y ** 2) / (2 * (sigma ** 2)))\n\t\tK /= (sigma * np.sqrt(2 * np.pi))\n\t\tK /= K.sum()\n\n\t\t# filtering\n\t\tfor y in range(H):\n\t\t\tfor x in range(W):\n\t\t\t\tI[y,x] = np.sum(I_t[y : y + K_size, x : x + K_size] * K)\n\t\t\t\t\n\t\treturn I\n\n\t\n\t# 1. grayscale\n\tgray = BGR2GRAY(img)\n\n\t# 2. get difference image\n\tIx2, Iy2, Ixy = Sobel_filtering(gray)\n\n\t# 3. 
gaussian filtering\n\tIx2 = gaussian_filtering(Ix2, K_size=3, sigma=3)\n\tIy2 = gaussian_filtering(Iy2, K_size=3, sigma=3)\n\tIxy = gaussian_filtering(Ixy, K_size=3, sigma=3)\n\n\t# show result\n\tplt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0, wspace=0.2)\n\n\tplt.subplot(1,3,1)\n\tplt.imshow(Ix2, cmap='gray')\n\tplt.title(\"Ix^2\")\n\tplt.axis(\"off\")\n\n\tplt.subplot(1,3,2)\n\tplt.imshow(Iy2, cmap='gray')\n\tplt.title(\"Iy^2\")\n\tplt.axis(\"off\")\n\n\tplt.subplot(1,3,3)\n\tplt.imshow(Ixy, cmap='gray')\n\tplt.title(\"Ixy\")\n\tplt.axis(\"off\")\n\n\t# plt.savefig(\"out.png\")\n\tplt.show()\n\n\n# Read image\nimg = cv2.imread(\"thorino.jpg\").astype(np.float32)\n\n# Harris corner detection step1\nout = Harris_corner_step1(img)\n",
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# DCT hyoer-parameter\nT = 8\nK = 8\nchannel = 3\n\n\n# BGR -> Y Cb Cr\ndef BGR2YCbCr(img):\n H, W, _ = img.shape\n\n ycbcr = np.zeros([H, W, 3], dtype=np.float32)\n\n ycbcr[..., 0] = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]\n ycbcr[..., 1] = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.\n ycbcr[..., 2] = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.\n\n return ycbcr\n\n\n# Y Cb Cr -> BGR\ndef YCbCr2BGR(ycbcr):\n H, W, _ = ycbcr.shape\n\n out = np.zeros([H, W, channel], dtype=np.float32)\n out[..., 2] = ycbcr[..., 0] + (ycbcr[..., 2] - 128.) * 1.4020\n out[..., 1] = ycbcr[..., 0] - (ycbcr[..., 1] - 128.) * 0.3441 - (ycbcr[..., 2] - 128.) * 0.7139\n out[..., 0] = ycbcr[..., 0] + (ycbcr[..., 1] - 128.) * 1.7718\n\n out = np.clip(out, 0, 255)\n out = out.astype(np.uint8)\n\n return out\n\n\n# DCT weight\ndef DCT_w(x, y, u, v):\n cu = 1.\n cv = 1.\n if u == 0:\n cu /= np.sqrt(2)\n if v == 0:\n cv /= np.sqrt(2)\n theta = np.pi / (2 * T)\n return ((2 * cu * cv / T) * np.cos((2 * x + 1) * u * theta) * np.cos((2 * y + 1) * v * theta))\n\n\n# DCT\ndef dct(img):\n H, W, _ = img.shape\n\n F = np.zeros((H, W, channel), dtype=np.float32)\n\n for c in range(channel):\n for yi in range(0, H, T):\n for xi in range(0, W, T):\n for v in range(T):\n for u in range(T):\n for y in range(T):\n for x in range(T):\n F[v + yi, u + xi, c] += img[y + yi, x + xi, c] * DCT_w(x, y, u, v)\n\n return F\n\n\n# IDCT\ndef idct(F):\n H, W, _ = F.shape\n\n out = np.zeros((H, W, channel), dtype=np.float32)\n\n for c in range(channel):\n for yi in range(0, H, T):\n for xi in range(0, W, T):\n for y in range(T):\n for x in range(T):\n for v in range(K):\n for u in range(K):\n out[y + yi, x + xi, c] += F[v + yi, u + xi, c] * DCT_w(x, y, u, v)\n\n out = np.clip(out, 0, 255)\n out = np.round(out).astype(np.uint8)\n\n return out\n\n\n# Quantization\ndef 
quantization(F):\n H, W, _ = F.shape\n\n Q = np.array(((16, 11, 10, 16, 24, 40, 51, 61),\n (12, 12, 14, 19, 26, 58, 60, 55),\n (14, 13, 16, 24, 40, 57, 69, 56),\n (14, 17, 22, 29, 51, 87, 80, 62),\n (18, 22, 37, 56, 68, 109, 103, 77),\n (24, 35, 55, 64, 81, 104, 113, 92),\n (49, 64, 78, 87, 103, 121, 120, 101),\n (72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)\n\n for ys in range(0, H, T):\n for xs in range(0, W, T):\n for c in range(channel):\n F[ys: ys + T, xs: xs + T, c] = np.round(F[ys: ys + T, xs: xs + T, c] / Q) * Q\n\n return F\n\n\n# JPEG without Hufman coding\ndef JPEG(img):\n # BGR -> Y Cb Cr\n ycbcr = BGR2YCbCr(img)\n\n # DCT\n F = dct(ycbcr)\n\n # quantization\n F = quantization(F)\n\n # IDCT\n ycbcr = idct(F)\n\n # Y Cb Cr -> BGR\n out = YCbCr2BGR(ycbcr)\n\n return out\n\n\n# MSE\ndef MSE(img1, img2):\n H, W, _ = img1.shape\n mse = np.sum((img1 - img2) ** 2) / (H * W * channel)\n return mse\n\n\n# PSNR\ndef PSNR(mse, vmax=255):\n return 10 * np.log10(vmax * vmax / mse)\n\n\n# bitrate\ndef BITRATE():\n return 1. * T * K * K / T / T\n\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\n\n# JPEG\nout = JPEG(img)\n\n# MSE\nmse = MSE(img, out)\n\n# PSNR\npsnr = PSNR(mse)\n\n# bitrate\nbitrate = BITRATE()\n\nprint(\"MSE:\", mse)\nprint(\"PSNR:\", psnr)\nprint(\"bitrate:\", bitrate)\n\n# Save result\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# cv2.imwrite(\"out.jpg\", out)\n",
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\nH, W, C = img.shape\n\n# RGB > YCbCr\nY = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]\nCb = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.\nCr = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.\n\nYCC = np.zeros_like(img, dtype=np.float32)\nYCC[..., 0] = Y\nYCC[..., 1] = Cb\nYCC[..., 2] = Cr\n\n\n# DCT\nT = 8\nK = 8\nX = np.zeros((H, W, C), dtype=np.float64)\n\nQ1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),\n (12, 12, 14, 19, 26, 58, 60, 55),\n (14, 13, 16, 24, 40, 57, 69, 56),\n (14, 17, 22, 29, 51, 87, 80, 62),\n (18, 22, 37, 56, 68, 109, 103, 77),\n (24, 35, 55, 64, 81, 104, 113, 92),\n (49, 64, 78, 87, 103, 121, 120, 101),\n (72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)\n\nQ2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),\n (18, 21, 26, 66, 99, 99, 99, 99),\n (24, 26, 56, 99, 99, 99, 99, 99),\n (47, 66, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)\n\ndef w(x, y, u, v):\n cu = 1.\n cv = 1.\n if u == 0:\n cu /= np.sqrt(2)\n if v == 0:\n cv /= np.sqrt(2)\n theta = np.pi / (2 * T)\n return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))\n \nfor yi in range(0, H, T):\n for xi in range(0, W, T):\n for v in range(T):\n for u in range(T):\n for y in range(T):\n for x in range(T):\n for c in range(C):\n X[v+yi, u+xi, c] += YCC[y+yi, x+xi, c] * w(x,y,u,v)\n \n X[yi:yi+T, xi:xi+T, 0] = np.round(X[yi:yi+T, xi:xi+T, 0] / Q1) * Q1\n X[yi:yi+T, xi:xi+T, 1] = np.round(X[yi:yi+T, xi:xi+T, 1] / Q2) * Q2\n X[yi:yi+T, xi:xi+T, 2] = np.round(X[yi:yi+T, xi:xi+T, 2] / Q2) * Q2\n \n\n# IDCT\nIYCC = np.zeros((H, W, 3), dtype=np.float64)\n\nfor yi in range(0, H, T):\n for xi in range(0, W, T):\n for y in 
range(T):\n for x in range(T):\n for v in range(K):\n for u in range(K):\n IYCC[y+yi, x+xi] += X[v+yi, u+xi] * w(x,y,u,v)\n\n\n# YCbCr > RGB\nout = np.zeros_like(img, dtype=np.float32)\nout[..., 2] = IYCC[..., 0] + (IYCC[..., 2] - 128.) * 1.4020\nout[..., 1] = IYCC[..., 0] - (IYCC[..., 1] - 128.) * 0.3441 - (IYCC[..., 2] - 128.) * 0.7139\nout[..., 0] = IYCC[..., 0] + (IYCC[..., 1] - 128.) * 1.7718\n\nout[out>255] = 255\nout = out.astype(np.uint8)\n \n# MSE\nv_max = 255.\nmse = np.sum(np.power(np.abs(img.astype(np.float32) - out.astype(np.float32)), 2)) / (H * W * C)\npsnr = 10 * np.log10(v_max ** 2 / mse)\n\nprint(\"PSNR >>\", psnr)\n\nbitrate = 1. * T * K ** 2 / (T ** 2)\nprint(\"bitrate >>\", bitrate)\n\n# Save result\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\n#cv2.imwrite(\"out.jpg\", out)\n",
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom glob import glob\n\n\n# K-means step1\ndef k_means_step1(img, Class=5):\n\t# get shape\n\tH, W, C = img.shape\n\n\t# initiate random seed\n\tnp.random.seed(0)\n\n\t# reshape\n\timg = np.reshape(img, (H * W, -1))\n\n\t# select one index randomly\n\ti = np.random.choice(np.arange(H * W), Class, replace=False)\n\tCs = img[i].copy()\n\n\tprint(Cs)\n\n\tclss = np.zeros((H * W), dtype=int)\n\n\t# each pixel\n\tfor i in range(H * W):\n\t\t# get distance from base pixel\n\t\tdis = np.sqrt(np.sum((Cs - img[i]) ** 2, axis=1))\n\t\t# get argmin distance\n\t\tclss[i] = np.argmin(dis)\n\n\t# show\n\tout = np.reshape(clss, (H, W)) * 50\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n\n# read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\n\n# K-means step2\nout = k_means_step1(img)\n\n#cv2.imwrite(\"out.jpg\", out)\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\n"
] |
[
[
"numpy.max",
"numpy.zeros_like",
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"numpy.where",
"numpy.arctan",
"numpy.sqrt",
"numpy.clip",
"numpy.cos",
"numpy.expand_dims",
"numpy.maximum"
],
[
"matplotlib.pyplot.subplot",
"numpy.array",
"numpy.pad",
"numpy.zeros_like",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.title",
"numpy.exp",
"numpy.mean",
"numpy.sqrt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
],
[
"numpy.array",
"numpy.clip",
"numpy.zeros",
"numpy.round",
"numpy.sum",
"numpy.cos",
"numpy.sqrt",
"numpy.log10"
],
[
"numpy.array",
"numpy.zeros_like",
"numpy.zeros",
"numpy.round",
"numpy.sqrt",
"numpy.cos",
"numpy.log10"
],
[
"numpy.reshape",
"numpy.zeros",
"numpy.argmin",
"numpy.random.seed",
"numpy.sum",
"numpy.arange"
]
] |
oneflyingfish/tvm
|
[
"500ad0a4c6f88300fe624f7c07e15fd1fda17668"
] |
[
"tests/python/relay/test_op_level1.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\nimport tvm\nfrom tvm import te\nimport scipy\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import run_infer_type\nimport tvm.topi.testing\nfrom tvm.contrib.nvcc import have_fp16\nimport tvm.testing\n\n\ndef sigmoid(x):\n one = np.ones_like(x)\n return one / (one + np.exp(-x))\n\n\ndef relu(x):\n x_copy = np.copy(x)\n np.maximum(x_copy, 0, x_copy)\n return x_copy\n\n\ndef rsqrt(x):\n one = np.ones_like(x)\n return one / np.sqrt(x)\n\n\nclass TestUnaryOp:\n op_list = {\n \"log\": (tvm.relay.log, np.log),\n \"exp\": (tvm.relay.exp, np.exp),\n \"erf\": (tvm.relay.erf, scipy.special.erf),\n \"sqrt\": (tvm.relay.sqrt, np.sqrt),\n \"rqsrt\": (tvm.relay.rsqrt, rsqrt),\n \"sigmoid\": (tvm.relay.sigmoid, sigmoid),\n \"tanh\": (tvm.relay.tanh, np.tanh),\n \"relu\": (relay.nn.relu, relu),\n \"cos\": (tvm.relay.cos, np.cos),\n \"sin\": (tvm.relay.sin, np.sin),\n \"tan\": (tvm.relay.tan, np.tan),\n \"atan\": (tvm.relay.atan, np.arctan),\n }\n\n dtype = tvm.testing.parameter(\"float16\", \"float32\")\n\n relay_op, ref_func = tvm.testing.parameters(*op_list.values(), ids=op_list.keys())\n\n def test_unary_op(self, target, dev, 
relay_op, ref_func, dtype):\n target = tvm.target.Target(target)\n if dtype == \"float16\":\n if target.kind.name == \"cuda\":\n if not have_fp16(tvm.cuda(0).compute_version):\n pytest.xfail(\n \"No float16 support on local cuda device (compute_version != 5.3 and < 6.0)\"\n )\n elif target.kind.name == \"vulkan\" and not target.attrs.get(\"supports_float16\", False):\n pytest.xfail(\"No float16 support on vulkan target (supports_float16=False)\")\n else:\n pytest.xfail(f\"No float16 support on {target.kind.name} target\")\n\n if target.kind.name == \"vulkan\" and relay_op in [\n tvm.relay.erf,\n tvm.relay.tan,\n tvm.relay.atan,\n ]:\n pytest.xfail(f\"Vulkan runtime doesn't yet support {relay_op}\")\n\n shape = (10, 4)\n dtype = dtype\n tp = relay.TensorType(shape, dtype=dtype)\n x = relay.var(\"x\", type_annotation=tp)\n y = relay_op(x)\n # test printer\n assert (\"{}(%x)\".format(y.op.name)) in y.astext()\n # test type inference\n yy = run_infer_type(y)\n assert yy.checked_type == tp\n\n if ref_func is not None:\n data = np.random.rand(*shape).astype(dtype)\n ref_res = ref_func(data).astype(dtype)\n func = relay.Function([x], y)\n # use graph by execuor default for testing, as we need\n # create function explicitly to avoid constant-folding.\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(data)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_binary_op():\n def inst(vars, sh):\n return [vars.get(s, s) for s in sh]\n\n def check_binary_op(opfunc, ref, dtype):\n # TODO(@jroesch): this piece of code improperly uses type variables.\n n = te.var(\"n\")\n s1 = (5, n, 5)\n s2 = (n, 1)\n t1 = relay.TensorType(s1)\n t2 = relay.TensorType(s2)\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n # test printer\n assert (\"{}(%x, %y)\".format(z.op.name)) in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == t1\n\n if ref is 
not None:\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = np.random.rand(5, 10, 5).astype(dtype)\n ref_res = ref(x_data, y_data)\n func = relay.Function([x, y], z)\n\n for target, dev in tvm.testing.enabled_targets():\n # use graph by execuor default for testing, as we need\n # create function explicitly to avoid constant-folding.\n if (\n dtype == \"float16\"\n and target == \"cuda\"\n and not have_fp16(tvm.cuda(0).compute_version)\n ):\n continue\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data, y_data\n )\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01, atol=1e-3)\n\n for opfunc, ref in [\n (relay.add, np.add),\n (relay.subtract, np.subtract),\n (relay.multiply, np.multiply),\n (relay.divide, np.divide),\n (relay.floor_divide, np.floor_divide),\n (relay.floor_mod, np.fmod),\n ]:\n for dtype in [\"float16\", \"float32\"]:\n check_binary_op(opfunc, ref, dtype)\n\n\n@tvm.testing.uses_gpu\ndef test_expand_dims():\n # based on topi test\n def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):\n x = relay.Var(\"x\", relay.TensorType(dshape, dtype))\n func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))\n for target, dev in tvm.testing.enabled_targets():\n if (\n dtype == \"float16\"\n and target == \"cuda\"\n and not have_fp16(tvm.cuda(0).compute_version)\n ):\n continue\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = data.reshape(oshape)\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(data)\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)\n\n for dtype in [\"float16\", \"float32\"]:\n verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)\n verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)\n\n\n@tvm.testing.uses_gpu\ndef 
test_bias_add():\n for dtype in [\"float16\", \"float32\"]:\n xshape = (10, 2, 3, 4)\n bshape = (2,)\n rtol = 1e-2 if dtype == \"float16\" else 1e-5\n x = relay.var(\"x\", shape=xshape, dtype=dtype)\n bias = relay.var(\"bias\", dtype=dtype)\n z = relay.nn.bias_add(x, bias)\n zz = run_infer_type(z)\n assert \"axis=\" not in zz.astext()\n assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)\n\n func = relay.Function([x, bias], z)\n x_data = np.random.uniform(size=xshape).astype(dtype)\n y_data = np.random.uniform(size=bshape).astype(dtype)\n ref_res = x_data + y_data.reshape((2, 1, 1))\n for target, dev in tvm.testing.enabled_targets():\n if (\n dtype == \"float16\"\n and target == \"cuda\"\n and not have_fp16(tvm.cuda(0).compute_version)\n ):\n continue\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data, y_data\n )\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol)\n\n\ndef test_bias_add_type_failure():\n def assert_failure(expr):\n try:\n run_infer_type(expr)\n except tvm._ffi.base.TVMError:\n return\n else:\n assert False\n\n for axis in (0, -1, -3, 1):\n assert_failure(relay.nn.bias_add(relay.const(1), relay.const(2), axis=axis))\n\n\ndef test_expand_dims_infer_type():\n for dtype in [\"float16\", \"float32\"]:\n n, t, d = te.size_var(\"n\"), te.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d), dtype=dtype)\n y = relay.expand_dims(x, axis=2)\n assert \"axis=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)\n\n\n@tvm.testing.uses_gpu\ndef test_softmax():\n for dtype in [\"float16\", \"float32\"]:\n # Softmax accuracy for float16 is poor\n if dtype == \"float16\":\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.softmax(x, axis=1)\n assert \"nn.softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], 
y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = tvm.topi.testing.softmax_python(x_data)\n for target, dev in tvm.testing.enabled_targets():\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data\n )\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_log_softmax():\n for dtype in [\"float16\", \"float32\"]:\n # Softmax accuracy for float16 is poor\n if dtype == \"float16\":\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.log_softmax(x, axis=1)\n assert \"nn.log_softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = tvm.topi.testing.log_softmax_python(x_data)\n for target, dev in tvm.testing.enabled_targets():\n op_res = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data\n )\n np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_concatenate():\n for dtype in [\"float16\", \"float32\"]:\n n, t, d = te.size_var(\"n\"), te.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d))\n y = relay.var(\"y\", shape=(n, t, d))\n z = relay.concatenate((x, y), axis=-1)\n assert \"axis=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n\n x = relay.exp(x)\n z = relay.concatenate((x, y), axis=2)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n\n z = relay.concatenate((x, y), axis=1)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t + t, 100))\n\n # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError.\n try:\n x = relay.var(\"p1\", shape=(2, 5))\n y = relay.var(\"p2\", shape=(2, 3))\n c = relay.concatenate([x, y], axis=0)\n func = relay.Function([x, y], 
c)\n zz = run_infer_type(func)\n except tvm._ffi.base.TVMError:\n pass\n else:\n assert False\n\n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n y = relay.var(\"y\", shape=(10, 5), dtype=dtype)\n t = relay.var(\"z\", shape=(), dtype=dtype)\n z = relay.concatenate((x, y), axis=1)\n z = relay.add(z, t)\n # Check result.\n func = relay.Function([x, y, t], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n y_data = np.random.rand(10, 5).astype(dtype)\n t_data = np.random.uniform(size=()).astype(dtype)\n ref_res = np.concatenate((x_data, y_data), axis=1) + t_data\n\n for target, dev in tvm.testing.enabled_targets():\n if (\n dtype == \"float16\"\n and target == \"cuda\"\n and not have_fp16(tvm.cuda(0).compute_version)\n ):\n continue\n op_res1 = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data, y_data, t_data\n )\n tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=0.01)\n op_res2 = relay.create_executor(\"debug\", device=dev, target=target).evaluate(func)(\n x_data, y_data, t_data\n )\n tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=0.01)\n\n\ndef test_dropout():\n for dtype in [\"float16\", \"float32\"]:\n n, t, d = te.size_var(\"n\"), te.size_var(\"t\"), te.size_var(\"d\")\n input_ty = relay.TensorType((n, t, d), dtype)\n x = relay.var(\"x\", input_ty)\n y = relay.nn.dropout(x, rate=0.75)\n assert \"rate=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == input_ty\n\n in_np = np.random.random([4, 5, 6]).astype(\"float32\")\n x = relay.const(in_np)\n y = relay.nn.dropout(x, rate=0.5)\n func = relay.Function([], y)\n for target, dev in tvm.testing.enabled_targets():\n for backend in [\"debug\", \"graph\"]:\n op_res = relay.create_executor(\"debug\", device=dev, target=target).evaluate(func)()\n tvm.testing.assert_allclose(op_res.numpy(), in_np, rtol=0.01)\n\n\ndef test_batch_norm():\n for dtype in [\"float16\", \"float32\"]:\n # beta and gamma ignored\n data = relay.var(\"data\", 
relay.TensorType((3, 2, 1), dtype))\n beta = relay.var(\"beta\", relay.TensorType((2,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((2,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((2,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((2,), dtype))\n y = relay.nn.batch_norm(\n data, gamma, beta, moving_mean, moving_var, center=False, scale=False\n )\n yy = run_infer_type(y.astuple())\n assert \"center=\" in yy.astext()\n assert yy.checked_type == relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.TensorType((3, 2, 1), dtype),\n relay.TensorType((2,), dtype),\n relay.TensorType((2,), dtype),\n ]\n )\n )\n\n # axis=1\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n\n y = relay.nn.batch_norm(\n data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False\n )\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((3, 2, 1), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype),\n ]\n )\n )\n\n # axis=-1\n data = relay.var(\"data\", relay.TensorType((1, 2, 3), dtype))\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n y = relay.nn.batch_norm(\n data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False\n )\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(\n tvm.runtime.convert(\n [\n relay.ty.TensorType((1, 2, 3), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype),\n ]\n )\n 
)\n\n\ndef test_batch_norm_fold_const():\n axis = 1\n dtype = \"float32\"\n shape = [4, 5, 6]\n\n data_np = np.random.random(shape).astype(dtype)\n beta_np = np.random.random(shape[axis]).astype(dtype)\n gamma_np = np.random.random(shape[axis]).astype(dtype)\n moving_mean_np = np.random.random(shape[axis]).astype(dtype)\n moving_var_np = np.random.random(shape[axis]).astype(dtype)\n\n data = relay.var(\"data\", relay.TensorType(shape, dtype))\n beta = relay.var(\"beta\", relay.TensorType((shape[1],), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((shape[1],), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((shape[1],), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((shape[1],), dtype))\n out = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis).astuple()\n func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)\n\n out_const = relay.nn.batch_norm(\n relay.const(data_np),\n relay.const(gamma_np),\n relay.const(beta_np),\n relay.const(moving_mean_np),\n relay.const(moving_var_np),\n axis=axis,\n ).astuple()\n func_const = relay.Function([], out_const)\n\n # Build the module with constants to have FoldConstant transform batch_norm.\n mod_const = tvm.IRModule.from_expr(func_const)\n mod_const = relay.transform.FoldConstant()(mod_const)\n\n const_data_out = mod_const[\"main\"].body[0].data\n const_moving_mean_out = mod_const[\"main\"].body[1].data\n const_moving_var_out = mod_const[\"main\"].body[2].data\n\n # Run the Relay func without constants. 
This will use SimplyInference instead.\n vm_data_out, vm_moving_mean_out, vm_moving_var_out = relay.create_executor(\n \"vm\", device=tvm.device(\"llvm\"), target=\"llvm\"\n ).evaluate(func)(data_np, gamma_np, beta_np, moving_mean_np, moving_var_np)\n\n tvm.testing.assert_allclose(const_data_out.numpy(), vm_data_out.numpy())\n tvm.testing.assert_allclose(const_moving_mean_out.numpy(), vm_moving_mean_out.numpy())\n tvm.testing.assert_allclose(const_moving_var_out.numpy(), vm_moving_var_out.numpy())\n\n\n@pytest.mark.xfail\ndef test_matmul_type_check():\n dtype = \"float16\"\n n, c, h, w = 2, 2, 2, 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n # it should fail since it does not match with m(2)\n mismatch_w = 3\n w = relay.var(\"w\", relay.TensorType((mismatch_w, 2), dtype))\n y = relay.nn.matmul(x, w)\n yy = run_infer_type(y)\n\n\n@tvm.testing.uses_gpu\ndef test_matmul():\n for dtype in [\"float16\", \"float32\"]:\n # Matmul accuracy for float16 is poor\n if dtype == \"float16\":\n continue\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), dtype))\n y = relay.nn.matmul(x, w, units=2, transpose_b=True)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, w, h), dtype))\n wh, ww = te.size_var(\"wh\"), te.size_var(\"ww\")\n w = relay.var(\"w\", relay.TensorType((wh, ww), dtype))\n y = relay.nn.matmul(x, w, transpose_a=True)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)\n\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.IncompleteType())\n y = 
relay.nn.matmul(x, w, units=2)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n x = relay.var(\"x\", shape=(5, 10), dtype=dtype)\n w = relay.var(\"w\", shape=(5, 2), dtype=dtype)\n z = relay.nn.matmul(x, w, transpose_a=True)\n\n # Check result.\n func = relay.Function([x, w], z)\n x_data = np.random.rand(5, 10).astype(dtype)\n w_data = np.random.rand(5, 2).astype(dtype)\n ref_res = np.dot(x_data.transpose(), w_data)\n\n for target, dev in tvm.testing.enabled_targets():\n op_res1 = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data, w_data\n )\n tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)\n op_res2 = relay.create_executor(\"debug\", device=dev, target=target).evaluate(func)(\n x_data, w_data\n )\n tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)\n\n\n@pytest.mark.xfail\ndef test_dense_type_check():\n dtype = \"float16\"\n n, c, h, w = 2, 2, 2, 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n # it should fail since it does not match with m(2)\n mismatch_w = 3\n w = relay.var(\"w\", relay.TensorType((2, mismatch_w), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n\n\n@tvm.testing.uses_gpu\ndef test_dense():\n for dtype in [\"float16\", \"float32\"]:\n # Dense accuracy for float16 is poor\n if dtype == \"float16\":\n continue\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), dtype))\n y = relay.nn.dense(x, w, units=2)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n wh, ww = te.size_var(\"wh\"), te.size_var(\"ww\")\n w = relay.var(\"w\", 
relay.TensorType((ww, wh), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)\n\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.IncompleteType())\n y = relay.nn.dense(x, w, units=2)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n w = relay.var(\"w\", shape=(2, 5), dtype=dtype)\n z = relay.nn.dense(x, w)\n\n # Check result.\n func = relay.Function([x, w], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n w_data = np.random.rand(2, 5).astype(dtype)\n ref_res = np.dot(x_data, w_data.T)\n\n for target, dev in tvm.testing.enabled_targets():\n op_res1 = relay.create_executor(\"graph\", device=dev, target=target).evaluate(func)(\n x_data, w_data\n )\n tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)\n op_res2 = relay.create_executor(\"debug\", device=dev, target=target).evaluate(func)(\n x_data, w_data\n )\n tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_dense_same_args_compile():\n for dtype in [\"float32\", \"int8\"]:\n x = relay.var(\"x\", shape=(32, 64), dtype=dtype)\n out_dtype = \"int32\" if dtype == \"int8\" else \"float32\"\n f = relay.Function([x], relay.nn.dense(x, x, out_dtype=out_dtype))\n m = tvm.IRModule.from_expr(f)\n\n for target, _ in tvm.testing.enabled_targets():\n tvm.relay.build(m, target=target)\n\n\ndef test_dense_dtype():\n data_dtype = \"uint8\"\n weight_dtype = \"int8\"\n out_dtype = \"uint8\"\n n, c, h, w = te.size_var(\"n\"), te.size_var(\"c\"), te.size_var(\"h\"), te.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), data_dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), weight_dtype))\n y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)\n assert 
\"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)\n assert run_infer_type(yy.args[0]).checked_type.dtype == \"uint8\"\n assert run_infer_type(yy.args[1]).checked_type.dtype == \"int8\"\n\n\ndef test_bitserial_dense():\n m, k = te.size_var(\"m\"), te.size_var(\"k\")\n x = relay.var(\"x\", relay.TensorType((m, k), \"int16\"))\n w = relay.var(\"w\", relay.TensorType((k, 32), \"int16\"))\n y = relay.nn.bitserial_dense(x, w, units=32)\n \"units=8\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((m, 32), \"int16\")\n\n\nif __name__ == \"__main__\":\n test_concatenate()\n test_bias_add()\n test_bias_add_type_failure()\n test_unary_op()\n test_binary_op()\n test_expand_dims_infer_type()\n test_expand_dims()\n test_softmax()\n test_log_softmax()\n test_dropout()\n test_batch_norm()\n test_matmul()\n test_dense()\n test_bitserial_dense()\n test_dense_dtype()\n"
] |
[
[
"numpy.concatenate",
"numpy.ones_like",
"numpy.dot",
"numpy.random.rand",
"numpy.copy",
"numpy.exp",
"numpy.random.uniform",
"numpy.sqrt",
"numpy.random.random",
"numpy.maximum"
]
] |
kovanostra/message-passing-nn
|
[
"6617a4753173c8fffc60140b9d8d0f497b33aed4"
] |
[
"data/test_training_dataset.py"
] |
[
"import torch as to\n\nBASE_GRAPH = to.tensor([[0, 1, 1, 0],\n [1, 0, 1, 0],\n [1, 1, 0, 1],\n [0, 0, 1, 0]])\nBASE_GRAPH_NODE_FEATURES = to.tensor([[1, 2], [1, 1], [2, 0.5], [0.5, 0.5]])\nBASE_GRAPH_EDGE_FEATURES = to.tensor([[[0.0, 0.0], [1.0, 2.0], [2.0, 0.5], [0.0, 0.0]],\n [[1.0, 2.0], [0.0, 0.0], [1.0, 1.0], [0.0, 0.0]],\n [[2.0, 0.5], [1.0, 1.0], [0.0, 0.0], [0.5, 0.5]],\n [[0.0, 0.0], [0.0, 0.0], [0.5, 0.5], [0.0, 0.0]]])\n"
] |
[
[
"torch.tensor"
]
] |
osswangxining/iot-app-enabler-conversation
|
[
"d2a174af770a496ef220330cb6d5428f5f0114f6"
] |
[
"conversationinsights-mynlu/_pytest/test_featurizers.py"
] |
[
"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom mynlu.tokenizers.mitie_tokenizer import MitieTokenizer\nfrom mynlu.tokenizers.spacy_tokenizer import SpacyTokenizer\nfrom mynlu.training_data import Message\n\n\n@pytest.mark.parametrize(\"sentence, expected\", [\n (\"hey how are you today\", [-0.19649599, 0.32493639, -0.37408298, -0.10622784, 0.062756])\n])\ndef test_spacy_featurizer(sentence, expected, spacy_nlp):\n from mynlu.featurizers.spacy_featurizer import SpacyFeaturizer\n ftr = SpacyFeaturizer()\n doc = spacy_nlp(sentence)\n vecs = ftr.features_for_doc(doc)\n assert np.allclose(doc.vector[:5], expected, atol=1e-5)\n assert np.allclose(vecs, doc.vector, atol=1e-5)\n\n\ndef test_mitie_featurizer(mitie_feature_extractor, default_config):\n from mynlu.featurizers.mitie_featurizer import MitieFeaturizer\n\n default_config[\"mitie_file\"] = os.environ.get('MITIE_FILE')\n if not default_config[\"mitie_file\"] or not os.path.isfile(default_config[\"mitie_file\"]):\n default_config[\"mitie_file\"] = os.path.join(\"data\", \"total_word_feature_extractor.dat\")\n\n ftr = MitieFeaturizer.load()\n sentence = \"Hey how are you today\"\n tokens = MitieTokenizer().tokenize(sentence)\n vecs = ftr.features_for_tokens(tokens, mitie_feature_extractor)\n assert np.allclose(vecs[:5], np.array([0., -4.4551446, 0.26073121, -1.46632245, -1.84205751]), atol=1e-5)\n\n\ndef test_ngram_featurizer(spacy_nlp):\n from mynlu.featurizers.ngram_featurizer import NGramFeaturizer\n ftr = NGramFeaturizer()\n repetition_factor = 5 # ensures that during random sampling of the ngram CV we don't end up with a one-class-split\n labeled_sentences = [\n Message(\"heyheyheyhey\", {\"intent\": \"greet\", \"text_features\": [0.5]}),\n Message(\"howdyheyhowdy\", {\"intent\": \"greet\", \"text_features\": [0.5]}),\n Message(\"heyhey howdyheyhowdy\", 
{\"intent\": \"greet\", \"text_features\": [0.5]}),\n Message(\"howdyheyhowdy heyhey\", {\"intent\": \"greet\", \"text_features\": [0.5]}),\n Message(\"astalavistasista\", {\"intent\": \"goodby\", \"text_features\": [0.5]}),\n Message(\"astalavistasista sistala\", {\"intent\": \"goodby\", \"text_features\": [0.5]}),\n Message(\"sistala astalavistasista\", {\"intent\": \"goodby\", \"text_features\": [0.5]}),\n ] * repetition_factor\n\n for m in labeled_sentences:\n m.set(\"spacy_doc\", spacy_nlp(m.text))\n\n ftr.min_intent_examples_for_ngram_classification = 2\n ftr.train_on_sentences(labeled_sentences,\n max_number_of_ngrams=10)\n assert len(ftr.all_ngrams) > 0\n assert ftr.best_num_ngrams > 0\n\n\n@pytest.mark.parametrize(\"sentence, expected, labeled_tokens\", [\n (\"hey how are you today\", [0., 1.], [0]),\n (\"hey 123 how are you\", [1., 1.], [0, 1]),\n (\"blah balh random eh\", [0., 0.], []),\n (\"looks really like 123 today\", [1., 0.], [3]),\n])\ndef test_regex_featurizer(sentence, expected, labeled_tokens, spacy_nlp):\n from mynlu.featurizers.regex_featurizer import RegexFeaturizer\n patterns = [\n {\"pattern\": '[0-9]+', \"name\": \"number\", \"usage\": \"intent\"},\n {\"pattern\": '\\\\bhey*', \"name\": \"hello\", \"usage\": \"intent\"}\n ]\n ftr = RegexFeaturizer(patterns)\n\n # adds tokens to the message\n tokenizer = SpacyTokenizer()\n message = Message(sentence)\n message.set(\"spacy_doc\", spacy_nlp(sentence))\n tokenizer.process(message)\n\n result = ftr.features_for_patterns(message)\n assert np.allclose(result, expected, atol=1e-10)\n assert len(message.get(\"tokens\", [])) > 0 # the tokenizer should have added tokens\n for i, token in enumerate(message.get(\"tokens\")):\n if i in labeled_tokens:\n assert token.get(\"pattern\") in [0, 1]\n else:\n assert token.get(\"pattern\") is None # if the token is not part of a regex the pattern should not be set\n"
] |
[
[
"numpy.allclose",
"numpy.array"
]
] |
gallanoe/tinygrad
|
[
"0cf21881b710dd55614a1eb19c5ca0e7081cb8b5"
] |
[
"examples/efficientnet.py"
] |
[
"# load weights from\n# https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth\n# a rough copy of\n# https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py\nimport os\nGPU = os.getenv(\"GPU\", None) is not None\nimport sys\nimport io\nimport time\nimport numpy as np\nnp.set_printoptions(suppress=True)\nfrom tinygrad.tensor import Tensor\nfrom tinygrad.utils import fetch, get_parameters\nfrom extra.efficientnet import EfficientNet\n\ndef infer(model, img):\n # preprocess image\n aspect_ratio = img.size[0] / img.size[1]\n img = img.resize((int(224*max(aspect_ratio,1.0)), int(224*max(1.0/aspect_ratio,1.0))))\n\n img = np.array(img)\n y0,x0=(np.asarray(img.shape)[:2]-224)//2\n retimg = img = img[y0:y0+224, x0:x0+224]\n\n # if you want to look at the image\n \"\"\"\n import matplotlib.pyplot as plt\n plt.imshow(img)\n plt.show()\n \"\"\"\n\n # low level preprocess\n img = np.moveaxis(img, [2,0,1], [0,1,2])\n img = img.astype(np.float32)[:3].reshape(1,3,224,224)\n img /= 255.0\n img -= np.array([0.485, 0.456, 0.406]).reshape((1,-1,1,1))\n img /= np.array([0.229, 0.224, 0.225]).reshape((1,-1,1,1))\n\n # run the net\n if GPU:\n out = model.forward(Tensor(img).cuda()).cpu()\n else:\n out = model.forward(Tensor(img))\n\n # if you want to look at the outputs\n \"\"\"\n import matplotlib.pyplot as plt\n plt.plot(out.data[0])\n plt.show()\n \"\"\"\n return out, retimg\n\nif __name__ == \"__main__\":\n # instantiate my net\n model = EfficientNet(int(os.getenv(\"NUM\", \"0\")))\n model.load_weights_from_torch()\n if GPU:\n [x.cuda_() for x in get_parameters(model)]\n\n # category labels\n import ast\n lbls = fetch(\"https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt\")\n lbls = ast.literal_eval(lbls.decode('utf-8'))\n\n # load image and preprocess\n from PIL import Image\n url = sys.argv[1]\n if url == 
'webcam':\n import cv2\n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)\n while 1:\n _ = cap.grab() # discard one frame to circumvent capture buffering\n ret, frame = cap.read()\n img = Image.fromarray(frame[:, :, [2,1,0]])\n out, retimg = infer(model, img)\n print(np.argmax(out.data), np.max(out.data), lbls[np.argmax(out.data)])\n SCALE = 3\n simg = cv2.resize(retimg, (224*SCALE, 224*SCALE))\n retimg = cv2.cvtColor(simg, cv2.COLOR_RGB2BGR)\n cv2.imshow('capture', retimg)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n else:\n if url.startswith('http'):\n img = Image.open(io.BytesIO(fetch(url)))\n else:\n img = Image.open(url)\n st = time.time()\n out, _ = infer(model, img)\n print(np.argmax(out.data), np.max(out.data), lbls[np.argmax(out.data)])\n print(\"did inference in %.2f s\" % (time.time()-st))\n #print(\"NOT\", np.argmin(out.data), np.min(out.data), lbls[np.argmin(out.data)])\n\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.argmax",
"numpy.moveaxis"
]
] |
MuchToMyDelight/tvm
|
[
"474bc4e761e3bc87d69419c376042c19b6b7dbbe",
"474bc4e761e3bc87d69419c376042c19b6b7dbbe"
] |
[
"tests/python/contrib/test_ethosn/infrastructure.py",
"tests/python/relay/test_json_runtime.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Ethos-N test functions\"\"\"\n\nfrom __future__ import absolute_import, print_function\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import utils, graph_runtime, download\nfrom hashlib import md5\nfrom itertools import zip_longest, combinations\nimport numpy as np\nfrom PIL import Image\nimport os\n\nfrom . 
import _infrastructure\nfrom tvm.relay.op.contrib import get_pattern_table\n\n\ndef get_real_image(im_height, im_width):\n repo_base = \"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/\"\n img_name = \"elephant-299.jpg\"\n image_url = os.path.join(repo_base, img_name)\n img_path = download.download_testdata(image_url, img_name, module=\"data\")\n image = Image.open(img_path).resize((im_height, im_width))\n x = np.array(image).astype(\"uint8\")\n data = np.reshape(x, (1, im_height, im_width, 3))\n return data\n\n\ndef assert_lib_hash(lib, golden):\n \"\"\"Check that the Ethos-N runtime modules in a library hash to the same values\n as given by the golden hash(es).\n\n If there's only one Ethos-N module, the golden hash may be provided as a str.\n If there's multiple, a set of golden hashes should be provided to correspond\n with each Ethos-N module that is expected.\n\n This function is used to ensure that no change is made which alters the output\n of a compilation. If such a change is made deliberately (eg. 
to fix a bug) then\n the golden hash should be updated after verifying on hardware that the behaviour\n is still correct.\n\n This method is used because of the lack of hardware availability in upstream CI.\n \"\"\"\n # Convert str hash into a set of hashes\n if isinstance(golden, str):\n golden = {golden}\n\n temp = utils.tempdir()\n path = temp.relpath(\"lib.cmm\")\n hash_set = set()\n for mod in lib.imported_modules:\n if mod.type_key == \"ethos-n\":\n mod.save(path)\n lib_hash = md5(open(path, \"rb\").read()).hexdigest()\n hash_set.add(lib_hash)\n\n assert hash_set == golden, \"Expected hash: {} Got hash: {}\".format(golden, hash_set)\n\n\ndef make_module(func, params):\n func = relay.Function(relay.analysis.free_vars(func), func)\n if params:\n relay.build_module.bind_params_by_name(func, params)\n mod = tvm.IRModule.from_expr(func)\n return relay.transform.InferType()(mod)\n\n\ndef make_ethosn_composite(ethosn_expr, name):\n vars = relay.analysis.free_vars(ethosn_expr)\n func = relay.Function([relay.Var(\"a\")], ethosn_expr)\n func = func.with_attr(\"Composite\", name)\n call = relay.Call(func, vars)\n return call\n\n\ndef make_ethosn_partition(ethosn_expr):\n # Create an Ethos-N global function\n mod = tvm.IRModule({})\n vars = relay.analysis.free_vars(ethosn_expr)\n # NB: it is illegal to reuse variables inside and outside a scope in Relay\n # if you want to duplicate types and names you must re-allocate them.\n fresh_vars = [relay.Var(v.name_hint, v.type_annotation) for v in vars]\n binds = {}\n for var, fresh_var in zip(vars, fresh_vars):\n binds[var] = fresh_var\n ethosn_expr_fresh = relay.bind(ethosn_expr, binds)\n func = relay.Function(fresh_vars, ethosn_expr_fresh)\n func = func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Compiler\", \"ethos-n\")\n func = func.with_attr(\"global_symbol\", \"ethos-n_0\")\n g1 = relay.GlobalVar(\"ethos-n_0\")\n mod[g1] 
= func\n mod = relay.transform.InferType()(mod)\n\n # These are the vars to call the Ethos-N partition with\n more_vars = relay.analysis.free_vars(ethosn_expr)\n # Call the Ethos-N partition in main\n call_fn1 = g1(*more_vars)\n mod[\"main\"] = relay.Function(more_vars, call_fn1)\n return relay.transform.InferType()(mod)\n\n\ndef get_host_op_count(mod):\n class Counter(tvm.relay.ExprVisitor):\n def __init__(self):\n super().__init__()\n self.count = 0\n\n def visit_call(self, call):\n if isinstance(call.op, tvm.ir.Op):\n self.count += 1\n super().visit_call(call)\n\n c = Counter()\n c.visit(mod[\"main\"])\n return c.count\n\n\ndef build(mod, params, npu=True, expected_host_ops=0, npu_partitions=1):\n \"\"\"Build a network with or without Ethos-N offloading.\n\n Parameters\n ----------\n mod : IRModule\n The Relay module to build.\n params : dict of str to NDArray\n The weights to build with.\n npu : bool, optional\n Whether to build with Ethos-N offloading.\n expected_host_ops : int, optional\n The number of ops expected to remain on the host.\n npu_partitions : int, optional\n The number of Ethos-N partitions expected.\n \"\"\"\n relay.backend.compile_engine.get().clear()\n with tvm.transform.PassContext(\n opt_level=3, config={\"relay.ext.ethos-n.options\": {\"variant\": get_ethosn_variant()}}\n ):\n with tvm.target.Target(\"llvm\"):\n if npu:\n f = relay.build_module.bind_params_by_name(mod[\"main\"], params)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n pattern = get_pattern_table(\"ethos-n\")\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeComposite(pattern)(mod)\n mod = relay.transform.AnnotateTarget(\"ethos-n\")(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeCompilerRegions()(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.PartitionGraph()(mod)\n host_op_count = get_host_op_count(mod)\n assert (\n host_op_count == expected_host_ops\n ), \"Got {} host operators, expected 
{}\".format(host_op_count, expected_host_ops)\n partition_count = 0\n for global_var in mod.get_global_vars():\n if \"ethos-n\" in global_var.name_hint:\n partition_count += 1\n\n assert (\n npu_partitions == partition_count\n ), \"Got {} ethos-n partitions, expected {}\".format(partition_count, npu_partitions)\n\n return relay.build(mod, params=params)\n\n\ndef run(lib, inputs, outputs, npu=True):\n \"\"\"Run a module with specified inputs.\n\n Parameters\n ----------\n lib : runtime.Module\n The runtime module.\n inputs : dict of str to NDArray\n The input dictionary.\n outputs : int\n The expected number of outputs.\n npu : bool\n Whether or not any part of the lib is offloaded to Ethos-N.\n If it's false (i.e. it's all running on the CPU), we set\n the mocked result equal to the output so that a subsequent\n mocked run on the NPU returns the same value.\n\n Returns\n -------\n out : list of NDArray\n The results.\n\n \"\"\"\n # Export and load lib to confirm this works\n lib_name = \"mod.so\"\n temp = utils.tempdir()\n lib_path = temp.relpath(lib_name)\n lib.export_library(lib_path)\n lib = tvm.runtime.load_module(lib_path)\n module = graph_runtime.GraphModule(lib[\"default\"](tvm.cpu()))\n module.set_input(**inputs)\n module.run()\n out = [module.get_output(i) for i in range(outputs)]\n if not npu:\n inference_result(out)\n return out\n\n\ndef build_and_run(\n mod, inputs, outputs, params, device=tvm.cpu(), npu=True, expected_host_ops=0, npu_partitions=1\n):\n lib = build(mod, params, npu, expected_host_ops, npu_partitions)\n return run(lib, inputs, outputs, npu)\n\n\ndef verify(answers, atol, rtol=1e-07, verify_saturation=True):\n \"\"\"Compare the array of answers. 
Each entry is a list of outputs\"\"\"\n if len(answers) < 2:\n print(\"No results to compare: expected at least two, found \", len(answers))\n for answer in zip_longest(*answers):\n for outs in combinations(answer, 2):\n if verify_saturation:\n assert (\n np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size\n ), \"Output is saturated: {}\".format(outs[0])\n assert (\n np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size\n ), \"Output is saturated: {}\".format(outs[0])\n tvm.testing.assert_allclose(outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol)\n\n\ndef inference_result(outputs):\n \"\"\"Set the expected results of an Ethos inference, if the testing\n infrastructure is available. This assumes that the entire graph\n was offloaded to the neural processor.\"\"\"\n if tvm.get_global_func(\"relay.ethos-n.test.infra.inference_result\", True):\n return _infrastructure.inference_result(*outputs)\n return False\n\n\ndef test_error(mod, params, err_msg):\n caught = None\n with tvm.transform.PassContext(opt_level=3):\n with tvm.target.Target(\"llvm\"):\n try:\n mod = relay.transform.InferType()(mod)\n relay.build(mod, params)\n except tvm.error.TVMError as e:\n caught = e.args[0]\n finally:\n relay.backend.compile_engine.get().clear()\n\n assert caught is not None\n assert err_msg in caught, caught\n\n\ndef get_conv2d(var, shape):\n \"\"\"Standard convolution to test activation functions\"\"\"\n\n weight_shape = (1, 1, shape[3], 1)\n w = tvm.nd.array(np.ones(weight_shape, \"uint8\"))\n weights = relay.const(w, \"uint8\")\n conv = relay.qnn.op.conv2d(\n var,\n weights,\n input_zero_point=relay.const(0, \"int32\"),\n kernel_zero_point=relay.const(0, \"int32\"),\n input_scale=relay.const(1.0, \"float32\"),\n kernel_scale=relay.const(1.0, \"float32\"),\n kernel_size=(1, 1),\n channels=1,\n data_layout=\"NHWC\",\n kernel_layout=\"HWIO\",\n )\n b = tvm.nd.array(np.zeros((shape[0],), \"int32\"))\n biasc = relay.const(b, 
\"int32\")\n bias = relay.nn.bias_add(conv, biasc, axis=0)\n req = relay.qnn.op.requantize(\n bias,\n relay.const(1.0, \"float32\"), # input zero scale\n relay.const(0, \"int32\"), # input zero point\n relay.const(1.1, \"float32\"), # output zero scale\n relay.const(0, \"int32\"), # output zero point\n out_dtype=\"uint8\",\n )\n params = {\"w\": w, \"b\": b}\n return req, params\n\n\ndef get_conv2d_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, channels):\n input_max = input_sc * (255 - input_zp)\n input_min = -input_sc * input_zp\n kernel_max = kernel_sc * (255 - kernel_zp)\n kernel_min = -kernel_sc * kernel_zp\n output_limits = [\n kernel_max * kernel_h * kernel_w * channels * input_max,\n kernel_min * kernel_h * kernel_w * channels * input_max,\n kernel_min * kernel_h * kernel_w * channels * input_min,\n kernel_max * kernel_h * kernel_w * channels * input_min,\n ]\n output_max = max(output_limits)\n output_min = min(output_limits)\n output_sc = (output_max - output_min) / 255\n output_zp = -int(output_min / output_sc)\n return output_zp, output_sc\n\n\ndef get_ethosn_api_version():\n return tvm.get_global_func(\"relay.ethos-n.api.version\")()\n\n\ndef get_ethosn_variant():\n ethosn_variant_config = os.getenv(\"ETHOSN_VARIANT_CONFIG\")\n if ethosn_variant_config is not None:\n return 3\n return 0\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Unit tests for JSON codegen and runtime.\"\"\"\nimport os\nimport sys\n\nimport numpy as np\n\nimport tvm\nimport tvm.relay.op as reg\nimport tvm.relay.testing\nfrom tvm import relay, runtime\nfrom tvm.contrib import utils\nfrom tvm.relay import transform\nfrom tvm.relay.backend import compile_engine\nfrom tvm.relay.build_module import bind_params_by_name\nfrom tvm.relay.op.contrib.register import get_pattern_table\n\n\ndef set_func_attr(func, compile_name, symbol_name):\n func = func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Compiler\", compile_name)\n func = func.with_attr(\"global_symbol\", symbol_name)\n return func\n\n\ndef check_result(\n mod, ref_mod, map_inputs, out_shape, tol=1e-5, target=\"llvm\", device=tvm.cpu(), params=None\n):\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n return\n\n # Run the reference result\n compile_engine.get().clear()\n with tvm.transform.PassContext(opt_level=3):\n json, lib, param = relay.build(ref_mod, target=target, params=params)\n rt_mod = tvm.contrib.graph_runtime.create(json, lib, device)\n\n for 
name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**param)\n rt_mod.run()\n out = tvm.nd.empty(out_shape, device=device)\n out = rt_mod.get_output(0, out)\n ref_result = out.asnumpy()\n\n def check_vm_result():\n compile_engine.get().clear()\n with relay.build_config(opt_level=3):\n exe = relay.vm.compile(mod, target=target, params=params)\n code, lib = exe.save()\n exe = runtime.vm.Executable.load_exec(code, lib)\n vm = runtime.vm.VirtualMachine(exe, device)\n out = vm.run(**map_inputs)\n tvm.testing.assert_allclose(out.asnumpy(), ref_result, rtol=tol, atol=tol)\n\n def check_graph_runtime_result():\n compile_engine.get().clear()\n with relay.build_config(opt_level=3):\n json, lib, param = relay.build(mod, target=target, params=params)\n rt_mod = tvm.contrib.graph_runtime.create(json, lib, device)\n\n for name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**param)\n rt_mod.run()\n out = tvm.nd.empty(out_shape, device=device)\n out = rt_mod.get_output(0, out)\n tvm.testing.assert_allclose(out.asnumpy(), ref_result, rtol=tol, atol=tol)\n\n check_vm_result()\n check_graph_runtime_result()\n\n\ndef test_conv2d():\n \"\"\"Test a subgraph with a single conv2d operator.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n def conv2d_direct():\n dtype = \"float32\"\n ishape = (1, 32, 14, 14)\n w1shape = (32, 32, 3, 3)\n\n data0 = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight0 = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1))\n\n func = relay.Function([data0, weight0], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight = 
relay.var(\"weight\", shape=(w1shape), dtype=dtype)\n main_f = relay.Function([data, weight], glb_var(data, weight))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight0 = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1))\n main_f = relay.Function([data0, weight0], out)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n\n return mod, ref_mod, {\"data\": i_data, \"weight\": w1_data}, (1, 32, 14, 14)\n\n def group_conv2d():\n dtype = \"float32\"\n ishape = (1, 32, 14, 14)\n w2shape = (32, 1, 3, 3)\n\n data0 = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight0 = relay.var(\"weight\", shape=(w2shape), dtype=dtype)\n out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)\n\n func = relay.Function([data0, weight0], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight = relay.var(\"weight\", shape=(w2shape), dtype=dtype)\n main_f = relay.Function([data, weight], glb_var(data, weight))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight0 = relay.var(\"weight\", shape=(w2shape), dtype=dtype)\n out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)\n main_f = relay.Function([data0, weight0], out)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w_data = np.random.uniform(0, 1, w2shape).astype(dtype)\n\n return mod, 
ref_mod, {\"data\": i_data, \"weight\": w_data}, (1, 32, 14, 14)\n\n for mod, ref_mod, map_inputs, out_shape in [conv2d_direct(), group_conv2d()]:\n check_result(mod, ref_mod, map_inputs, out_shape, tol=1e-5)\n\n\ndef test_add():\n \"\"\"Test a subgraph with a single add operator.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n shape = (10, 10)\n\n def gen_add():\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n data1 = relay.var(\"data1\", shape=shape, dtype=dtype)\n out = relay.add(data0, data1)\n\n func = relay.Function([data0, data1], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n data1 = relay.var(\"data1\", shape=shape, dtype=dtype)\n main_f = relay.Function([data0, data1], glb_var(data0, data1))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n data1 = relay.var(\"data1\", shape=shape, dtype=dtype)\n out = relay.add(data0, data1)\n main_f = relay.Function([data0, data1], out)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n return mod, ref_mod\n\n mod, ref_mod = gen_add()\n\n data0 = np.random.uniform(0, 1, shape).astype(dtype)\n data1 = np.random.uniform(0, 1, shape).astype(dtype)\n check_result(mod, ref_mod, {\"data0\": data0, \"data1\": data1}, shape, tol=1e-5)\n\n\ndef test_relu():\n \"\"\"Test a subgraph with a single ReLU operator.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n shape = (1, 32, 14, 14)\n\n def gen_relu():\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n out = 
relay.nn.relu(data0)\n\n func = relay.Function([data0], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n main_f = relay.Function([data0], glb_var(data0))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n data0 = relay.var(\"data0\", shape=shape, dtype=dtype)\n out = relay.nn.relu(data0)\n main_f = relay.Function([data0], out)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n return mod, ref_mod\n\n mod, ref_mod = gen_relu()\n\n data0 = np.random.uniform(-1, 1, shape).astype(dtype)\n check_result(\n mod,\n ref_mod,\n {\n \"data0\": data0,\n },\n (1, 32, 14, 14),\n tol=1e-5,\n )\n\n\ndef test_dense():\n \"\"\"Test a subgraph with a single dense operator.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n a_shape = (1, 512)\n b_shape = (1024, 512)\n\n def gen_dense():\n a = relay.var(\"A\", shape=a_shape, dtype=dtype)\n b = relay.var(\"B\", shape=b_shape, dtype=dtype)\n out = relay.nn.dense(a, b)\n\n func = relay.Function([a, b], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n a = relay.var(\"A\", shape=a_shape, dtype=dtype)\n b = relay.var(\"B\", shape=b_shape, dtype=dtype)\n main_f = relay.Function([a, b], glb_var(a, b))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n a = relay.var(\"A\", shape=a_shape, dtype=dtype)\n b = relay.var(\"B\", shape=b_shape, dtype=dtype)\n out = relay.nn.dense(a, b)\n main_f = relay.Function([a, b], out)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n return mod, 
ref_mod\n\n mod, ref_mod = gen_dense()\n\n data_a = np.random.uniform(0, 1, a_shape).astype(dtype)\n data_b = np.random.uniform(0, 1, b_shape).astype(dtype)\n check_result(mod, ref_mod, {\"A\": data_a, \"B\": data_b}, (1, 1024), tol=1e-5)\n\n\ndef test_bn():\n \"\"\"Test a subgraph with a single batch_norm operator.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n d_shape = (1, 8)\n c_shape = (8,)\n\n def gen_bn():\n data = relay.var(\"data\", shape=d_shape)\n gamma = relay.var(\"gamma\", shape=c_shape)\n beta = relay.var(\"beta\", shape=c_shape)\n moving_mean = relay.var(\"moving_mean\", shape=c_shape)\n moving_var = relay.var(\"moving_var\", shape=c_shape)\n bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)\n out = bn[0]\n\n func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)\n func = set_func_attr(func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data = relay.var(\"data\", shape=d_shape)\n gamma = relay.var(\"gamma\", shape=c_shape)\n beta = relay.var(\"beta\", shape=c_shape)\n moving_mean = relay.var(\"moving_mean\", shape=c_shape)\n moving_var = relay.var(\"moving_var\", shape=c_shape)\n main_f = relay.Function(\n [data, gamma, beta, moving_mean, moving_var],\n glb_var(data, gamma, beta, moving_mean, moving_var),\n )\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n data = relay.var(\"data\", shape=d_shape)\n gamma = relay.var(\"gamma\", shape=c_shape)\n beta = relay.var(\"beta\", shape=c_shape)\n moving_mean = relay.var(\"moving_mean\", shape=c_shape)\n moving_var = relay.var(\"moving_var\", shape=c_shape)\n bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)\n out = bn[0]\n main_f = relay.Function([data, gamma, beta, moving_mean, moving_var], out)\n ref_mod = 
tvm.IRModule()\n ref_mod[\"main\"] = main_f\n ref_mod = transform.InferType()(ref_mod)\n\n return mod, ref_mod\n\n mod, ref_mod = gen_bn()\n\n data = np.random.uniform(-1, 1, d_shape).astype(dtype)\n gamma = np.random.uniform(-1, 1, c_shape).astype(dtype)\n beta = np.random.uniform(-1, 1, c_shape).astype(dtype)\n moving_mean = np.random.uniform(-1, 1, c_shape).astype(dtype)\n moving_var = np.random.uniform(-1, 1, c_shape).astype(dtype)\n check_result(\n mod,\n ref_mod,\n {\n \"data\": data,\n \"gamma\": gamma,\n \"beta\": beta,\n \"moving_mean\": moving_mean,\n \"moving_var\": moving_var,\n },\n d_shape,\n tol=1e-5,\n )\n\n\ndef test_multiple_ops():\n \"\"\"Test a subgraph with multiple operators.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n ishape = (1, 32, 14, 14)\n w1shape = (32, 32, 3, 3)\n w2shape = (64, 32, 5, 5)\n\n def get_net():\n data = relay.var(\"data\", relay.TensorType(ishape, dtype))\n w1 = relay.var(\"w1\", relay.TensorType(w1shape, dtype))\n w2 = relay.var(\"w2\", relay.TensorType(w2shape, dtype))\n\n layer = relay.nn.conv2d(data=data, weight=w1, kernel_size=(3, 3), padding=(1, 1))\n layer = relay.nn.relu(layer)\n layer = relay.nn.conv2d(data=layer, weight=w2, kernel_size=(5, 5), padding=(2, 2))\n layer = relay.nn.relu(layer)\n\n main_f = relay.Function([data, w1, w2], layer)\n mod = tvm.IRModule()\n mod[\"main\"] = main_f\n return mod\n\n def get_partitoned_mod(mod):\n remove_bn_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n ]\n )\n byoc_pass = tvm.transform.Sequential(\n [\n remove_bn_pass,\n transform.AnnotateTarget(\"dnnl\"),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n return byoc_pass(mod)\n\n ref_mod = 
get_net()\n mod = get_partitoned_mod(ref_mod)\n\n data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1 = np.random.uniform(0, 1, w1shape).astype(dtype)\n w2 = np.random.uniform(0, 1, w2shape).astype(dtype)\n check_result(\n mod,\n ref_mod,\n {\n \"data\": data,\n \"w1\": w1,\n \"w2\": w2,\n },\n (1, 64, 14, 14),\n tol=1e-5,\n )\n\n\ndef test_composite():\n \"\"\"Test DNNL patterns and there composite functions.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n\n def conv2d_relu():\n ishape = (1, 32, 14, 14)\n w1shape = (32, 32, 3, 3)\n\n # Composite function\n in_1 = relay.var(\"in_1\", shape=ishape, dtype=dtype)\n in_2 = relay.var(\"in_2\", shape=w1shape, dtype=dtype)\n conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))\n relu = relay.nn.relu(conv2d)\n func = relay.Function([in_1, in_2], relu)\n func = func.with_attr(\"Composite\", \"dnnl.conv2d_relu\")\n func = func.with_attr(\"PartitionedFromPattern\", \"nn.conv2d_nn.relu_\")\n\n # Partition function\n arg_1 = relay.var(\"arg_1\", shape=ishape, dtype=dtype)\n arg_2 = relay.var(\"arg_2\", shape=w1shape, dtype=dtype)\n call = relay.Call(func, [arg_1, arg_2])\n p_func = relay.Function([arg_1, arg_2], call)\n p_func = set_func_attr(p_func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = p_func\n mod = transform.InferType()(mod)\n\n # Main function\n data = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n main_func = relay.Function([data, weight], glb_var(data, weight))\n mod[\"main\"] = main_func\n mod = transform.InferType()(mod)\n\n # Reference module\n data = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))\n relu = 
relay.nn.relu(conv2d)\n main_func = relay.Function([data, weight], relu)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_func\n ref_mod = transform.InferType()(ref_mod)\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n\n return mod, ref_mod, {\"data\": i_data, \"weight\": w1_data}, (1, 32, 14, 14)\n\n def conv2d_bias_relu():\n ishape = (1, 32, 14, 14)\n w1shape = (32, 32, 3, 3)\n bshape = (32, 1, 1)\n\n # Composite function\n in_1 = relay.var(\"in_1\", shape=ishape, dtype=dtype)\n in_2 = relay.var(\"in_2\", shape=w1shape, dtype=dtype)\n in_3 = relay.var(\"in_3\", shape=bshape, dtype=dtype)\n conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))\n add = relay.add(conv2d, in_3)\n relu = relay.nn.relu(add)\n func = relay.Function([in_1, in_2, in_3], relu)\n func = func.with_attr(\"Composite\", \"dnnl.conv2d_bias_relu\")\n func = func.with_attr(\"PartitionedFromPattern\", \"nn.conv2d_add_nn.relu_\")\n\n # Partition function\n arg_1 = relay.var(\"arg_1\", shape=ishape, dtype=dtype)\n arg_2 = relay.var(\"arg_2\", shape=w1shape, dtype=dtype)\n arg_3 = relay.var(\"arg_3\", shape=bshape, dtype=dtype)\n call = relay.Call(func, [arg_1, arg_2, arg_3])\n p_func = relay.Function([arg_1, arg_2, arg_3], call)\n p_func = set_func_attr(p_func, \"dnnl\", \"dnnl_0\")\n glb_var = relay.GlobalVar(\"dnnl_0\")\n mod = tvm.IRModule()\n mod[glb_var] = p_func\n mod = transform.InferType()(mod)\n\n # Main function\n data = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n bias = relay.var(\"bias\", shape=bshape, dtype=dtype)\n main_func = relay.Function([data, weight, bias], glb_var(data, weight, bias))\n mod[\"main\"] = main_func\n mod = transform.InferType()(mod)\n\n # Reference module\n data = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight = relay.var(\"weight\", shape=w1shape, dtype=dtype)\n bias = relay.var(\"bias\", 
shape=bshape, dtype=dtype)\n conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))\n add = relay.add(conv2d, bias)\n relu = relay.nn.relu(add)\n main_func = relay.Function([data, weight, bias], relu)\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = main_func\n ref_mod = transform.InferType()(ref_mod)\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n b_data = np.random.uniform(0, 1, bshape).astype(dtype)\n\n return mod, ref_mod, {\"data\": i_data, \"weight\": w1_data, \"bias\": b_data}, (1, 32, 14, 14)\n\n for mod, ref_mod, input_maps, out_shape in [conv2d_relu(), conv2d_bias_relu()]:\n check_result(mod, ref_mod, input_maps, out_shape, tol=1e-5)\n\n\ndef test_constant():\n \"\"\"Test the subgraph with (var, const, ...) arguments.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n ishape = (1, 32, 14, 14)\n wshape = (32, 32, 3, 3)\n\n data = relay.var(\"data\", shape=ishape, dtype=dtype)\n weight = relay.var(\"weight\", shape=wshape, dtype=dtype)\n bn_gamma = relay.var(\"bn_gamma\")\n bn_beta = relay.var(\"bn_beta\")\n bn_mmean = relay.var(\"bn_mean\")\n bn_mvar = relay.var(\"bn_var\")\n\n layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3), padding=(1, 1))\n bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n out = bn_output[0]\n out = relay.nn.relu(out)\n\n func = relay.Function(relay.analysis.free_vars(out), out)\n ref_mod, params = tvm.relay.testing.create_workload(func)\n ref_mod[\"main\"] = bind_params_by_name(ref_mod[\"main\"], params)\n\n remove_bn_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n ]\n )\n\n dnnl_patterns = get_pattern_table(\"dnnl\")\n composite_partition = tvm.transform.Sequential(\n [\n 
transform.MergeComposite(dnnl_patterns),\n transform.AnnotateTarget(\"dnnl\"),\n transform.PartitionGraph(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n ref_mod = remove_bn_pass(ref_mod)\n mod = composite_partition(ref_mod)\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n check_result(mod, ref_mod, {\"data\": i_data}, (1, 32, 14, 14), tol=1e-5)\n\n\ndef test_partial_constant():\n \"\"\"Test the subgraph with (const, var, const, var) arguments.\"\"\"\n if not tvm.get_global_func(\"runtime.DNNLJSONRuntimeCreate\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n ishape = (10, 10)\n\n in_1 = relay.var(\"in_1\", shape=ishape, dtype=dtype)\n in_2 = relay.var(\"in_2\", shape=ishape, dtype=dtype)\n in_3 = relay.var(\"in_3\", shape=ishape, dtype=dtype)\n in_4 = relay.var(\"in_4\", shape=ishape, dtype=dtype)\n\n add1 = relay.add(in_1, in_2)\n add2 = relay.add(add1, in_3)\n add3 = relay.add(add2, in_3)\n add4 = relay.add(add3, in_3)\n\n func = relay.Function([in_1, in_2, in_3, in_4], add4)\n ref_mod = tvm.IRModule.from_expr(func)\n ref_mod = relay.transform.InferType()(ref_mod)\n\n data1 = np.random.uniform(0, 1, ishape).astype(dtype)\n data3 = np.random.uniform(0, 1, ishape).astype(dtype)\n\n params = {\n \"in_1\": tvm.nd.array(data1, device=tvm.cpu(0)),\n \"in_3\": tvm.nd.array(data3, device=tvm.cpu(0)),\n }\n ref_mod[\"main\"] = bind_params_by_name(ref_mod[\"main\"], params)\n\n opt_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n transform.AnnotateTarget(\"dnnl\"),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n mod = opt_pass(ref_mod)\n\n data2 = np.random.uniform(0, 1, ishape).astype(dtype)\n data4 = np.random.uniform(0, 1, ishape).astype(dtype)\n 
check_result(mod, ref_mod, {\"in_2\": data2, \"in_4\": data4}, (10, 10), tol=1e-5)\n\n\nif __name__ == \"__main__\":\n test_conv2d()\n test_add()\n test_relu()\n test_dense()\n test_bn()\n test_multiple_ops()\n test_composite()\n test_constant()\n test_partial_constant()\n"
] |
[
[
"numpy.array",
"numpy.ones",
"numpy.reshape",
"numpy.zeros"
],
[
"numpy.random.uniform"
]
] |
rjx678/facenet_demo
|
[
"c804eb3aa176e07470c8db40e30c1697fa3d3823"
] |
[
"src/align/align_dataset_mtcnn.py"
] |
[
"\"\"\"Performs face alignment and stores face thumbnails in the output directory.\"\"\"\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport facenet\nimport align.detect_face\nimport random\nfrom scipy import misc\nfrom time import sleep\nimport math\nimport cv2\nimport matplotlib.pyplot as plt\ndef main(args):\n sleep(random.random())\n # 如果还没有输出文件夹,则创建\n # 设置对齐后的人脸图像存放的路径\n output_dir = os.path.expanduser(args.output_dir)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # 在日志目录的文本文件中存储一些Git修订信息\n # Store some git revision info in a text file in the log directory\n src_path,_ = os.path.split(os.path.realpath(__file__))\n # 
在output_dir文件夹下创建revision_info.txt文件,里面存的是执行该命令时的参数信息\n # 当前使用的tensorflow版本,git hash,git diff\n facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))\n # 获取数据集下所有人名和其人名目录下是所有图片,\n # 放到ImageClass类中,再将类存到dataset列表里\n\n dataset = facenet.get_dataset(args.input_dir)\n \n print('Creating networks and loading parameters')\n '''2、建立MTCNN网络,并预训练(即使用训练好的网络初始化参数)'''\n with tf.Graph().as_default():\n # 设置Session的GPU参数,每条线程分配多少显存\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n # 获取P-Net,R-Net,O-Net网络\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n \n minsize = 20 # minimum size of face最小尺寸\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold阈值\n factor = 0.709 # scale factor 比例因子\n\n # Add a random key to the filename to allow alignment using multiple processes\n\n # 获取一个随机数,用于创建下面的文件名\n random_key = np.random.randint(0, high=99999)\n # 将图片和求得的相应的Bbox保存到bounding_boxes_XXXXX.txt文件里\n bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)\n '''3、每个图片中人脸所在的边界框写入记录文件中'''\n with open(bounding_boxes_filename, \"w\") as text_file:\n # 处理图片的总数量\n nrof_images_total = 0\n nrof_successfully_aligned = 0\n # 是否对所有图片进行洗牌\n if args.random_order:\n random.shuffle(dataset)\n # 获取每一个人,以及对应的所有图片的绝对路径\n for cls in dataset:\n # 每一个人对应的输出文件夹\n output_class_dir = os.path.join(output_dir, cls.name)\n # 如果目的文件夹里还没有相应的人名的文件夹,则创建相应文件夹\n if not os.path.exists(output_class_dir):\n os.makedirs(output_class_dir)\n if args.random_order:\n random.shuffle(cls.image_paths)\n #遍历每一张图片\n for image_path in cls.image_paths:\n nrof_images_total += 1\n # 对齐后的图片文件名\n filename = os.path.splitext(os.path.split(image_path)[1])[0]\n output_filename = os.path.join(output_class_dir, filename+'.png')\n print(image_path)\n if not os.path.exists(output_filename):\n try:\n # 读取图片文件\n img = 
misc.imread(image_path)\n except (IOError, ValueError, IndexError) as e:\n errorMessage = '{}: {}'.format(image_path, e)\n print(errorMessage)\n else:\n if img.ndim<2:\n print('Unable to align \"%s\"' % image_path)\n text_file.write('%s\\n' % (output_filename))\n continue\n if img.ndim == 2:\n img = facenet.to_rgb(img)\n img = img[:,:,0:3]\n # img = misc.imresize(img,0.8)\n #plt.imshow(img)\n #plt.show()\n # 检测人脸,bounding_boxes可能包含多张人脸框数据,\n # 一张人脸框有5个数据,第一和第二个数据表示框左上角坐标,第三个第四个数据表示框右下角坐标,\n # 最后一个数据应该是可信度\n\n # 人脸检测 bounding_boxes:表示边界框 形状为[n,5] 5对应x1,y1,x2,y2,score\n # _:人脸关键点坐标 形状为 [n,10]\n bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n # ###################仿射变换###########################\n rows,cols,hn = img.shape\n _new = np.transpose(_) # (10,2)->(2,10)\n for i in range(len(_new)):\n # print(\"左眼的位置(%s,%s)\" %(_new[i,0],_new[i,5]))\n # print(\"右眼的位置(%s,%s)\" %(_new[i,1],_new[i,6]))\n eye_center_x = (_new[i, 0] + _new[i, 1]) * 0.5\n eye_center_y = (_new[i, 5] + _new[i, 6]) * 0.5\n dy = _new[i, 5] - _new[i, 6]\n dx = _new[i, 0] - _new[i, 1]\n angle = math.atan2(dy, dx) * 180.0 / math.pi + 180.0\n #print(\"旋转角度为%s\" % angle)\n M = cv2.getRotationMatrix2D((eye_center_x, eye_center_y), angle, 1)\n dst = cv2.warpAffine(img, M, (cols, rows))\n ####################################################\n bounding_boxes, _ = align.detect_face.detect_face(dst, minsize,\n pnet, rnet, onet,\n threshold, factor)\n\n\n # 获得的人脸数量(#边界框个数)\n nrof_faces = bounding_boxes.shape[0]\n if nrof_faces>0:\n # [n,4] 人脸框\n det = bounding_boxes[:,0:4]\n # 保存所有人脸框\n det_arr = []\n # 原图片大小\n img_size = np.asarray(dst.shape)[0:2]\n #img_size = np.asarray(img.shape)[0:2]\n if nrof_faces>1:\n # 一张图片中检测多个人脸\n if args.detect_multiple_faces:\n # 如果要检测多张人脸的话\n for i in range(nrof_faces):\n det_arr.append(np.squeeze(det[i]))\n else:\n # 即使有多张人脸,也只要一张人脸就够了\n # 获取人脸框的大小\n bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])\n # 原图片中心坐标\n 
img_center = img_size / 2\n # 求人脸框中心点相对于图片中心点的偏移,\n # (det[:,0]+det[:,2])/2和(det[:,1]+det[:,3])/2组成的坐标其实就是人脸框中心点\n offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])\n # 求人脸框中心到图片中心偏移的平方和\n # 假设offsets=[[ 4.20016056 145.02849352 -134.53862838] [ -22.14250919 -26.74770141 -30.76835772]]\n # 则offset_dist_squared=[ 507.93206189 21748.70346425 19047.33436466]\n offset_dist_squared = np.sum(np.power(offsets,2.0),0)\n # 用人脸框像素大小减去偏移平方和的两倍,得到的结果哪个大就选哪个人脸框\n # 其实就是综合考虑了人脸框的位置和大小,优先选择框大,又靠近图片中心的人脸框\n index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering\n det_arr.append(det[index,:])\n else:\n # 只有一个人脸框的话,那就没得选了\n det_arr.append(np.squeeze(det))\n # 遍历每一个人脸框\n for i, det in enumerate(det_arr):\n # [4,] 边界框扩大margin区域,并进行裁切\n det = np.squeeze(det)\n bb = np.zeros(4, dtype=np.int32)\n # 边界框周围的裁剪边缘,就是我们这里要裁剪的人脸框要比MTCNN获取的人脸框大一点,\n # 至于大多少,就由margin参数决定了\n bb[0] = np.maximum(det[0]-args.margin/2, 0)\n bb[1] = np.maximum(det[1]-args.margin/2, 0)\n bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])\n bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])\n # 裁剪人脸框,再缩放\n cropped = dst[bb[1]:bb[3],bb[0]:bb[2],:]\n #cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n # 缩放到指定大小,并保存图片,以及边界框位置信息\n scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')\n nrof_successfully_aligned += 1\n filename_base, file_extension = os.path.splitext(output_filename)#分离文件名和扩展名\n if args.detect_multiple_faces:\n output_filename_n = \"{}_{}{}\".format(filename_base, i, file_extension)\n else:\n output_filename_n = \"{}{}\".format(filename_base, file_extension)\n # 保存图片\n misc.imsave(output_filename_n, scaled)\n # 记录信息到bounding_boxes_XXXXX.txt文件里\n text_file.write('%s %d %d %d %d\\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))\n else:\n print('Unable to align \"%s\"' % image_path)\n text_file.write('%s\\n' % (output_filename))\n \n print('Total number of images: %d' % 
nrof_images_total)\n print('Number of successfully aligned images: %d' % nrof_successfully_aligned)\n \n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n \n parser.add_argument('input_dir', type=str, help='Directory with unaligned images.')\n parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.')\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=160)\n parser.add_argument('--margin', type=int,\n help='Margin for the crop around the bounding box (height, width) in pixels.', default=32)\n parser.add_argument('--random_order', \n help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')\n parser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.25)\n parser.add_argument('--detect_multiple_faces', type=bool,\n help='Detect and align multiple faces per image.', default=False)\n return parser.parse_args(argv)\n\n# if __name__ == '__main__':\n# main(parse_arguments(sys.argv[1:]))\n"
] |
[
[
"numpy.asarray",
"numpy.zeros",
"numpy.minimum",
"tensorflow.Graph",
"scipy.misc.imresize",
"scipy.misc.imread",
"tensorflow.ConfigProto",
"numpy.random.randint",
"numpy.transpose",
"numpy.argmax",
"scipy.misc.imsave",
"numpy.power",
"numpy.squeeze",
"tensorflow.GPUOptions",
"numpy.vstack",
"numpy.maximum"
]
] |
OceanSnape/twembeddings
|
[
"5b18db7427221fa64c4978a0b7f77b78802a101c"
] |
[
"twembeddings/build_features_matrix.py"
] |
[
"# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport pandas as pd\nfrom .embeddings import TfIdf, W2V, BERT, SBERT, Elmo, ResNetLayer, DenseNetLayer, USE\nfrom scipy.sparse import issparse, save_npz, load_npz\nimport numpy as np\nimport os\nimport re\nimport csv\nfrom unidecode import unidecode\nfrom datetime import datetime, timedelta\n\n__all__ = ['build_matrix', 'load_dataset', 'load_matrix']\n\nES_DATE_FORMAT = \"%a %b %d %H:%M:%S +0000 %Y\"\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s', level=logging.INFO)\ntext_embeddings = ['tfidf_all_tweets', 'tfidf_dataset', 'w2v_afp_fr', 'w2v_gnews_en', 'w2v_twitter_fr',\n \"w2v_twitter_en\", \"elmo\", \"bert\", \"bert_tweets\", \"sbert_sts\", \"sbert_stsshort\",\n \"sbert_tweets_sts\", \"sbert_nli_sts\", \"sbert_tweets_sts_long\", \"use_multilingual\", \"use\"]\nimage_embeddings = [\"resnet\", \"densenet\"]\n\n\ndef find_date_created_at(created_at):\n return (datetime.strptime(created_at, ES_DATE_FORMAT) - timedelta(hours=5)).strftime(\"%Y%m%d\")\n\ndef remove_repeted_characters(expr):\n #limit number of repeted letters to 3. 
For example loooool --> loool\n string_not_repeted = \"\"\n for item in re.findall(r\"((.)\\2*)\", expr):\n if len(item[0]) <= 3:\n string_not_repeted += item[0]\n else:\n string_not_repeted += item[0][:3]\n return string_not_repeted\n\n\ndef camel_case_split(expr):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', expr)\n return \" \".join([m.group(0) for m in matches])\n\n\ndef format_text(text, **format):\n # remove urls\n text = re.sub(r\"http\\S+\", '', text, flags=re.MULTILINE)\n if format[\"remove_mentions\"]:\n text = re.sub(r\"@\\S+\", '', text, flags=re.MULTILINE)\n # translate to equivalent ascii characters\n if format[\"unidecode\"]:\n text = unidecode(text)\n\n new_text = []\n for word in re.split(r\"[' ]\", text):\n # remove numbers longer than 4 digits\n if len(word) < 5 or not word.isdigit():\n if word.startswith(\"#\") and format[\"hashtag_split\"]:\n new_text.append(camel_case_split(word[1:]))\n else:\n new_text.append(word)\n text = remove_repeted_characters(\" \".join(new_text))\n if format[\"lower\"]:\n text = text.lower()\n return text\n\n\ndef build_path(**args):\n if args[\"dataset\"].startswith(\"event2018\"):\n dataset = args[\"dataset\"]\n else:\n dataset = args[\"dataset\"].split(\"/\")[-1].replace(\".tsv\", \"\")\n\n file_name = args[\"annotation\"]\n for arg in [\"text+\", \"hashtag_split\", \"svd\", \"tfidf_weights\"]:\n if args[arg]:\n file_name += \"_\" + arg\n return os.path.join(\"data\", dataset, args[\"model\"], file_name)\n\n\ndef save_matrix(X, **args):\n path = build_path(**args)\n os.makedirs(os.path.join(*path.split(\"/\")[:-1]), exist_ok=True)\n if issparse(X):\n save_npz(path, X)\n else:\n np.save(path, X)\n\n\ndef apply_mask(path, suffix, args, column):\n X = np.load(path + suffix) if suffix == \".npy\" else load_npz(path + suffix)\n data = load_dataset(args[\"dataset\"], args[\"annotation\"], args[\"text+\"])\n mask = data[column].notna()\n return X[mask]\n\n\ndef load_matrix(**args):\n 
path = build_path(**args)\n for suffix in [\".npy\"]:\n if os.path.exists(path + suffix):\n return np.load(path + suffix) if suffix == \".npy\" else load_npz(path + suffix)\n\n if args[\"dataset\"] == \"event2018\":\n if args[\"annotation\"] == \"annotated\":\n args1 = args.copy()\n args1[\"annotation\"] = \"examined\"\n path = build_path(**args1)\n for suffix in [\".npy\"]:\n if os.path.exists(path + suffix):\n return apply_mask(path, suffix, args1, \"label\")\n elif args[\"dataset\"] == \"event2018_image\":\n args1 = args.copy()\n args1[\"dataset\"] = \"event2018\"\n path = build_path(**args1)\n for suffix in [\".npy\"]:\n if os.path.exists(path + suffix):\n return apply_mask(path, suffix, args1, \"image\")\n elif args[\"dataset\"] == \"event2018_image\" and args[\"annotation\"] == \"annotated\":\n args1 = args.copy()\n args1[\"annotation\"] = \"examined\"\n args1[\"dataset\"] = \"event2018_image\"\n path = build_path(**args1)\n if os.path.exists(path + suffix):\n return apply_mask(path, suffix, args1, \"label\")\n\n\ndef load_dataset(dataset, annotation, text=False):\n data = pd.read_csv(dataset,\n sep=\"\\t\",\n quoting=csv.QUOTE_ALL,\n dtype={\"id\": str, \"label\": float, \"created_at\": str, \"text\": str}\n )\n if annotation == \"annotated\" and \"label\" in data.columns:\n data = data[data.label.notna()]\n elif annotation == \"examined\" and \"label\" in data.columns:\n data = data[data.event.notna()]\n if dataset == \"data/event2018_image\":\n data = data[data.image.notna()]\n\n if text == \"text+\" and \"text+quote+reply\" in data.columns:\n data = data.rename(columns={\"text\": \"text_not_formated\", \"text+quote+reply\": \"text\"})\n data[\"date\"] = data[\"created_at\"].apply(find_date_created_at)\n return data.drop_duplicates(\"id\").sort_values(\"id\").reset_index(drop=True)\n\n\ndef build_matrix(**args):\n X = load_matrix(**args)\n if args[\"model\"] in text_embeddings:\n data = load_dataset(args[\"dataset\"], args[\"annotation\"], 
args[\"text+\"])\n if X is not None:\n logging.info(\"Matrix already stored\")\n return X, data\n\n if args[\"model\"].startswith(\"tfidf\"):\n vectorizer = TfIdf(lang=args[\"lang\"], binary=args[\"binary\"])\n if args[\"model\"].endswith(\"all_tweets\"):\n vectorizer.load_history(args[\"lang\"])\n data.text = data.text.apply(format_text,\n remove_mentions=True,\n unidecode=True,\n lower=True,\n hashtag_split=args[\"hashtag_split\"]\n )\n count_matrix = vectorizer.add_new_samples(data)\n X = vectorizer.compute_vectors(count_matrix, min_df=10, svd=args[\"svd\"], n_components=100)\n\n elif args[\"model\"].startswith(\"w2v\"):\n vectorizer = W2V(args[\"model\"], lang=args[\"lang\"])\n if args[\"lang\"] == \"fr\":\n data.text = data.text.apply(format_text,\n remove_mentions=True,\n unidecode=True,\n lower=True,\n hashtag_split=args[\"hashtag_split\"]\n )\n elif args[\"model\"] == \"w2v_twitter_en\":\n data.text = data.text.apply(format_text,\n remove_mentions=False,\n unidecode=False,\n lower=False,\n hashtag_split=args[\"hashtag_split\"]\n )\n elif args[\"model\"] == \"w2v_gnews_en\":\n data.text = data.text.apply(format_text,\n remove_mentions=True,\n unidecode=False,\n lower=False,\n hashtag_split=args[\"hashtag_split\"]\n )\n if args[\"tfidf_weights\"]:\n X = vectorizer.compute_weighted_vectors(data, args[\"lang\"])\n else:\n X = vectorizer.compute_vectors(data)\n\n elif args[\"model\"] == \"elmo\":\n data.text = data.text.apply(format_text,\n remove_mentions=False,\n unidecode=False,\n lower=False,\n hashtag_split=True\n )\n vectorizer = Elmo(lang=args[\"lang\"])\n X = vectorizer.compute_vectors(data)\n\n elif args[\"model\"].startswith(\"bert\"):\n data.text = data.text.apply(format_text,\n remove_mentions=False,\n unidecode=False,\n lower=False,\n hashtag_split=True\n )\n vectorizer = BERT()\n X = vectorizer.compute_vectors(data)\n\n elif args[\"model\"].startswith(\"sbert\"):\n data.text = data.text.apply(format_text,\n remove_mentions=False,\n 
unidecode=False,\n lower=False,\n hashtag_split=True\n )\n vectorizer = SBERT(lang=args[\"lang\"])\n X = vectorizer.compute_vectors(data)\n\n elif args[\"model\"].startswith(\"use\"):\n data.text = data.text.apply(format_text,\n remove_mentions=False,\n unidecode=False,\n lower=False,\n hashtag_split=True\n )\n vectorizer = USE()\n X = vectorizer.compute_vectors(data)\n\n elif args[\"model\"] == \"resnet\":\n vectorizer = ResNetLayer()\n X = vectorizer.compute_vectors(\"data/images/event2018_image/\")\n\n elif args[\"model\"] == \"densenet\":\n vectorizer = DenseNetLayer()\n X = vectorizer.compute_vectors(\"data/images/event2018_image/\")\n\n if args[\"save\"]:\n save_matrix(X, **args)\n\n return X, data\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('--dataset',\n required=False,\n default=\"event2018\",\n help=\"\"\"\n - 'event2018' : Event2018 dataset; \n - 'event2018_image' : all tweets in Event2018 that contain an image. We include tweets that\n quote an image or reply to an image;\n - any other value should be the path to your own dataset in tsv format;\n \"\"\")\n parser.add_argument('--model',\n nargs='+',\n required=True,\n choices=text_embeddings + image_embeddings,\n help=\"\"\"\n Choose one text embedding AND/OR one image embedding\n \"\"\"\n )\n parser.add_argument(\"--save\", dest=\"save\", default=False,\n action=\"store_true\",\n help=\"\"\"\n Save the matrix on disk (.npy format for dense matrix, .npz for sparse matrix)\n \"\"\")\n parser.add_argument(\"--svd\", dest=\"svd\", default=False,\n action=\"store_true\",\n help=\"\"\"\n Only for TfIdf embedding: create a dense matrix of shape (n_documents, 100) \n using Singular Value Decomposition\n \"\"\")\n parser.add_argument('--binary', dest=\"binary\", default=True,\n action=\"store_false\",\n help=\"\"\"\n Only for TfIdf embedding: if True, all non-zero term counts are set to 1. 
\n This does not mean outputs will have only 0/1 values, only that the tf term \n in tf-idf is binary.\n \"\"\")\n\n parser.add_argument(\"--hashtag_split\", dest=\"hashtag_split\", default=False,\n action=\"store_true\",\n help=\"\"\"\n Split hashtags into words on capital letters (#FollowFriday --> Follow Friday)\n \"\"\")\n parser.add_argument(\"--tfidf_weights\", dest=\"tfidf_weights\", default=False,\n action=\"store_true\",\n help=\"\"\"\n Only for w2v embedding: each word vector of each document is weighted with\n its tfidf weight\n \"\"\")\n parser.add_argument('--text+',\n dest=\"text+\", default=False,\n action=\"store_true\",\n help=\"\"\"\n Only if --dataset argument is set to \"event2018\" or \"event2018_image\"\n Use the text of the tweet + the text of the tweet quoted or replied to \n \"\"\")\n parser.add_argument('--annotation',\n required=False,\n default=\"annotated\",\n choices=[\"annotated\", \"examined\"],\n help=\"\"\"\n Only if --dataset argument is set to \"default\" or \"has_image\"\n - annotated : (default) all tweets annotated as related to an event; \n - examined : all tweets annotated as related or unrelated to an event; \n \"\"\"\n # - no : all tweets in the dataset regardless of annotation\n )\n parser.add_argument('--lang',\n required=False,\n default=\"fr\",\n choices=[\"fr\", \"en\"]\n )\n\n args = vars(parser.parse_args())\n for m in args[\"model\"]:\n args1 = args.copy()\n X = []\n args1[\"model\"] = m\n matrix = build_matrix(**args1)\n"
] |
[
[
"scipy.sparse.issparse",
"scipy.sparse.load_npz",
"numpy.load",
"numpy.save",
"pandas.read_csv",
"scipy.sparse.save_npz"
]
] |
zeta1999/OpenJij
|
[
"0fe03f07af947f519a32ad58fe20423919651634"
] |
[
"openjij/sampler/cmos_annealer.py"
] |
[
"# Copyright 2019 Jij Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\nimport json\n\nfrom openjij.sampler import BaseSampler\nimport openjij\n\nimport numpy as np\nimport dimod\n\n\nclass CMOSAnnealer(BaseSampler, dimod.Structured):\n \"\"\"Sampler with CMOS Annealer.\n\n Inherits from :class:`openjij.sampler.sampler.BaseSampler`.\n\n To get More details about CMOS Annealer API,\n please access the reference (https://annealing-cloud.com/web-api/reference.html).\n\n Args:\n token (str): API token of COMS Annealer.\n machine_type (str): Type of CMOS Annealer: 'ASIC' or 'FPGA'.\n beta_min (float): Minimum beta (inverse temperature).\n beta_max (float): Maximum beta (inverse temperature).\n step_length (int): Length of Monte Carlo step.\n num_sweeps (int): Number of Monte Carlo step.\n num_reads (int): Number of iterations.\n **kwargs: Optional keyword arguments for CMOS Annealer.\n\n Attributes:\n cmos_parameters (dict): Parameters of CMOS Annealer.\n\n Raises:\n ValueError:\n - only valid input: h, J, Q, or king graph.\n - CMOS Annealer API raises error.\n\n \"\"\"\n\n def __init__(self, token, machine_type=\"ASIC\",\n beta_min=None, beta_max=None,\n num_sweeps=10, step_length=100, num_reads=1, **kwargs):\n\n self.token = token\n self.machine_type = machine_type\n # setting machine type and 2D Grid size\n # more information see: https://annealing-cloud.com/web-api/reference/v2.html\n if machine_type == 'ASIC':\n self._X, self._Y = 352, 176\n 
self.max_abs = 3\n elif machine_type == 'FPGA':\n self._X, self._Y = 80, 80\n self.max_abs = 127\n else:\n raise ValueError(\"machine_type is 'ASIC' or 'FPGA'.\")\n\n self.beta_min = beta_min\n self.beta_max = beta_max\n self.num_sweeps = num_sweeps\n self.num_reads = num_reads\n self._schedule_setting = {\n 'beta_min': beta_min,\n 'beta_max': beta_max,\n 'num_sweeps': num_sweeps,\n 'num_reads': num_reads,\n 'step_length': step_length\n }\n\n @property\n def parameters(self):\n return {'num_reads': ['1<=n<=10', lambda n: 1 <= n <= 10],\n 'num_sweeps': ['1<=n<=100', lambda n: 1 <= n <= 100],\n 'step_length': ['1<=n<=1000', lambda n: 1 <= n <= 1000],\n 'beta_min': ['1/2000<=beta', lambda beta: 1/2000 <= beta],\n 'beta_max': ['1/2000<=beta', lambda beta: 1/2000 <= beta]}\n\n @property\n def properties(self):\n return {'System size': \"self._X, self._Y are size of 2D grid King's graph.\"}\n\n @property\n def nodelist(self):\n \"\"\"2-D lattice nodes\n\n Returns:\n [list of int]: 2-D lattice nodes\n \"\"\"\n return list(range(self._X*self._Y))\n\n @property\n def edgelist(self):\n \"\"\"King's graph\n\n Returns:\n [list of tuple]: King's graph\n \"\"\"\n edges = []\n for _x in range(self._X-1):\n for _y in range(self._Y-1):\n i = _x + self._X * _y\n edges += [\n (i, _x + 1 + self._X * _y),\n (i, _x + self._X * (_y + 1)),\n (i, _x + 1 + self._X * (_y + 1)),\n ]\n return edges\n\n def _validate_schedule(self):\n for k, v in self._schedule_setting.items():\n if not self.parameters[k][1](v):\n raise ValueError(\n 'The value of {} is invalid.'\n ' \"{}: {}\".'.format(k, k, self.parameters[k][0]))\n\n def sample_ising(self, h, J, num_reads=1,\n num_sweeps=None, step_length=None,\n beta_min=None, beta_max=None,):\n \"\"\"sample Ising model.\n\n Args:\n h (dict): linear biases\n J (dict): quadratic biases\n num_reads (int): number of reads\n num_sweeps (int): number of sweeps\n step_length (int): number of Monte Carlo steop for each step\n beta_min (float): minimal value of 
inverse temperature\n beta_max (float): maximum value of inverse temperature\n \"\"\"\n\n # settting annealing parameters and validation -------------\n self._setting_overwrite(\n beta_min=beta_min, beta_max=beta_max,\n num_sweeps=num_sweeps, num_reads=num_reads,\n step_length=step_length\n )\n # ------------- settting annealing parameters and validation\n\n # Convert to CMOS Annealing Cloud Web API format [xi,yi,xj,yj,pij] ---\n # from the Ising interaction dictionary.\n cmos_model = _cmos_annealer_format(\n h, J, self._X, self._Y, self.max_abs)\n beta_max, beta_min = defalut_beta_range(\n cmos_model,\n beta_max=self._schedule_setting['beta_max'],\n beta_min=self._schedule_setting['beta_min']\n )\n self._schedule_setting['beta_max'] = beta_max\n self._schedule_setting['beta_min'] = beta_min\n self._validate_schedule()\n headers, request = self.make_json_request(\n cmos_model, self.token\n )\n # --------------------------------------------------------------------\n\n # POST to API -------------------------------------\n url = 'https://annealing-cloud.com/api/v2/solve'\n res = requests.post(url, data=json.dumps(\n request, cls=NumpyEncoder), headers=headers)\n res_dict = res.json()\n # ------------------------------------- POST to API\n\n # API Error handling\n if res_dict['status'] != 0:\n raise ValueError('Error status: {}, message: {}'.format(\n res_dict['status'], res_dict['message']))\n\n # more infomation see : https://annealing-cloud.com/web-api/reference/v2.html\n info = {\n \"schedule\": {\n 'beta_max': self._schedule_setting['beta_max'],\n 'beta_min': self._schedule_setting['beta_min']\n },\n \"averaged_spins\": res_dict['result'][\"averaged_spins\"],\n \"averaged_energy\": res_dict['result'][\"averaged_energy\"],\n 'execution_time': res_dict['result']['execution_time'] * 10**(-3),\n 'job_id': res_dict['job_id'],\n 'posted_model': cmos_model\n }\n\n bqm = openjij.BinaryQuadraticModel(h, J)\n samples = [{x+self._X*y: s for x, y, s in spins}\n for spins in 
res_dict['result']['spins']]\n response = openjij.Response.from_samples_bqm(\n samples, bqm, info=info\n )\n return response\n\n def make_json_request(self, model, token):\n \"\"\"Make request for CMOS Annealer API.\n\n Args:\n model (list):\n A list of 5 integer values representing vertices\n or interactions of Ising model\n\n token (str):\n API token of COMS Annealer.\n\n \"\"\"\n\n headers = {\"Authorization\": \"Bearer \" + token}\n headers.update({\"Accept\": \"application/json\"})\n headers.update({'content-type': 'application/json'})\n\n request = {}\n request[\"model\"] = model # modelのみ必須項目\n request[\"type\"] = 1 if self.machine_type == \"ASIC\" else 2 # FPGA\n request[\"num_executions\"] = self.num_reads\n request[\"parameter\"] = {\n \"temperature_num_steps\": self._schedule_setting['num_sweeps'],\n \"temperature_step_length\": self._schedule_setting['step_length'],\n \"temperature_initial\": 1.0/self._schedule_setting['beta_min'],\n \"temperature_target\": 1.0/self._schedule_setting['beta_max']}\n request[\"outputs\"] = {\"energies\": True,\n \"spins\": True,\n \"execution_time\": True,\n \"num_outputs\": 0,\n \"averaged_spins\": True,\n \"averaged_energy\": True}\n\n return headers, request\n\n\ndef _cmos_annealer_format(h, J, X, Y, max_abs):\n\n interactions = []\n\n def _2d_pos(i, j, pij):\n xi, xj = i % X, j % X\n yi, yj = int((i-xi)/X), int((j-xj)/X)\n interactions.append(pij)\n return [xi, yi, xj, yj, pij]\n\n annealer_format = [_2d_pos(i, j, Jij) for (i, j), Jij in J.items()]\n annealer_format += [_2d_pos(i, i, hi) for i, hi in h.items()]\n # degitalize to integer\n max_int = np.max(np.abs(interactions))\n _interactions = np.array(annealer_format).T\n _interactions[-1] = _interactions[-1] * max_abs/max_int\n _interactions = [list(q) for q in _interactions.T.astype(int)]\n return _interactions\n\n\ndef defalut_beta_range(cmos_model, beta_max, beta_min):\n\n if beta_min is None or beta_max is None:\n interactions = {}\n\n def add_interactions(i, 
v):\n if i in interactions:\n interactions[i].append(np.abs(v))\n else:\n interactions[i] = [np.abs(v)]\n\n for xi, yi, xj, yj, Jij in cmos_model:\n add_interactions((xi, yi), Jij)\n add_interactions((xj, yj), Jij)\n\n abs_bias = np.array([np.sum(v) for v in interactions.values()])\n ising_interaction = np.abs(np.array(cmos_model).T[-1])\n min_delta_energy = np.min(ising_interaction[ising_interaction > 0])\n max_delta_energy = np.max(abs_bias[abs_bias > 0])\n\n beta_min = np.log(\n 2) / max_delta_energy if beta_min is None else beta_min\n beta_max = np.log(\n 100) / min_delta_energy if beta_max is None else beta_max\n\n beta_min = max(beta_min, 1/2000)\n beta_max = max(beta_max, 1/2000)\n\n return beta_max, beta_min\n\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NumpyEncoder, self).default(obj)\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.log",
"numpy.sum",
"numpy.min",
"numpy.abs"
]
] |
happy-beans/pylearning
|
[
"b20ef32ea65954d6b37e1026a6293a7c4d1d4f81"
] |
[
"statistics/s1.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport math\n\n# 合計\ndef getSum(ar):\n return np.sum(ar)\n\n# 平均\ndef getAvg(ar):\n return np.average(ar)\n\n# 偏差\ndef getDev(ar):\n a = getAvg(ar)\n d = []\n for i in ar:\n d.append(i - a)\n\n return d\n\n# 分散\ndef getVar(ar):\n return np.var(ar)\n\n# 標準偏差\ndef getStd(ar):\n return np.std(ar)\n\n# 偏差値\ndef getDV(ar):\n d = getDev(ar)\n s = getStd(ar)\n\n dv = []\n i = 0\n for i in range(len(ar)):\n dv.append(d[i] * 10 / s + 50)\n\n return dv\n\nif __name__ == \"__main__\":\n\n ar = [15,25,4,43,35,64,54,7,84,49]\n\n # 合計\n print(\"{name} = {val}\".format(name='合計',val=getSum(ar)))\n\n # 平均\n print(\"{name} = {val}\".format(name='平均',val=getAvg(ar)))\n\n # 偏差\n print(\"{name} = {val}\".format(name='偏差',val=getDev(ar)))\n\n # 分散\n print(\"{name} = {val}\".format(name='分散',val=getVar(ar)))\n\n # 標準偏差\n print(\"{name} = {val}\".format(name='標準偏差',val=getStd(ar)))\n\n # 偏差値\n print(\"{name} = {val}\".format(name='偏差値',val=getDV(ar)))\n"
] |
[
[
"numpy.average",
"numpy.sum",
"numpy.std",
"numpy.var"
]
] |
marcosboggia/gui_automation
|
[
"2adabfa71b00945ada04e619c1a36b124c1dda66"
] |
[
"gui_automation/background_handler.py"
] |
[
"# Made by Marcos Boggia\r\nimport io\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport win32gui\r\nimport win32ui\r\nfrom ctypes import windll\r\nfrom time import sleep\r\nfrom gui_automation.handler import Handler\r\n\r\n\r\nclass BackgroundHandlerWin32(Handler):\r\n \"\"\"\r\n Handler only for Windows. It can work on background(not viewable) apps. It needs the app name/window name.\r\n If needed you can pass the full hierarchy names as arguments to select a specific UI element.\r\n Eg: BackgroundHandlerWin32('window_name', 'main_ui_element', 'child_ui_element')\r\n If the element is not found raises ElementNotFound exception.\r\n \"\"\"\r\n\r\n def __init__(self, app_name, *args):\r\n \"\"\"\r\n\r\n :param app_name: app or window name of the desired app to be controlled.\r\n :param args: if you need to control a specific UI element, you can set these *args to determine the UI hierarchy\r\n names. Currently the condition asks if given name is 'in' the element name. Should be replaced with regexp in\r\n the future.\r\n \"\"\"\r\n self.x_dis = 0\r\n self.y_dis = 0\r\n result = self.get_gui_elem(app_name, *args)\r\n if result:\r\n self.hwnd = result\r\n self.img_hwnd = result\r\n else:\r\n raise ElementNotFound\r\n\r\n def set_img_hwnd(self, app_name, *args, x_dis=0, y_dis=0):\r\n \"\"\"\r\n It is possible that the handler used to control the app returns a black screen or similar.\r\n This function allows to set a different handler to get the screenshot of that GUI.\r\n Since there might be some positional difference between the original handler and the one used to obtain the\r\n image, x_dis and y_dis parameters can be used to fix these displacement.\r\n\r\n :param app_name: app or window name of the desired app to be controlled.\r\n :param args: if you need to control a specific UI element, you can set these *args to determine the UI hierarchy\r\n names. Currently the condition asks if given name is 'in' the element name. 
Should be replaced with regexp in\r\n the future.\r\n :param x_dis: displacement in pixels in between the handlers for the X axis.\r\n :param y_dis: same as x_dis but for the Y axis.\r\n :return:\r\n \"\"\"\r\n result = self.get_gui_elem(app_name, *args)\r\n if result:\r\n self.img_hwnd = result\r\n self.x_dis = x_dis\r\n self.y_dis = y_dis\r\n else:\r\n raise ElementNotFound\r\n\r\n def screenshot(self):\r\n \"\"\"\r\n Screenshot for background Win32 apps.\r\n :return: screen as OpenCV image format.\r\n \"\"\"\r\n\r\n # OBTAIN IMAGE OF THE WINDOW SCREEN\r\n left, top, right, bot = win32gui.GetWindowRect(self.img_hwnd.handle)\r\n w = right - left\r\n h = bot - top\r\n hwnd_dc = win32gui.GetWindowDC(self.img_hwnd.handle)\r\n mfc_dc = win32ui.CreateDCFromHandle(hwnd_dc)\r\n save_dc = mfc_dc.CreateCompatibleDC()\r\n save_bit_map = win32ui.CreateBitmap()\r\n save_bit_map.CreateCompatibleBitmap(mfc_dc, w, h)\r\n save_dc.SelectObject(save_bit_map)\r\n result = windll.user32.PrintWindow(self.img_hwnd.handle, save_dc.GetSafeHdc(), 1)\r\n if result == 0:\r\n return False\r\n bmpinfo = save_bit_map.GetInfo()\r\n bmpstr = save_bit_map.GetBitmapBits(True)\r\n img = Image.frombuffer(\r\n 'RGB',\r\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\r\n bmpstr, 'raw', 'BGRX', 0, 1)\r\n win32gui.DeleteObject(save_bit_map.GetHandle())\r\n save_dc.DeleteDC()\r\n mfc_dc.DeleteDC()\r\n win32gui.ReleaseDC(self.img_hwnd.handle, hwnd_dc)\r\n # CONVERT IT TO OPENCV FORMAT\r\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\r\n return img\r\n\r\n def click(self, x, y, clicks=1):\r\n for _ in range(clicks):\r\n print(f\"HANDLER clicked x: {x + self.x_dis} y: {y + self.y_dis}\")\r\n self.hwnd.click(coords=(x + self.x_dis, y + self.y_dis))\r\n\r\n def move(self, x, y):\r\n self.hwnd.move_mouse(coords=(x + self.x_dis, y + self.y_dis))\r\n\r\n def hold_click(self, x, y, time):\r\n refresh_rate = 0.05\r\n self.hwnd.press_mouse(coords=(x + self.x_dis, y + self.y_dis))\r\n while time > 0:\r\n 
sleep(refresh_rate)\r\n self.hwnd.move_mouse(coords=(x + self.x_dis, y + self.y_dis))\r\n time -= refresh_rate\r\n\r\n self.hwnd.release_mouse(coords=(x + self.x_dis, y + self.y_dis))\r\n\r\n def drag_click(self, start_x, start_y, end_x, end_y):\r\n self.hwnd.drag_mouse(press_coords=(start_x + self.x_dis, start_y + self.y_dis),\r\n release_coords=(end_x + self.x_dis, end_y + self.y_dis))\r\n\r\n def press_key(self, key):\r\n print(\"Not implemented yet\")\r\n\r\n def press_hotkey(self, *keys):\r\n print(\"Not implemented yet\")\r\n\r\n def write_string(self, key, interval=0):\r\n print(\"Not implemented yet\")\r\n\r\n @staticmethod\r\n def get_gui_elem(app_name, *args):\r\n \"\"\"\r\n\r\n :param app_name: name of the app. For eg: for Paint is MSPaintApp.\r\n :param args: name of the descendant elements ordered in hierarchy level.\r\n :return: returns the HWNDWrapper from pywinauto of the Gui element if found. Otherwise returns False.\r\n\r\n Due to some incompatibility between PyAutoGui and pywinauto, if pywinauto is imported, pyautogui stops working.\r\n That's why it is imported only when necessary(if this function is called).\r\n After this import, PyAutoGui will not continue to work. 
Which means foreground handler won't work neither.\r\n \"\"\"\r\n from pywinauto.controls import hwndwrapper\r\n\r\n data = [elem for elem in args]\r\n data.insert(0, app_name)\r\n windows_list = []\r\n\r\n def _append(hwnd, _):\r\n windows_list.append(hwndwrapper.HwndWrapper(hwnd))\r\n\r\n win32gui.EnumWindows(_append, [])\r\n\r\n for elem in windows_list:\r\n result = BackgroundHandlerWin32.process_element(elem, data, 0)\r\n if result:\r\n return result\r\n return False\r\n\r\n @staticmethod\r\n def process_element(elem, data, i):\r\n \"\"\"\r\n\r\n :param elem: HWNDWrapper element to start recursive search\r\n :param data: names of the elements ordered hierarchically.\r\n :param i: index of the data array.\r\n :return: returns the HWNDWrapper from pywinauto of the Gui element if found. Otherwise returns False.\r\n \"\"\"\r\n name1 = elem.friendlyclassname\r\n name2 = elem.element_info.name\r\n if (name1 is not None and data[i] in name1) or (name2 is not None and data[i] in name2):\r\n if i == len(data) - 1:\r\n return elem\r\n else:\r\n children = elem.children()\r\n BackgroundHandlerWin32.load_children(children)\r\n if children:\r\n for next_child in children:\r\n hwnd = BackgroundHandlerWin32.process_element(next_child, data, i + 1)\r\n if hwnd:\r\n return hwnd\r\n return False\r\n\r\n @staticmethod\r\n def load_children(children):\r\n \"\"\"\r\n WORKAROUND for children not being loaded. Rarely, printing them fix this issue. Maybe its some lazy loading.\r\n This supress stdout when printing.\r\n :param children:\r\n :return:\r\n \"\"\"\r\n from contextlib import redirect_stdout\r\n trap = io.StringIO()\r\n with redirect_stdout(trap):\r\n print(children)\r\n\r\n\r\nclass ElementNotFound(Exception):\r\n pass\r\n"
] |
[
[
"numpy.array"
]
] |
stephenjfox/trax
|
[
"918b1ce2ad63a24cb957ebc8e8ea0af1ee272666",
"918b1ce2ad63a24cb957ebc8e8ea0af1ee272666"
] |
[
"trax/data/text_encoder_test.py",
"trax/data/tf_inputs.py"
] |
[
"# coding=utf-8\n# Copyright 2022 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for trax.data.text_encoder.\"\"\"\n\nimport collections\nimport io\nimport os\nimport random\nimport shutil\nimport string\n\nimport mock\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\nfrom trax.data import text_encoder\n\n\nclass NativeToUnicodeTest(tf.test.TestCase):\n\n def test_native_to_unicode(self):\n s = r\"foo bar\"\n s_unicode = text_encoder.native_to_unicode(s)\n self.assertEqual(s_unicode, u\"foo bar\")\n\n\nclass EscapeUnescapeTokenTest(tf.test.TestCase):\n\n def test_escape_token(self):\n escaped = text_encoder._escape_token(\n \"Foo! Bar.\\nunder_score back\\\\slash\",\n set(\"abcdefghijklmnopqrstuvwxyz .\\n\") | text_encoder._ESCAPE_CHARS)\n\n self.assertEqual(\n \"\\\\70;oo\\\\33; \\\\66;ar.\\\\10;under\\\\uscore back\\\\\\\\slash_\", escaped)\n\n def test_unescape_token(self):\n unescaped = text_encoder._unescape_token(\n \"\\\\70;oo\\\\33; \\\\66;ar.\\\\10;under\\\\uscore back\\\\\\\\slash_\")\n\n self.assertEqual(\n \"Foo! 
Bar.\\nunder_score back\\\\slash\", unescaped)\n\n\nclass TokenTextEncoderTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Make sure the test dir exists and is empty.\"\"\"\n cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), \"encoder_test\")\n shutil.rmtree(cls.test_temp_dir, ignore_errors=True)\n tf.gfile.MakeDirs(cls.test_temp_dir)\n\n def test_save_and_reload(self):\n \"\"\"Test that saving and reloading doesn't change the vocab.\n\n Note that this test reads and writes to the filesystem, which necessitates\n that this test size be \"large\".\n \"\"\"\n\n corpus = \"A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\"\n vocab_filename = os.path.join(self.test_temp_dir, \"abc.vocab\")\n\n # Make text encoder from a list and store vocab to fake filesystem.\n encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split())\n encoder.store_to_file(vocab_filename)\n\n # Load back the saved vocab file from the fake_filesystem.\n new_encoder = text_encoder.TokenTextEncoder(vocab_filename)\n\n self.assertEqual(encoder._id_to_token, new_encoder._id_to_token)\n self.assertEqual(encoder._token_to_id, new_encoder._token_to_id)\n\n def test_reserved_tokens_in_corpus(self):\n \"\"\"Test that we handle reserved tokens appearing in the corpus.\"\"\"\n corpus = \"A B {} D E F {} G {}\".format(text_encoder.EOS,\n text_encoder.EOS,\n text_encoder.PAD)\n\n encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split())\n\n all_tokens = encoder._id_to_token.values()\n\n # If reserved tokens are removed correctly, then the set of tokens will\n # be unique.\n self.assertEqual(len(all_tokens), len(set(all_tokens)))\n\n\nclass SubwordTextEncoderTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Make sure the test dir exists and is empty.\"\"\"\n cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), \"encoder_test\")\n shutil.rmtree(cls.test_temp_dir, ignore_errors=True)\n tf.gfile.MakeDirs(cls.test_temp_dir)\n\n 
def test_encode_decode(self):\n corpus = (\n \"This is a corpus of text that provides a bunch of tokens from which \"\n \"to build a vocabulary. It will be used when strings are encoded \"\n \"with a TextEncoder subclass. The encoder was coded by a coder.\")\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n original = \"This is a coded sentence encoded by the SubwordTextEncoder.\"\n token_counts.update(original.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n # Encoding should be reversible.\n encoded = encoder.encode(original)\n decoded = encoder.decode(encoded)\n self.assertEqual(original, decoded)\n\n # The substrings coded and coder are frequent enough in the corpus that\n # they should appear in the vocabulary even though they are substrings\n # of other included strings.\n subtoken_strings = {encoder.all_subtoken_strings[i] for i in encoded}\n self.assertIn(\"encoded_\", subtoken_strings)\n self.assertIn(\"coded_\", subtoken_strings)\n self.assertIn(\"TextEncoder\", encoder.all_subtoken_strings)\n self.assertIn(\"coder\", encoder.all_subtoken_strings)\n\n # Every character in the corpus should be in the encoders alphabet and\n # its subtoken vocabulary.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_unicode(self):\n corpus = \"Cat emoticons. 
\\U0001F638 \\U0001F639 \\U0001F63A \\U0001F63B\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n self.assertIn(\"\\U0001F638\", encoder._alphabet)\n self.assertIn(\"\\U0001F63B\", encoder.all_subtoken_strings)\n\n def test_small_vocab(self):\n corpus = \"The quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 10, token_counts, 2, 10)\n\n # All vocabulary elements are in the alphabet and subtoken strings even\n # if we requested a smaller vocabulary to assure all expected strings\n # are encodable.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_long_tokens(self):\n \"\"\"Subword tokenization should still run efficiently with long tokens.\n\n To make it run efficiently, we need to use the `max_subtoken_length`\n argument when calling SubwordTextEncoder.build_to_target_size.\n \"\"\"\n token_length = 4000\n num_tokens = 50\n target_vocab_size = 600\n max_subtoken_length = 10 # Set this to `None` to get problems.\n max_count = 500\n\n # Generate some long random strings.\n random.seed(0)\n long_tokens = []\n for _ in range(num_tokens):\n long_token = \"\".join([random.choice(string.ascii_uppercase)\n for _ in range(token_length)])\n long_tokens.append(long_token)\n\n corpus = \" \".join(long_tokens)\n token_counts = collections.Counter(corpus.split(\" \"))\n alphabet = set(corpus) - {\" \"}\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n target_vocab_size, token_counts, 1, max_count, num_iterations=1,\n max_subtoken_length=max_subtoken_length)\n\n # All vocabulary elements are in the alphabet and subtoken strings even\n # if we requested a smaller vocabulary to assure all expected strings\n # are 
encodable.\n self.assertTrue(alphabet.issubset(encoder._alphabet))\n for a in alphabet:\n self.assertIn(a, encoder.all_subtoken_strings)\n\n def test_custom_reserved_tokens(self):\n \"\"\"Test that we can pass custom reserved tokens to SubwordTextEncoder.\"\"\"\n corpus = \"The quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n start_symbol = \"<S>\"\n end_symbol = \"<E>\"\n reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol,\n end_symbol]\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 10, token_counts, 2, 10, reserved_tokens=reserved_tokens)\n\n # Make sure that reserved tokens appear in the right places.\n self.assertEqual(encoder.decode([2]), start_symbol)\n self.assertEqual(encoder.decode([3]), end_symbol)\n\n # Make sure that we haven't messed up the ability to reconstruct.\n reconstructed_corpus = encoder.decode(encoder.encode(corpus))\n self.assertEqual(corpus, reconstructed_corpus)\n\n def test_encodable_when_not_in_alphabet(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n original = \"This has UPPER CASE letters that are out of alphabet\"\n\n # Early versions could have an infinite loop when breaking into subtokens\n # if there was any out-of-alphabet characters in the encoded string.\n encoded = encoder.encode(original)\n decoded = encoder.decode(encoded)\n\n self.assertEqual(original, decoded)\n encoded_str = \"\".join(encoder.all_subtoken_strings[i] for i in encoded)\n self.assertIn(\"\\\\84;\", encoded_str)\n\n @mock.patch.object(text_encoder, \"_ESCAPE_CHARS\", new=set(\"\\\\_;13579\"))\n def test_raises_exception_when_not_encodable(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required 
encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n original = \"This has UPPER CASE letters that are out of alphabet\"\n\n # Previously there was a bug which produced an infinite loop in this case.\n with self.assertRaises(AssertionError):\n encoder.encode(original)\n\n def test_load_from_file(self):\n # Test a vocab file with words not wrapped with single quotes\n encoder = text_encoder.SubwordTextEncoder()\n correct_vocab = [\"the\", \"and\", \"of\"]\n vocab = io.StringIO(\"the\\n\"\n \"and\\n\"\n \"of\\n\")\n encoder._load_from_file_object(vocab)\n self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab)\n\n # Test a vocab file with words wrapped in single quotes\n encoder = text_encoder.SubwordTextEncoder()\n vocab = io.StringIO(\"\\\"the\\\"\\n\"\n \"\\\"and\\\"\\n\"\n \"\\\"of\\\"\\n\")\n encoder._load_from_file_object(vocab)\n self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab)\n\n def test_reserved_token_chars_not_in_alphabet(self):\n corpus = \"dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n encoder1 = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 100)\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder1.store_to_file(filename)\n encoder2 = text_encoder.SubwordTextEncoder(filename=filename)\n\n self.assertEqual(encoder1._alphabet, encoder2._alphabet)\n\n for t in text_encoder.RESERVED_TOKENS:\n for c in t:\n # Verify that encoders can encode all reserved token chars.\n encoder1.encode(c)\n encoder2.encode(c)\n\n def test_save_and_reload(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = 
text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder.store_to_file(filename)\n new_encoder = text_encoder.SubwordTextEncoder(filename)\n\n self.assertEqual(encoder._alphabet, new_encoder._alphabet)\n self.assertEqual(encoder.all_subtoken_strings,\n new_encoder.all_subtoken_strings)\n self.assertEqual(encoder._subtoken_string_to_id,\n new_encoder._subtoken_string_to_id)\n self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len)\n\n def test_save_and_reload_no_single_quotes(self):\n corpus = \"the quick brown fox jumps over the lazy dog\"\n token_counts = collections.Counter(corpus.split(\" \"))\n\n # Deliberately exclude some required encoding chars from the alphabet\n # and token list, making some strings unencodable.\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size(\n 100, token_counts, 2, 10)\n\n filename = os.path.join(self.test_temp_dir, \"out.voc\")\n encoder.store_to_file(filename, add_single_quotes=False)\n new_encoder = text_encoder.SubwordTextEncoder(filename)\n\n self.assertEqual(encoder._alphabet, new_encoder._alphabet)\n self.assertEqual(encoder.all_subtoken_strings,\n new_encoder.all_subtoken_strings)\n self.assertEqual(encoder._subtoken_string_to_id,\n new_encoder._subtoken_string_to_id)\n self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len)\n\n def test_build_from_generator(self):\n\n corpus = \"The quick brown fox jumps over the lazy dog\"\n\n def gen():\n for _ in range(3):\n yield corpus\n\n start_symbol = \"<S>\"\n end_symbol = \"<E>\"\n reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol,\n end_symbol]\n encoder = text_encoder.SubwordTextEncoder.build_from_generator(\n gen(), 10, reserved_tokens=reserved_tokens)\n\n # Make sure that reserved tokens appear in the right places.\n self.assertEqual(encoder.decode([2]), start_symbol)\n self.assertEqual(encoder.decode([3]), 
end_symbol)\n\n self.assertEqual(\"hi%s\" % start_symbol,\n encoder.decode(encoder.encode(\"hi\") + [2]))\n\n # Make sure that we haven't messed up the ability to reconstruct.\n reconstructed_corpus = encoder.decode(encoder.encode(corpus))\n self.assertEqual(corpus, reconstructed_corpus)\n\n\nclass OneHotClassLabelEncoderTest(tf.test.TestCase):\n\n def test_one_hot_encode(self):\n encoder = text_encoder.OneHotClassLabelEncoder(\n class_labels=[\"zero\", \"one\", \"two\"])\n self.assertEqual(encoder.encode(\"zero\"), [1, 0, 0])\n self.assertEqual(encoder.encode(\"one\"), [0, 1, 0])\n self.assertEqual(encoder.encode(\"two\"), [0, 0, 1])\n\n def test_one_hot_decode(self):\n encoder = text_encoder.OneHotClassLabelEncoder(\n class_labels=[\"zero\", \"one\", \"two\"])\n self.assertEqual(encoder.decode([1, 0, 0]), \"zero\")\n self.assertEqual(encoder.decode([0, 1, 0]), \"one\")\n self.assertEqual(encoder.decode([0, 0, 1]), \"two\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2022 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"TensorFlow data sources and associated prepocessing functions.\"\"\"\n\nimport functools\nimport itertools\nimport json\nimport math\nimport os\nimport random\nimport re\n\nfrom absl import logging\nimport gin\nimport jax\nimport numpy as np\nimport scipy\nimport tensorflow as tf\nfrom tensorflow import estimator as tf_estimator\nimport tensorflow_datasets as tfds\nimport tensorflow_text as tf_text\nfrom trax import data\nfrom trax import fastmath\nfrom trax import layers as tl\nfrom trax import supervised\nfrom trax.data import debug_data_pipeline\nfrom trax.data import text_encoder\nfrom trax.fastmath import numpy as jnp\n\n# How many examples from the stream to skip at random during training.\n# For now, we skip at most 100K examples for efficiency.\n# TODO(lukaszkaiser): can we improve efficiency, should that be changed?\n_MAX_SKIP_EXAMPLES = 1e5\n\n\ndef t5_data():\n \"\"\"Get the T5 data module if available.\"\"\"\n module = None\n try:\n import t5.data # pylint: disable=g-import-not-at-top\n module = t5.data\n except AttributeError as e:\n logging.error('pip install t5')\n raise e\n return module\n\n\ndef no_preprocess(dataset, training):\n del training\n return dataset\n\n\ndef t2t_problems():\n # Load t2t problems on request only, this should save some import time.\n from tensor2tensor import problems_colab as t2tp # pylint: 
disable=g-import-not-at-top\n return t2tp\n\n\n# TODO(jonni): Rename function to better match its return values.\n@gin.configurable(module='trax.data')\ndef data_streams(dataset_name,\n data_dir=None,\n preprocess_fn=no_preprocess,\n bare_preprocess_fn=None,\n shuffle_buffer_size=1024,\n eval_holdout_size=0,\n input_name=None,\n target_name=None):\n \"\"\"Creates `(train, eval)` data sources from ``dataset_name``.\n\n Args:\n dataset_name: Name of dataset belonging to TFDS or T2T. T2T dataset names\n must start with ``'t2t_'``.\n data_dir: Directory where the data is located.\n preprocess_fn: Function to use for pre-processing after appending targets to\n inputs.\n bare_preprocess_fn: Function to use for pre-processing before appending\n targets to inputs.\n shuffle_buffer_size: Size of the shuffle buffer.\n eval_holdout_size: If greater than 0, specifies a fraction of training data\n to siphon off and use as eval data, in place of an separate eval split.\n input_name: Name of the inputs from the dictionary.\n target_name: Name of the outputs either from the dictionary or as a result\n of post-processing.\n\n Returns:\n A pair of functions, `(f, g)` for use as data sources; call `f()` to get an\n iterator of training data samples, and call `g()` to get an iterator of eval\n data samples.\n \"\"\"\n data_dir = download_and_prepare(dataset_name, data_dir)\n\n cache = []\n\n def stream(which):\n \"\"\"Create the stream, cache TF streams if needed.\"\"\"\n if not cache:\n cache.append(\n _train_and_eval_streams(dataset_name, data_dir, preprocess_fn,\n bare_preprocess_fn, shuffle_buffer_size,\n eval_holdout_size, input_name, target_name))\n\n (train_ds, eval_ds, input_name_c) = cache[0]\n dataset = eval_ds if which == 'eval' else train_ds\n return dataset_to_stream(dataset, input_name_c)\n\n train_stream = lambda: stream('train')\n eval_stream = lambda: stream('eval')\n return train_stream, eval_stream\n\n\ndef dataset_to_stream(dataset, input_name):\n \"\"\"Takes a 
tf.Dataset and creates a numpy stream of ready batches.\"\"\"\n # All input-pipeline processing should be on CPU.\n for example in fastmath.dataset_as_numpy(dataset):\n features = example[0]\n inp, out = features[input_name], example[1]\n mask = features['mask'] if 'mask' in features else None\n # Some accelerators don't handle uint8 well, cast to int.\n if isinstance(inp, np.uint8):\n inp = inp.astype(np.int32)\n if isinstance(out, np.uint8):\n out = out.astype(np.int32)\n yield (inp, out) if mask is None else (inp, out, mask)\n\n\ndef _train_and_eval_streams(dataset, data_dir, preprocess_fn,\n bare_preprocess_fn, shuffle_buffer_size,\n eval_holdout_size, input_name, target_name):\n \"\"\"Return train and eval batches with input name and shape.\"\"\"\n (train_data, eval_data,\n keys) = _train_and_eval_dataset(dataset, data_dir, eval_holdout_size)\n # If provided select input_name/target_name else fall back to keys if that is\n # available, else [None].\n input_names = ([input_name] if input_name is not None else\n keys[0] if keys is not None else [None])\n target_names = ([target_name] if target_name is not None else\n keys[1] if keys is not None else [None])\n\n train_batches = _shuffle_data(train_data, target_names, True,\n shuffle_buffer_size, preprocess_fn,\n bare_preprocess_fn)\n eval_batches = _shuffle_data(eval_data, target_names, False,\n shuffle_buffer_size, preprocess_fn,\n bare_preprocess_fn)\n return (train_batches, eval_batches, input_names[0])\n\n\ndef _shuffle_data(dataset, target_names, training, shuffle_buffer_size,\n preprocess_fn, bare_preprocess_fn):\n \"\"\"Shuffle the given dataset and run pre-processing.\"\"\"\n\n def append_targets(example):\n \"\"\"Append targets to the example dictionary. 
Needed for Keras.\"\"\"\n if len(target_names) == 1:\n return (example, example[target_names[0]])\n targets = {}\n for name in target_names:\n targets[name] = example[name]\n return (example, targets)\n\n # `bare_preprocess_fn` is called before appending targets etc.\n if bare_preprocess_fn is not None:\n dataset = bare_preprocess_fn(dataset, training)\n dataset = dataset.map(append_targets)\n # TODO(pkozakowski): Repeat both the training and evaluation set, so we don't\n # have incomplete batches during evaluation. This will be a problem when we\n # add an option to evaluate on the whole dataset, then we'll need to think of\n # a different solution.\n dataset = dataset.repeat()\n if training:\n # Skip a random fraction at the beginning of the stream. The skip is\n # essential for synchronous highly-parallel training to avoid multiple\n # replicas reading the same data in lock-step.\n dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES))\n dataset = preprocess_fn(dataset, training)\n dataset = dataset.shuffle(shuffle_buffer_size)\n return dataset.prefetch(8)\n\n\ndef _train_and_eval_dataset(dataset_name,\n data_dir,\n eval_holdout_size,\n train_shuffle_files=True,\n eval_shuffle_files=False,\n use_alt_eval=False,\n subsplit=None):\n \"\"\"Return train and evaluation datasets, feature info and supervised keys.\n\n Args:\n dataset_name: a string, the name of the dataset; if it starts with 't2t_'\n then we'll search T2T Problem registry for it, otherwise we assume it is a\n dataset from TFDS and load it from there.\n data_dir: directory where the data is located.\n eval_holdout_size: float from 0 to <1; if >0 use this much of training data\n for evaluation (instead of looking for a pre-specified VALIDATION split).\n train_shuffle_files: Boolean determining whether or not to shuffle the train\n files at startup. Set to False if you want data determinism.\n eval_shuffle_files: Boolean determining whether or not to shuffle the test\n files at startup. 
Set to False if you want data determinism.\n use_alt_eval: If True, use the dataset's alternate/secondary eval split;\n else use the dataset's default/only eval split. Currently, only the\n `glue/mnli` dataset provides an alternate eval split, and this arg is\n ignored for other datasets.\n subsplit: a pair of floats (x, y), both in [0, 1], saying which part of the\n full training dataset we should return (default: all of it, [0, 1]).\n\n Returns:\n a 4-tuple consisting of:\n * the train tf.Dataset\n * the eval tf.Dataset\n * information about features: a python dictionary with feature names\n as keys and an object as value that provides .shape and .n_classes.\n * supervised_keys: information what's the input and what's the target,\n ie., a pair of lists with input and target feature names.\n \"\"\"\n logging.info('Building TF data pipeline for %s', dataset_name)\n if dataset_name.startswith('t2t_'):\n return _train_and_eval_dataset_v1(dataset_name[4:], data_dir,\n train_shuffle_files, eval_shuffle_files)\n dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)\n info = dataset_builder.info\n splits = dataset_builder.info.splits\n if dataset_name != 'c4/multilingual' and tfds.Split.TRAIN not in splits:\n raise ValueError('To train we require a train split in the dataset.')\n train_split = tfds.Split.TRAIN if dataset_name != 'c4/multilingual' else 'en'\n eval_split = None\n train_examples = info.splits[train_split].num_examples\n eval_holdout_examples = int(train_examples * eval_holdout_size)\n if eval_holdout_examples > 0 or subsplit is not None:\n if subsplit is None:\n subsplit = (0, 1)\n n_train = train_examples - eval_holdout_examples\n train_start = int(n_train * subsplit[0])\n train_end = int(n_train * subsplit[1])\n if train_end - train_start < 1:\n raise ValueError('Requested train subsplit has no examples: '\n 'n_train %d subsplit %s' % (n_train, subsplit))\n # Eval holdout examples from the end of the training set.\n if eval_holdout_examples > 
0:\n eval_split = f'{train_split}[-{eval_holdout_examples}:]'\n # Shard the training set for this host.\n train_split = f'{train_split}[{train_start}:{train_end}]'\n\n if dataset_name == 'glue/mnli':\n eval_split = (\n 'validation_mismatched' if use_alt_eval else 'validation_matched')\n elif dataset_name == 'c4/multilingual':\n eval_split = 'en-validation'\n elif eval_split is None:\n if tfds.Split.VALIDATION not in splits and 'test' not in splits:\n raise ValueError('We require a validation or test split in the dataset.')\n eval_split = tfds.Split.VALIDATION\n if tfds.Split.VALIDATION not in splits:\n eval_split = tfds.Split.TEST\n\n train = tfds.load(\n name=dataset_name,\n split=train_split,\n data_dir=data_dir,\n shuffle_files=train_shuffle_files)\n valid = tfds.load(\n name=dataset_name,\n split=eval_split,\n data_dir=data_dir,\n shuffle_files=eval_shuffle_files)\n keys = None\n if info.supervised_keys:\n keys = ([info.supervised_keys[0]], [info.supervised_keys[1]])\n return train, valid, keys\n\n\n# TODO(jonni): Consider renaming this function.\n@gin.configurable(module='trax.data')\ndef TFDS( # pylint: disable=invalid-name\n dataset_name,\n data_dir=None,\n tfds_preprocess_fn=None,\n keys=None,\n train=True,\n use_alt_eval=False,\n shuffle_train=True,\n host_id=None,\n n_hosts=None,\n eval_holdout_size=0):\n \"\"\"Creates a data source from TensorFlow dataset ``dataset_name``.\n\n Args:\n dataset_name: Name of the dataset, as registered in TensorFlow datasets\n (e.g., ``'glue/mnli'``).\n data_dir: Directory where the data is located.\n tfds_preprocess_fn: If specified, function that applies to items in raw\n dataset (before selecting specific features).\n keys: Tuple of dataset-specific strings that select features from the\n dataset.\n train: If True, select the training split from the dataset; else select an\n eval split.\n use_alt_eval: If True, and if ``train`` is False, select the dataset's\n alternate eval split if it has one (or fall back to the 
dataset's only\n eval split). This currently affects only the `glue/mnli` dataset.\n shuffle_train: If True, have TensorFlow pre-shuffle the training data; else\n receive training data in deterministic sequence.\n host_id: Integer id used for tracking data subsplits, in cases where\n ``n_hosts`` > 1.\n n_hosts: If greater than 1, prepare data subsplits for the given number of\n hosts.\n eval_holdout_size: If greater than 0, specifies a fraction of training data\n to siphon off and use as eval data, in place of an separate eval split.\n\n Returns:\n A function `f` for use as a training or eval data source; call `f()` to get\n an iterator of data samples.\n \"\"\"\n data_dir = download_and_prepare(dataset_name, data_dir)\n\n host_id = jax.process_index() if host_id is None else host_id\n n_hosts = n_hosts or jax.host_count()\n if n_hosts > 1:\n subsplit = (host_id / n_hosts, (host_id + 1) / n_hosts)\n else:\n subsplit = None\n train_data, eval_data, _ = (\n _train_and_eval_dataset(dataset_name,\n data_dir,\n eval_holdout_size,\n train_shuffle_files=shuffle_train,\n use_alt_eval=use_alt_eval,\n subsplit=subsplit))\n dataset = train_data if train else eval_data\n dataset = dataset if tfds_preprocess_fn is None else tfds_preprocess_fn(\n dataset)\n\n def select_from(example):\n return tuple(example[k] for k in keys)\n\n dataset = dataset.map(select_from)\n dataset = dataset.repeat()\n\n def gen(generator=None):\n del generator\n for example in fastmath.dataset_as_numpy(dataset):\n yield example\n\n return gen\n\n\ndef _select_features(example, feature_list=None):\n \"\"\"Select a subset of features from the example dict.\"\"\"\n feature_list = feature_list or ['inputs', 'targets']\n return {f: example[f] for f in feature_list if f in example}\n\n\ndef _eager_dataset_iterator(dataset):\n for item in dataset:\n flat = tf.nest.flatten(item)\n flat = [el.numpy() for el in flat]\n yield tf.nest.pack_sequence_as(item, flat)\n\n\ndef _train_and_eval_dataset_v1(problem_name, 
data_dir, train_shuffle_files,\n eval_shuffle_files):\n \"\"\"Return train and evaluation datasets, feature info and supervised keys.\"\"\"\n with tf.device('cpu:0'):\n problem = t2t_problems().problem(problem_name)\n hparams = None\n if problem_name == 'video_bair_robot_pushing':\n hparams = problem.get_hparams()\n bair_robot_pushing_hparams(hparams)\n train_dataset = problem.dataset(\n tf_estimator.ModeKeys.TRAIN,\n data_dir,\n shuffle_files=train_shuffle_files,\n hparams=hparams)\n train_dataset = train_dataset.map(_select_features)\n eval_dataset = problem.dataset(\n tf_estimator.ModeKeys.EVAL,\n data_dir,\n shuffle_files=eval_shuffle_files,\n hparams=hparams)\n eval_dataset = eval_dataset.map(_select_features)\n # TODO(lukaszkaiser): remove this need for one example, just input_key.\n examples = list(tfds.as_numpy(train_dataset.take(1)))\n # We use 'inputs' as input except for purely auto-regressive tasks like\n # language models where 'targets' are used as input_key.\n input_key = 'inputs' if 'inputs' in examples[0] else 'targets'\n supervised_keys = ([input_key], ['targets'])\n return train_dataset, eval_dataset, supervised_keys\n\n\n# Tokenization.\n@debug_data_pipeline.debug_pipeline\ndef tokenize(stream,\n keys=None,\n vocab_type='subword',\n vocab_file=None,\n vocab_dir=None,\n n_reserved_ids=0):\n \"\"\"Tokenize examples from the stream.\n\n This function assumes that `stream` generates either strings or tuples/dicts\n containing strings at some `keys`. 
This function maps these strings to\n numpy arrays of integers -- the tokenized version of each string.\n\n Args:\n stream: A python generator yielding strings, tuples or dicts.\n keys: which keys of the tuple/dict to tokenize (by default: all)\n vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.\n vocab_file: Name of the vocabulary file.\n vocab_dir: Directory which contains the vocabulary file.\n n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused;\n This is common for example when reserving the 0 for padding and 1 for EOS,\n but it's only needed if these symbols are not already included (and thus\n reserved) in the vocab_file.\n\n Yields:\n Examples from stream with strings at `keys` replaced by np.arrays of\n integers -- the tokenized version of these strings.\n \"\"\"\n vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)\n for example in stream:\n if isinstance(example, (list, tuple)):\n new_example = []\n for i, x in enumerate(example):\n if keys is None or i in keys:\n new_example.append(np.array(vocab.encode(x)) + n_reserved_ids)\n else:\n new_example.append(x)\n output = tuple(new_example)\n yield output\n elif isinstance(example, dict):\n new_example = {}\n for k in example:\n if keys is None or k in keys:\n new_example[k] = np.array(vocab.encode(example[k])) + n_reserved_ids\n else:\n new_example[k] = example[k]\n yield new_example\n else:\n output = np.array(vocab.encode(example)) + n_reserved_ids\n yield output\n\n\n@gin.configurable(module='trax.data')\ndef Tokenize( # pylint: disable=invalid-name\n keys=None,\n vocab_type='subword', # pylint: disable=invalid-name\n vocab_file=None,\n vocab_dir=None,\n n_reserved_ids=0):\n \"\"\"Returns a function that maps text to integer arrays; see `tokenize`.\"\"\"\n return lambda g: tokenize( # pylint: disable=g-long-lambda\n g,\n keys=keys,\n vocab_type=vocab_type,\n vocab_file=vocab_file,\n vocab_dir=vocab_dir,\n n_reserved_ids=n_reserved_ids)\n\n\ndef 
detokenize(x,\n vocab_type='subword',\n vocab_file=None,\n vocab_dir=None,\n n_reserved_ids=0):\n \"\"\"Maps integer arrays to text; the opposite of `tokenize`.\n\n In many cases (all char- and subword-type vocabularies and most sentencepiece\n ones) the tokenization is invertible, so detokenize(tokenize(x)) = x. In some\n more rare cases this can remove some spacing, but it is still often useful\n to run detokenize to get a readable version for a tokenized string.\n\n Args:\n x: a list or numpy array of integers.\n vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.\n vocab_file: Name of the vocabulary file.\n vocab_dir: Directory which contains the vocabulary file.\n n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused;\n This is common for example when reserving the 0 for padding and 1 for EOS,\n but it's only needed if these symbols are not already included (and thus\n reserved) in the vocab_file.\n\n Returns:\n A string corresponding to the de-tokenized version of x.\n \"\"\"\n vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)\n x_unreserved = np.array(x) - n_reserved_ids\n return str(vocab.decode(x_unreserved.tolist()))\n\n\ndef _to_unicode(s):\n # Errors of the casting are ignored (e.g. sequences not allowed by UTF-8),\n # in order not to stay with incomplete examples (with empty values).\n return str(s, encoding='utf-8', errors='ignore')\n\n\n@gin.configurable(module='trax.data')\ndef ConvertToUnicode(keys=None): # pylint: disable=invalid-name\n \"\"\"Converts to Unicode UTF-8 elements of an example.\n\n Useful for when TFDS outputs byte arrays. 
All of the errors of the conversion\n are ignored.\n\n Args:\n keys: tuple/list of example dimensions to convert.\n\n Returns:\n Function converting chosen elements of an example to UTF-8.\n \"\"\"\n\n @debug_data_pipeline.debug_pipeline\n def _convert_to_unicode_str(stream):\n for example in stream:\n if isinstance(example, (list, tuple)):\n new_example = []\n for i, x in enumerate(example):\n if keys is None or i in keys:\n new_example.append(_to_unicode(x))\n else:\n new_example.append(x)\n output = tuple(new_example)\n yield output\n elif isinstance(example, dict):\n new_example = {}\n for k in example:\n if keys is None or k in keys:\n new_example[k] = _to_unicode(example[k])\n else:\n new_example[k] = example[k]\n yield new_example\n else:\n output = _to_unicode(example)\n yield output\n\n return _convert_to_unicode_str\n\n\ndef vocab_size(vocab_type='subword',\n vocab_file=None,\n vocab_dir=None,\n n_reserved_ids=0):\n \"\"\"Returns the size of the vocabulary (number of symbols used).\n\n This function can be used to set the size of the final layers of a model that\n needs to predict symbols from a given vocabulary. More precisely, if this\n function returns N then the last layer size should be set to at least N (it\n can be more). 
Note that this function does take reserved IDs into account.\n\n Args:\n vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.\n vocab_file: Name of the vocabulary file.\n vocab_dir: Directory which contains the vocabulary file.\n n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused.\n\n Returns:\n An integer, the number of symbols used (including reserved IDs).\n \"\"\"\n vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)\n return vocab.vocab_size + n_reserved_ids\n\n\ndef _get_vocab(vocab_type='subword', vocab_file=None, vocab_dir=None,\n extra_ids=0):\n \"\"\"Gets the vocabulary object for tokenization; see tokenize for details.\"\"\"\n if vocab_type not in [\n 'char', 'subword', 'sentencepiece', 'bert', 'bert-lowercase'\n ]:\n raise ValueError(\n 'vocab_type must be \"subword\", \"char\", \"sentencepiece\", \"bert\" or \"bert-lowercase\" '\n f'but got {vocab_type}')\n\n if vocab_type == 'char':\n # Note that we set num_reserved_ids=0 below. 
We could instead pass\n # the value n_reserved_ids from tokenize here -- ByteTextEncoder does\n # exactly the same thing as tokenize above, ie., adds num_reserved_ids.\n return text_encoder.ByteTextEncoder(num_reserved_ids=0)\n\n vocab_dir = vocab_dir or 'gs://trax-ml/vocabs/'\n path = os.path.join(vocab_dir, vocab_file)\n\n if vocab_type == 'subword':\n return text_encoder.SubwordTextEncoder(path)\n\n if vocab_type == 'bert':\n return text_encoder.BertEncoder(path, do_lower_case=False)\n\n if vocab_type == 'bert-lowercase':\n return text_encoder.BertEncoder(path, do_lower_case=True)\n\n assert vocab_type == 'sentencepiece'\n return t5_data().SentencePieceVocabulary(sentencepiece_model_file=path,\n extra_ids=extra_ids)\n\n\n# Makes the function accessible in gin configs, even with all args denylisted.\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef cifar10_no_augmentation_preprocess(dataset, training):\n del training\n\n def cast_image(features, targets):\n features['image'] = tf.cast(features['image'], tf.float32) / 255.0\n return features, targets\n\n dataset = dataset.map(cast_image)\n return dataset\n\n\ndef _cifar_augment_image(image):\n \"\"\"Image augmentation suitable for CIFAR-10/100.\n\n As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).\n\n Args:\n image: a Tensor.\n\n Returns:\n Tensor of the same shape as image.\n \"\"\"\n image = tf.image.resize_with_crop_or_pad(image, 40, 40)\n image = tf.image.random_crop(image, [32, 32, 3])\n image = tf.image.random_flip_left_right(image)\n return image\n\n\n# Makes the function accessible in gin configs, even with all args denylisted.\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef cifar10_augmentation_preprocess(dataset, training):\n \"\"\"Preprocessing for cifar10 with augmentation (see below).\"\"\"\n\n def augment(features, targets):\n features['image'] = _cifar_augment_image(features['image'])\n return features, targets\n\n def 
cast_image(features, targets):\n features['image'] = tf.cast(features['image'], tf.float32) / 255.0\n return features, targets\n\n if training:\n dataset = dataset.map(augment)\n dataset = dataset.map(cast_image)\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef cifar10_augmentation_flatten_preprocess(dataset,\n training,\n predict_image_train_weight=0.01):\n \"\"\"Preprocessing for cifar10 that flattens it and appends targets.\"\"\"\n\n def augment(features, targets):\n features['image'] = _cifar_augment_image(features['image'])\n return features, targets\n\n def flatten_image(features, targets):\n \"\"\"Flatten the image.\"\"\"\n img = features['image']\n flat = tf.cast(tf.reshape(img, [-1]), tf.int64)\n tgt = tf.expand_dims(targets, axis=0)\n flat_with_target = tf.concat([flat, tgt], axis=0)\n new_features = {}\n new_features['image'] = flat_with_target\n predict_image_weight = predict_image_train_weight if training else 0.0\n mask_begin = tf.ones_like(flat)\n mask_begin = tf.cast(mask_begin, tf.float32) * predict_image_weight\n mask_end = tf.cast(tf.ones_like(tgt), tf.float32)\n new_features['mask'] = tf.concat([mask_begin, mask_end], axis=0)\n return new_features, flat_with_target\n\n if training:\n dataset = dataset.map(augment)\n dataset = dataset.map(flatten_image)\n\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef downsampled_imagenet_flatten_bare_preprocess(dataset, training):\n \"\"\"Preprocessing for downsampled_imagenet.\n\n Args:\n dataset: the dataset.\n training: unused option.\n\n Returns:\n Flattened dataset.\n\n Preprocessing for downsampled_imagenet 32x32 and 64x64 generation from\n http://arxiv.org/abs/1601.06759 (page 8).\n \"\"\"\n del training\n\n def flatten_image(features):\n img = features['image']\n flat = tf.cast(tf.reshape(img, [-1]), tf.int64)\n\n new_features = {'image': flat}\n return new_features\n\n return 
dataset.map(flatten_image)\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef concat_preprocess(dataset, training, pad_symbol=0):\n \"\"\"Pre-processing function that concatenates input and target for LM.\"\"\"\n del training\n\n def concat(features, targets):\n inp = features['inputs']\n pad = tf.expand_dims(tf.zeros_like(inp[0]) + pad_symbol, axis=0)\n concat = tf.concat([pad, inp, pad, targets], axis=0)\n # Note: we're updating existing features dictionary here, so make sure\n # it is not re-used in some other ways outside of this function.\n features['inputs'] = concat\n return features, concat\n\n dataset = dataset.map(concat)\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef squeeze_targets_preprocess(dataset, training):\n \"\"\"Pre-processing function that squeezes last axis of targets.\"\"\"\n del training\n\n def squeeze(features, targets):\n if targets.shape[-1] == 1:\n targets = tf.squeeze(targets, axis=-1)\n return features, targets\n\n dataset = dataset.map(squeeze)\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef lm1b_preprocess(dataset,\n training,\n max_target_length=-1,\n max_eval_target_length=-1):\n \"\"\"Preprocessing for LM1B: filter out targets exceeding maximum length.\"\"\"\n\n def target_right_length(_, target):\n return tf.less(tf.shape(target)[0], max_target_length + 1)\n\n def eval_target_right_length(_, target):\n return tf.less(tf.shape(target)[0], max_eval_target_length + 1)\n\n if max_target_length > 0 and training:\n dataset = dataset.filter(target_right_length)\n\n if max_eval_target_length > 0 and not training:\n dataset = dataset.filter(eval_target_right_length)\n\n return dataset\n\n\n# TODO(lukaszkaiser): find a single more abstract way of text pre-processing.\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef wmt_preprocess(dataset, training, max_length=-1, 
max_eval_length=-1):\n \"\"\"Preprocessing for LM1B: filter out targets exceeding maximum length.\"\"\"\n\n def train_right_length(example, target):\n l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])\n return tf.less(l, max_length + 1)\n\n def eval_right_length(example, target):\n l = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])\n return tf.less(l, max_eval_length + 1)\n\n if max_length > 0 and training:\n dataset = dataset.filter(train_right_length)\n\n if max_eval_length > 0 and not training:\n dataset = dataset.filter(eval_right_length)\n\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1):\n \"\"\"Preprocessing for WMT: filter exceeding maximum length and concatenate.\"\"\"\n dataset = wmt_preprocess(dataset, training, max_length, max_eval_length)\n\n def concat_and_add_mask(features, targets):\n inp = features['inputs']\n pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)\n concat = tf.concat([inp, pad, targets], axis=0)\n mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)\n features['inputs'] = concat\n features['mask'] = mask\n return features, concat\n\n dataset = dataset.map(concat_and_add_mask)\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef lm_token_preprocessing(dataset, training):\n \"\"\"Concatenates inputs, 0, targets, with masking only for targets.\"\"\"\n del training\n\n def concat_and_add_mask(x):\n inp = x['inputs']\n targets = x['targets']\n pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)\n concat = tf.concat([inp, pad, targets], axis=0)\n mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)\n x['inputs'] = concat\n x['targets'] = concat\n x['mask'] = mask\n return x\n\n dataset = dataset.map(concat_and_add_mask)\n return dataset\n\n\n@gin.configurable(module='trax.data', 
denylist=['hparams'])\ndef bair_robot_pushing_hparams(hparams=None,\n video_num_input_frames=1,\n video_num_target_frames=15):\n if hparams is not None:\n hparams.video_num_input_frames = video_num_input_frames\n hparams.video_num_target_frames = video_num_target_frames\n else:\n return video_num_input_frames, video_num_target_frames\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef bair_robot_pushing_preprocess(dataset, training):\n \"\"\"Pre-processing function that concatenates input and target frames.\"\"\"\n del training\n\n def concat_and_add_mask(features, targets):\n \"\"\"Concatenate input and output frames to form a language modeling setup.\"\"\"\n inp = features['inputs']\n concat = tf.concat([inp, targets], axis=0)\n mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0)\n concat = tf.reshape(concat, (-1,))\n mask = tf.reshape(mask, (-1,))\n concat = tf.cast(concat, tf.int32)\n mask = tf.cast(mask, tf.float32)\n features['inputs'] = features['targets'] = concat\n features['mask'] = mask\n return features, concat\n\n dataset = dataset.map(concat_and_add_mask)\n return dataset\n\n\ndef sentencepiece_tokenize(stream, spm_path=None, extra_ids=0):\n \"\"\"Sentencepiece tokenization.\"\"\"\n spm_path = spm_path or t5_data().DEFAULT_SPM_PATH\n vocab_file = os.path.basename(spm_path)\n vocab_dir = os.path.dirname(spm_path)\n vocab = _get_vocab(vocab_type='sentencepiece',\n vocab_file=vocab_file,\n vocab_dir=vocab_dir,\n extra_ids=extra_ids)\n for example in stream:\n # example could either be str or (str,)\n if isinstance(example, tuple):\n example = example[0]\n yield np.array(vocab.encode(example))\n\n\n@gin.configurable(module='trax.data')\ndef SentencePieceTokenize( # pylint: disable=invalid-name\n spm_path=None,\n extra_ids=0):\n \"\"\"Returns a function that maps text to integer arrays.\"\"\"\n return lambda g: sentencepiece_tokenize( # pylint: disable=g-long-lambda\n g,\n spm_path=spm_path,\n 
extra_ids=extra_ids)\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef c4_preprocess(dataset,\n training,\n max_target_length=-1,\n tokenization=None,\n spm_path=None):\n \"\"\"Pre-processing function for C4 dataset.\"\"\"\n del training\n\n def unicode_decode_chars(features, targets):\n targets = tf.strings.unicode_decode(features['text'], 'UTF-8')\n targets = tf.cast(targets, tf.int64)\n features['targets'] = targets\n features['inputs'] = targets\n return (features, targets)\n\n def spc_tokenize(tokenizer, features, targets):\n del targets\n tokenized_text = tokenizer.tokenize(features['text'])\n features['targets'] = tf.cast(tokenized_text, tf.int64)\n features['inputs'] = features['targets']\n return features, features['targets']\n\n if tokenization == 'spc':\n spm_path = spm_path or t5_data().DEFAULT_SPM_PATH\n with tf.compat.v1.gfile.GFile(spm_path, 'rb') as f:\n spc_model = f.read()\n tokenizer = tf_text.SentencepieceTokenizer(model=spc_model)\n dataset = dataset.map(functools.partial(spc_tokenize, tokenizer))\n else:\n dataset = dataset.map(unicode_decode_chars)\n\n def target_right_length(_, target):\n return tf.less(tf.shape(target)[0], max_target_length + 1)\n\n if max_target_length > 0:\n dataset = dataset.filter(target_right_length)\n\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef c4_bare_preprocess_fn(dataset,\n training=True,\n spm_path=None,\n copy_pretokenized=True,\n sequence_length=None):\n \"\"\"Returns a dataset that contains 'inputs' and 'targets' from C4.\"\"\"\n # Set target key to be equal to the text content.\n dataset = t5_data().preprocessors.rekey(\n dataset, key_map={\n 'targets': 'text',\n 'inputs': None\n })\n\n # Vocabulary for tokenization.\n extra_ids = 0\n vocab = t5_data().SentencePieceVocabulary(\n sentencepiece_model_file=spm_path or t5_data().DEFAULT_SPM_PATH,\n extra_ids=extra_ids)\n feature = t5_data().Feature(vocab)\n output_features = 
{'targets': feature, 'inputs': feature}\n\n # Tokenize the targets.\n keys = output_features\n\n def encode_string_features_fn(features):\n \"\"\"Encode all specified feature that are strings and return a dictionary.\n\n Args:\n features: a dictionary\n\n Returns:\n a dictionary\n \"\"\"\n ret = {}\n for k, v in features.items():\n if k in keys and v.dtype == tf.string:\n if copy_pretokenized:\n ret['%s_pretokenized' % k] = v\n v = tf.cast(output_features[k].vocabulary.encode_tf(v), tf.int64)\n ret[k] = v\n return ret\n\n dataset = dataset.map(\n encode_string_features_fn,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # Preprocess the tokens - the exact preprocessors are set via gin.\n dataset = t5_data().preprocessors.unsupervised(\n dataset, sequence_length=sequence_length, output_features=output_features)\n\n # Add EOS.\n dataset = add_eos_to_output_features(dataset, training)\n\n # Truncate and then pad the examples -- all examples have the same shape.\n dataset = truncate_dataset_on_len(dataset, training, sequence_length, True)\n dataset = pad_dataset_to_length(dataset, training, sequence_length)\n\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef filter_dataset_on_len(dataset,\n training,\n len_map=None,\n filter_on_eval=False):\n \"\"\"Filters a dataset of lengths given in `len_map`.\n\n Args:\n dataset: `tf.data.Dataset` the dataset to filter.\n training: bool, true if we are in training mode.\n len_map: optional dict of str to (int, int). We filter examples where a\n feature's size is beyond the specified bounds. 
Ex:\n {'inputs': (1, 512), 'targets': (64, 128)} will keep only those examples\n where 1 <= len(inputs) <= 512 and 64 <= len(targets) <= 128.\n filter_on_eval: bool if true, we will filter in eval mode also.\n\n Returns:\n a filtered `tf.data.Dataset`.\n \"\"\"\n if (len_map is None) or (not training and not filter_on_eval):\n return dataset\n\n assert isinstance(len_map, dict)\n for k, bounds in len_map.items():\n # pylint: disable=cell-var-from-loop\n # TODO(afrozm): Investigate `cell-var-from-loop` - since this is WAI and\n # there is a test too.\n def within_bounds(x, key, len_bounds):\n size = tf.shape(x[key])[0]\n min_len, max_len = len_bounds\n return (min_len <= size) and (size <= max_len)\n\n dataset = dataset.filter(lambda x: within_bounds(x, k, bounds))\n # pylint: enable=cell-var-from-loop\n\n return dataset\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef truncate_dataset_on_len(dataset,\n training,\n len_map=None,\n truncate_on_eval=False):\n \"\"\"Truncates features in an example to lengths given in `len_map`.\n\n Args:\n dataset: `tf.data.Dataset` the dataset to filter.\n training: bool, true if we are in training mode.\n len_map: optional dict of str to int, we truncate examples where a feature's\n size is beyond the max. 
Ex: {'inputs': 512, 'targets': 64} will truncate\n examples to be within those bounds.\n truncate_on_eval: bool if true, we will truncate in eval mode also.\n\n Returns:\n a filtered `tf.data.Dataset`.\n \"\"\"\n if (len_map is None) or (not training and not truncate_on_eval):\n return dataset\n\n assert isinstance(len_map, dict)\n\n def truncate_example(x):\n for key, max_len in len_map.items():\n x_len = tf.shape(x[key])[0]\n if x_len > max_len:\n x[key] = x[key][:max_len, ...]\n return x\n\n return dataset.map(truncate_example)\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef pad_dataset_to_length(dataset, training, len_map=None):\n \"\"\"Pad features less than specified length to specified length.\"\"\"\n del training\n if len_map is None:\n return dataset\n\n def pad_to_len(x):\n for key, max_len in len_map.items():\n x_shape = tf.shape(x[key])\n x_len = x_shape[0]\n if x_len < max_len:\n pad_shape = [\n max_len - x_len,\n ]\n zeros = tf.zeros(pad_shape, dtype=x[key].dtype)\n x[key] = tf.concat([x[key], zeros], 0)\n return x\n\n return dataset.map(pad_to_len)\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef add_eos_to_output_features(dataset,\n training,\n output_features='targets',\n eos=1):\n \"\"\"Adds `EOS` to all features in `output_features`.\"\"\"\n del training\n if not isinstance(output_features, (list, tuple)):\n output_features = [output_features]\n\n def add_eos(x):\n for output_feature in output_features:\n x[output_feature] = tf.concat([x[output_feature], [eos]], axis=0)\n return x\n\n return dataset.map(add_eos)\n\n\n@gin.configurable(module='trax.data', denylist=['dataset', 'training'])\ndef generic_text_dataset_preprocess_fn(dataset,\n training=True,\n text_preprocess_fns=None,\n token_preprocess_fns=None,\n spm_path=None,\n copy_pretokenized=False,\n debug_print_examples=False,\n debug_print_examples_rate=0.01):\n \"\"\"Pre-processes, tokenizes and post-processes a 
`tf.data.Dataset`.\n\n Args:\n dataset: `tf.data.Dataset` to process.\n training: boolean, set to True if training, False otherwise.\n text_preprocess_fns: None or list of callables: `tf.data.Dataset`, bool ->\n `tf.data.Dataset` this operates before tokenization. Typically used to\n select which fields we want to learn over or change something into \"text\n to text\" form.\n token_preprocess_fns: None or list of callables: `tf.data.Dataset`, bool ->\n `tf.data.Dataset`, this operates after tokenization. Since this can view\n the tokenized fields, this can be used to filter on length etc.\n spm_path: None or str, path to a sentencepiece model to use for tokenization\n by default uses the 32k vocabulary from T5.\n copy_pretokenized: bool, if True retains the original fields after\n tokenization.\n debug_print_examples: bool, if True this prints examples to the logging\n stream for inspection, both before and after tokenization.\n debug_print_examples_rate: float, [0, 1.0], on average this fraction of\n dataset examples will be printed out in each phase i.e. 
pre and post\n tokenization.\n\n Returns:\n a `tf.data.Dataset` with all the preprocessing and tokenization performed.\n \"\"\"\n\n # The assumption is that `text_preprocess_fns` finally gives us a dataset\n # which has `inputs` and `targets`.\n if text_preprocess_fns is not None:\n for text_preprocess_fn in text_preprocess_fns:\n dataset = text_preprocess_fn(dataset, training)\n\n # Print debugging examples if needed before tokenization.\n if debug_print_examples:\n\n def print_examples(x):\n if np.random.uniform() < debug_print_examples_rate:\n tf.print(x, output_stream=logging.info)\n return x\n\n dataset = dataset.map(print_examples)\n\n # Vocabulary for tokenization.\n extra_ids = 0\n vocab = t5_data().SentencePieceVocabulary(\n sentencepiece_model_file=spm_path or t5_data().DEFAULT_SPM_PATH,\n extra_ids=extra_ids)\n feature = t5_data().Feature(vocab)\n output_features = {'targets': feature, 'inputs': feature}\n\n # Tokenize the inputs and targets.\n dataset = t5_data().preprocessors.tokenize(\n dataset, output_features, copy_pretokenized=copy_pretokenized)\n\n # Apply the token-preprocessors.\n if token_preprocess_fns is not None:\n for token_preprocess_fn in token_preprocess_fns:\n dataset = token_preprocess_fn(dataset, training)\n\n if debug_print_examples:\n\n def print_examples_and_shapes(x):\n if np.random.uniform() < debug_print_examples_rate:\n tf.print(\n {\n 'inputs_shape': tf.size(x['inputs']),\n 'targets_shape': tf.size(x['targets']),\n 'inputs': x['inputs'],\n 'targets': x['targets'],\n },\n output_stream=logging.info)\n return x\n\n dataset = dataset.map(print_examples_and_shapes)\n\n return dataset\n\n\n@gin.configurable(module='trax.data')\ndef get_t5_preprocessor_by_name(name=None, fn_kwargs=None):\n \"\"\"Returns a closure of any T5 preprocessor function with its arguments.\n\n The main use-case is to use this (with gin scopes) to make any preprocessor\n function available in a gin file to configure and use.\n\n See: 
`TFInputs.test_gin_configurable_preprocessors`\n\n Args:\n name: str, name of the preprocessor function to configure.\n fn_kwargs: optional dictionary, the arguments to configure, these will be\n partially applied to the function given by `name`.\n\n Returns:\n a closure of the preprocessor function along with its arguments, this\n function takes two arguments only, dataset and boolean training and ignores\n the training and calls the t5 processor with the dataset (and closed over\n arguments only).\n \"\"\"\n\n assert name is not None\n f = getattr(t5_data().preprocessors, name)\n if fn_kwargs is not None:\n f = functools.partial(f, **fn_kwargs)\n return lambda ds, unused_training: f(ds)\n\n\ndef download_and_prepare(dataset_name, data_dir):\n \"\"\"Downloads and prepares T2T or TFDS dataset.\n\n Args:\n dataset_name: tfds dataset or t2t problem name prefixed by 't2t_'.\n data_dir: location of existing dataset or None.\n\n Returns:\n data_dir: path string of downloaded data.\n \"\"\"\n if not data_dir:\n data_dir = os.path.expanduser('~/tensorflow_datasets/')\n dl_dir = os.path.join(data_dir, 'download')\n logging.info(\n 'No dataset directory provided. 
'\n 'Downloading and generating dataset for %s inside data directory %s '\n 'For large datasets it is better to prepare datasets manually!',\n dataset_name, data_dir)\n if dataset_name.startswith('t2t_'):\n # Download and run dataset generator for T2T problem.\n data_dir = os.path.join(data_dir, dataset_name)\n tf.io.gfile.makedirs(data_dir)\n tf.io.gfile.makedirs(dl_dir)\n t2t_problems().problem(dataset_name[len('t2t_'):]).generate_data(\n data_dir, dl_dir)\n else:\n # Download and prepare TFDS dataset.\n tfds_builder = tfds.builder(dataset_name)\n tfds_builder.download_and_prepare(download_dir=dl_dir)\n else:\n data_dir = os.path.expanduser(data_dir)\n return data_dir\n\n\ndef BertSingleSentenceInputs(batch, # pylint: disable=invalid-name\n labeled=True,\n cls_id=101,\n sep_id=102):\n \"\"\"Prepares inputs for BERT: add [SEP], [CLS] and create embeddings.\"\"\"\n if labeled:\n for sent1, label in batch:\n value_vector = np.concatenate(([cls_id], sent1, [sep_id]))\n segment_embs = np.zeros(sent1.shape[0] + 2, dtype=np.int32)\n yield value_vector, segment_embs, segment_embs, label, np.int32(1)\n else:\n for (sent1,) in batch: # row is a tuple with 1 element\n value_vector = np.concatenate(([cls_id], sent1, [sep_id]))\n segment_embs = np.zeros(sent1.shape[0] + 2, dtype=np.int32)\n yield value_vector, segment_embs, segment_embs\n\n\ndef BertDoubleSentenceInputs(batch, # pylint: disable=invalid-name\n labeled=True,\n cls_id=101,\n sep_id=102):\n \"\"\"Prepares inputs for BERT models by adding [SEP] and [CLS] tokens and creating segment embeddings.\"\"\"\n if labeled:\n for sent1, sent2, label in batch:\n value_vector = np.concatenate(\n ([cls_id], sent1, [sep_id], sent2, [sep_id]))\n\n segment_embs = np.zeros(\n sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32)\n second_sent_start = sent1.shape[0] + 2\n segment_embs[second_sent_start:] = 1\n yield value_vector, segment_embs, segment_embs, label, np.int32(1)\n else:\n for sent1, sent2 in batch:\n value_vector = 
np.concatenate(\n ([cls_id], sent1, [sep_id], sent2, [sep_id]))\n\n segment_embs = np.zeros(\n sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32)\n second_sent_start = sent1.shape[0] + 2\n segment_embs[second_sent_start:] = 1\n yield value_vector, segment_embs, segment_embs\n\n\n@gin.configurable(module='trax.data')\ndef CreateBertInputs(double_sentence=True, # pylint: disable=invalid-name\n labeled=True,\n cls_id=101,\n sep_id=102):\n bert_inputs_fn = BertDoubleSentenceInputs if double_sentence else BertSingleSentenceInputs\n return functools.partial(\n bert_inputs_fn, labeled=labeled, cls_id=cls_id, sep_id=sep_id)\n\n\n@gin.configurable(module='trax.data')\ndef mask_random_tokens(batch,\n explicit_vocab_size=30522,\n masking_prob=0.15,\n cls_id=101,\n sep_id=102,\n mask_id=103,\n vocab_start_id=999):\n \"\"\"Prepares input for the masking task.\n\n Preparation consist in masking masking_prob percentage of non-special tokens\n at each input row; round(masking_prob * num_nonspecial_tokens) random tokens\n are selected out of which each token is either\n - replaced with [MASK] token with 80% probability,\n - replaced with random token with 10% probability,\n - or unchanged with 10%.\n The implentation is based on\n https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L342\n\n Examples:\n - batch is a stream with each row having tuple (token_ids,). Function yields\n rows of form (modified_token_ids, original_tokens, token_weights), where\n modified_token_ids have [MASK] tokens or random tokens according to the\n procedure described above.\n - batch is a stream with each row having tuple (token_ids, segment_embeddings,\n nsp_label, nsp_weight).Function yields rows of form (modified_token_ids,\n segment_embeddings, nsp_label, nsp_weight, original_tokens, token_weights).\n\n Args:\n batch: stream of inputs. 
Each row in the stream is a tuple which first\n element is an array of tokens\n explicit_vocab_size: the total size of the vocabulary.\n masking_prob: Determines percent of non-special tokens to be selected for\n masking.\n cls_id: id of the special CLS token.\n sep_id: id of the special SEP token.\n mask_id: id of the special MASK token.\n vocab_start_id: id of first non-special token in the vocabulary.\n\n Yields:\n a stream with tokens masked for MLM training and 2 appended arrays:\n - original tokens: a copy of original tokens used as a label for mlm\n training\n - token_weights: weights distributed uniformly over selected tokens (sum\n is 1). Other tokens have 0 weight.\n \"\"\"\n for token_ids, *row_rest in batch:\n original_tokens = token_ids.copy()\n\n # choose tokens for prediction. Chooses 0.15 of\n # all non-special tokens\n is_special_token = np.logical_or(token_ids == cls_id,\n token_ids == sep_id) # CLS and SEP tokens\n is_special_token = np.logical_or(is_special_token,\n token_ids == 0) # padding\n viable_ids = np.arange(token_ids.shape[0])[~is_special_token]\n num_to_sample = round(masking_prob * viable_ids.shape[0])\n if num_to_sample == 0:\n # sentence is too short to select given percentage of tokens to mask\n continue\n candidate_ids = np.random.choice(viable_ids, num_to_sample, replace=False)\n\n # create weights\n token_weights = np.zeros(token_ids.shape)\n token_weights[candidate_ids] = 1 / candidate_ids.shape[0]\n\n prob_scores = np.random.random(candidate_ids.shape)\n\n # change 80 % of tokens to [MASK]\n mask_token_ids = candidate_ids[prob_scores < 0.8]\n token_ids[mask_token_ids] = mask_id\n\n # change 10% of tokens to random token\n random_token_ids = candidate_ids[(0.8 <= prob_scores) & (prob_scores < 0.9)]\n token_ids[random_token_ids] = np.random.randint(vocab_start_id,\n explicit_vocab_size,\n random_token_ids.shape[0])\n\n # rest (10%) is left unchaged\n yield (token_ids, *row_rest, original_tokens, 
token_weights)\n\n\n@gin.configurable(module='trax.data')\ndef BertNextSentencePredictionInputs(dataset_name, # pylint: disable=invalid-name\n data_dir=None,\n text_key='text',\n train=True,\n shuffle_size=50000):\n \"\"\"Defines a stream for the next sentence prediction task.\"\"\"\n stream = TFDS(\n dataset_name,\n data_dir=data_dir,\n tfds_preprocess_fn=functools.partial(\n t5_data().preprocessors.next_sentence_prediction,\n text_key=text_key,\n label_sentences=True,\n buffer_size=shuffle_size),\n keys=['inputs', 'targets'],\n train=train)\n\n def split_stream(generator=None):\n # split string with 'sentence1:' and 'sentence2:' into two separate strings\n for text, target in stream(generator):\n text_str = str(text)[:-1] # removes last '\"' which is always at the end\n sentences = text_str.split('sentence1: ')[1].split(' sentence2: ')\n if len(sentences) != 2:\n # 'sentence2:' appeared in the text and got mixed up with the label\n continue\n sent1, sent2 = sentences\n yield sent1, sent2, target == 'next'\n\n return split_stream\n\n\n@gin.configurable(module='trax.data')\ndef CorpusToRandomChunks(dataset_name, num_tokens=512, train=True): # pylint: disable=invalid-name\n return TFDS(\n dataset_name,\n tfds_preprocess_fn=functools.partial(\n t5_data().preprocessors.random_split_text,\n max_words_per_segment=num_tokens),\n train=train,\n keys=['text'])\n\n\n_GLUE_KEYS = {\n 'cola': ('sentence',),\n 'sst2': ('sentence',),\n 'mrpc': ('sentence1', 'sentence2'),\n 'qqp': ('question1', 'question2'),\n 'stsb': ('sentence1', 'sentence2'),\n 'mnli': ('premise', 'hypothesis'),\n 'qnli': ('question', 'sentence'),\n 'rte': ('sentence1', 'sentence2'),\n 'wnli': ('sentence1', 'sentence2'),\n}\n\n\n# Labels inferred from the T5 paper: https://arxiv.org/pdf/1910.10683.pdf\n_GLUE_LABELS = {\n 'cola': ('unacceptable', 'acceptable'),\n 'sst2': ('negative', 'positive'),\n 'mrpc': ('not_equivalent', 'equivalent'),\n 'qqp': ('not_duplicate', 'duplicate'),\n 'stsb': ('sentence1', 
'sentence2'),\n 'mnli': ('entailment', 'neutral', 'contradiction'),\n 'qnli': ('entailment', 'not_entailment'),\n 'rte': ('entailment', 'not_entailment'),\n 'wnli': ('sentence1', 'sentence2'),\n}\n\n# Defining separate <Foo>TrainStream and <Foo>EvalStream functions (below)\n# makes gin configuration expressions more direct. A single gin line can\n# configure each; for example:\n#\n# BertGlueTrainStream.benchmark= 'mnli'\n# BertGlueEvalStream.benchmark = 'mnli'\n\n\n# pylint: disable=invalid-name\n@gin.configurable(module='trax.data')\ndef BertGlueTrainStream(benchmark=gin.REQUIRED):\n \"\"\"Returns a Bert-preprocessed training stream for ``benchmark``.\n\n Args:\n benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``,\n ``'mnli'``, ``'rte'``.\n \"\"\"\n return _BertGlueDataStream(benchmark + '_t')\n\n\n# GLUE evals need special handling because one eval in particular, MNLI, has\n# two different eval sets: \"matched\" and \"mismatched\". The code in this module\n# distinguishes between the two using the suffixes '_e' versus '_e2',\n# respectively.\ndef _ensure_eval_suffix(benchmark):\n \"\"\"Returns a string ending in an eval suffix; adds ``'_e'`` suffix if needed.\n\n Args:\n benchmark: Name of a benchmark or task, that might already include an\n eval-indicating suffix (``'_e'`` or ``'_e2'``).\n \"\"\"\n if benchmark.endswith('_e') or benchmark.endswith('_e2'):\n return benchmark\n else:\n return benchmark + '_e'\n\n\n@gin.configurable(module='trax.data')\ndef BertGlueEvalStream(benchmark=gin.REQUIRED):\n \"\"\"Returns a Bert-preprocessed eval data stream for ``benchmark``.\n\n Args:\n benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``,\n ``'mnli'``, ``'rte'``. 
If the benchmark includes an alternate\n eval (e.g., MNLI's \"mismatched\" eval/validation split), you can\n specify it with an ``'_e2'`` suffix, e.g., ``'mnli_e2'``.\n \"\"\"\n return _BertGlueDataStream(_ensure_eval_suffix(benchmark))\n\n\ndef _BertGlueDataStream(benchmark_id):\n \"\"\"Returns a Bert-preprocessed data stream for ``benchmark_id``.\n\n Args:\n benchmark_id: String that indicates the name and data split of a GLUE\n benchmark. Data splits are indicated as underscore suffixes, e.g.,\n ``'cola_t'`` (Cola benchmark, training split), ``'rte_e'`` (RTE\n benchmark, eval/validation split), and ``'mnli_e2'`` (MNLI benchmark,\n alternate \"mismatched\" eval/validation split).\n \"\"\"\n benchmark_id = _ensure_eval_suffix(benchmark_id)\n benchmark, split = benchmark_id.rsplit('_', 1)\n glue_data = TFDS(f'glue/{benchmark}',\n keys=_GLUE_KEYS[benchmark],\n train=(split == 't'),\n use_alt_eval=(split == 'e2'))\n return data.Serial(\n glue_data,\n data.Tokenize(),\n data.CreateBertInputs(),\n data.Shuffle(),\n data.PadToLength(),\n data.TruncateToLength(),\n data.Batch(),\n )\n\n\n@gin.configurable(module='trax.data')\ndef T5GlueTrainStream(benchmark=gin.REQUIRED):\n \"\"\"Returns a T5-preprocessed training data stream for ``benchmark``.\n\n Args:\n benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``,\n ``'mnli'``, ``'rte'``.\n \"\"\"\n return _T5GlueDataStream(benchmark + '_t')\n\n\n@gin.configurable(module='trax.data')\ndef T5GlueTrainStreamsParallel(benchmark_list=gin.REQUIRED,\n counters=None,\n reweight_by_minimum=False,\n gradually_reweight=False):\n \"\"\"Returns a parallel set of training streams, based on ``benchmark_list``.\n\n Args:\n benchmark_list: List of simple lower-case names of GLUE benchmarks, e.g.,\n ``'cola'``, ``'mnli'``, ``'rte'``.\n counters: a list of counters to be passed to data.Parallel, e.g.,\n [8551, 392702, 2490] would be a reasonable counterpart to\n benchmark_list = [\"cola\", \"mnli\", \"rte\"], see\n 
https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/data/glue_utils.py#L42\n for more details on counters.\n reweight_by_minimum: divide by the minimal counter.\n gradually_reweight: a more refined reweighting policy, see inputs.py\n for more details.\n \"\"\"\n stream_list = list(map(T5GlueTrainStream, benchmark_list))\n return data.Parallel(\n stream_list,\n counters=counters,\n reweight_by_minimum=reweight_by_minimum,\n gradually_reweight=gradually_reweight)()\n\n\n@gin.configurable(module='trax.data')\ndef T5GlueEvalStream(benchmark=gin.REQUIRED):\n \"\"\"Returns a T5-preprocessed eval data stream for ``benchmark``.\n\n Args:\n benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``,\n ``'mnli'``, ``'rte'``. If the benchmark includes an alternate\n eval (e.g., MNLI's \"mismatched\" eval/validation split), you can\n specify it with an ``'_e2'`` suffix, e.g., ``'mnli_e2'``.\n \"\"\"\n return _T5GlueDataStream(_ensure_eval_suffix(benchmark))\n\n\n@gin.configurable(module='trax.data')\ndef T5GlueEvalStreamsParallel(benchmark_list=gin.REQUIRED):\n \"\"\"Returns a parallel set of T5 eval streams, based on ``benchmark_list``.\n\n Args:\n benchmark_list: List of strings, each of which is a simple lower-case name\n of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. If a\n benchmark includes an alternate eval (e.g., MNLI's \"mismatched\"\n eval/validation split), you can specify it with an ``'_e2'`` suffix,\n e.g., ``'mnli_e2'``.\n \"\"\"\n stream_list = list(map(T5GlueEvalStream, benchmark_list))\n return data.Parallel(stream_list)()\n\n\ndef _T5GlueDataStream(benchmark_id, t5_tokenization=False):\n \"\"\"Returns a T5-preprocessed data stream for ``benchmark_id``.\n\n Args:\n benchmark_id: String that indicates the name and data split of a GLUE\n benchmark. 
Data splits are indicated as underscore suffixes, e.g.,\n ``'cola_t'`` (Cola benchmark, training split), ``'rte_e'`` (RTE\n benchmark, eval/validation split), and ``'mnli_e2'`` (MNLI benchmark,\n alternate \"mismatched\" eval/validation split).\n t5_tokenization: if true, then use t5_tokenization.\n \"\"\"\n return data.Serial(\n _t5_glue_data_split(benchmark_id)\n if t5_tokenization else _t5_glue_data_split_no_token(benchmark_id),\n data.Tokenize(),\n data.Shuffle(),\n data.PadToLength(),\n data.TruncateToLength(),\n data.Batch(),\n )\n\n\n@gin.configurable(module='trax.data')\ndef T5GlueEvalTasks(benchmark_list=gin.REQUIRED):\n \"\"\"Returns a list of T5 GLUE eval tasks, based on ``benchmark_list``.\n\n Args:\n benchmark_list: List of strings, each of which indicates the name and\n data split of a GLUE benchmark. Data splits are indicated as underscore\n suffixes, e.g., ``'cola_t'`` (Cola benchmark, training split),\n ``'rte_e'`` (RTE benchmark, eval/validation split), and ``'mnli_e2'``\n (MNLI alternate \"mismatched\" eval/validation split).\n \"\"\"\n task_list = list(map(_T5GlueEvalTask, benchmark_list))\n return task_list\n\n\ndef _T5GlueEvalTask(benchmark_id):\n \"\"\"Returns a T5 GLUE eval task, based on ``benchmark_id``.\"\"\"\n eval_data = T5GlueEvalStream(benchmark_id)\n benchmark_id = _ensure_eval_suffix(benchmark_id)\n metrics = [tl.WeightedCategoryAccuracy(), tl.SequenceAccuracy()]\n benchmark, split = benchmark_id.rsplit('_', 1)\n if benchmark == 'cola':\n name_upper = 'Cola'\n elif benchmark == 'mnli':\n name_upper = 'MNLI_matched' if split == 'e' else 'MNLI_mismatched'\n else:\n name_upper = benchmark.upper()\n return supervised.training.EvalTask(\n eval_data(),\n metrics,\n metric_names=[f'{name_upper} accuracy',\n f'{name_upper} sequence accuracy'])\n\n\ndef _t5_glue_data_split_no_token(benchmark_id):\n \"\"\"Returns a GLUE data split prepared with the standard T5 preprocessor.\"\"\"\n benchmark, split = 
_t5_glue_benchmark_and_split(benchmark_id)\n dataset = tfds.load(name=f'glue/{benchmark}', split=split)\n processed_dataset = t5_data().preprocessors.glue( # pylint: disable=g-long-lambda\n dataset,\n benchmark_name=benchmark,\n label_names=_GLUE_LABELS[benchmark])\n\n def stream_of_inputs_targets_weights(generator=None):\n del generator\n while True:\n for example in processed_dataset:\n input_values = example['inputs'].numpy()\n target_values = example['targets'].numpy()\n yield (input_values,\n target_values,\n jnp.array([1] * len(target_values)))\n\n return stream_of_inputs_targets_weights\n\n\ndef _t5_glue_data_split(benchmark_id):\n \"\"\"Returns a GLUE data split prepared with the standard T5 preprocessor.\"\"\"\n benchmark, split = _t5_glue_benchmark_and_split(benchmark_id)\n dataset = tfds.load(name=f'glue/{benchmark}', split=split)\n processed_dataset = generic_text_dataset_preprocess_fn(\n dataset,\n spm_path=t5_data().DEFAULT_SPM_PATH,\n text_preprocess_fns=[\n lambda ds, training: t5_data().preprocessors.glue( # pylint: disable=g-long-lambda\n ds,\n benchmark_name=benchmark,\n label_names=_GLUE_LABELS[benchmark])\n ],\n copy_pretokenized=True,\n debug_print_examples=True,\n debug_print_examples_rate=0.05)\n dataset_as_numpy = tfds.as_numpy(processed_dataset)\n\n def stream_of_inputs_targets_weights(generator=None):\n del generator\n while True:\n for example in dataset_as_numpy:\n input_values = example['inputs']\n target_values = example['targets']\n yield (jnp.array(input_values),\n jnp.array(target_values),\n jnp.array([1] * len(target_values)))\n\n return stream_of_inputs_targets_weights\n\n\ndef _t5_glue_benchmark_and_split(benchmark_id):\n benchmark, mode = benchmark_id.rsplit('_', 1)\n if mode == 't':\n split = 'train'\n elif benchmark == 'mnli':\n split = 'validation_mismatched' if mode == 'e2' else 'validation_matched'\n else:\n split = 'validation'\n return benchmark, split\n# pylint: enable=invalid-name\n\n\ndef 
compute_single_result(op_name, num_args):\n \"\"\"An implementation of the most popular ops from the MathQA dataset.\"\"\"\n # See https://gitlab.cs.washington.edu/amini91/mathqa-categorization/\n # and specfically line 142 and following in new_DataStructure.py\n # for an implementation which covers more details.\n if op_name == 'add':\n return num_args[0] + num_args[1]\n elif op_name == 'circle_arc':\n return num_args[0] / 360 * math.pi * 2 * num_args[1]\n elif op_name == 'circle_area':\n return math.pi * num_args[0]**2\n elif op_name == 'circle_sector_area':\n return num_args[1] / 360 * math.pi * (num_args[0]**2)\n elif op_name == 'circumface':\n return 2 * math.pi * num_args[0]\n elif op_name == 'choose':\n # Older versions of scipy may require scipy.misc.comb.\n return scipy.special.comb(num_args[0], num_args[1]) # pylint: disable=unreachable\n elif op_name == 'cosine':\n return math.cos(num_args[0])\n elif op_name == 'cube_edge_by_volume':\n return num_args[0]**(1 / 3)\n elif op_name == 'combined_work':\n return 1 / (\n min(num_args[0], 1 / num_args[0]) + min(num_args[1], 1 / num_args[1]))\n elif op_name == 'count_interval':\n return num_args[0] - num_args[1] + 1\n elif op_name == 'diagonal':\n return math.sqrt(num_args[0]**2 + num_args[1]**2)\n elif op_name == 'divide' or op_name == 'speed':\n if num_args[1] != 0:\n return num_args[0] / num_args[1]\n else:\n return 0\n elif op_name == 'factorial':\n return math.factorial(min(15, int(num_args[0])))\n elif op_name == 'floor':\n return math.floor(num_args[0])\n elif op_name == 'find_work':\n return 1 / (\n max(\n min(num_args[0], 1 / num_args[0]), min(\n num_args[1], 1 / num_args[1])) - min(\n min(num_args[0], 1 / num_args[0]),\n min(num_args[1], 1 / num_args[1])))\n elif op_name == 'from_percent':\n return num_args[0] / 100\n elif op_name == 'gain_percent':\n return 100 + num_args[0]\n elif op_name == 'gcd':\n return scipy.gcd(int(num_args[0]), int(num_args[1]))\n elif op_name == 'inverse':\n if num_args[0] != 
0:\n return 1 / num_args[0]\n else:\n return 0\n elif op_name == 'lcm':\n return scipy.lcm(int(num_args[0]), int(num_args[1]))\n elif op_name == 'log':\n return math.log(max(1e-5, num_args[0]), 2)\n elif op_name == 'loss_percent':\n return 100 - num_args[0]\n elif op_name == 'max':\n return max(num_args[0], num_args[1])\n elif op_name == 'multiply':\n return num_args[0] * num_args[1]\n elif op_name == 'negate_percent':\n return 100 - num_args[0]\n elif op_name == 'negate':\n return -num_args[0]\n elif op_name == 'original_price_before_loss':\n return num_args[1] * 100 / (100 + 1e-5 - num_args[0])\n elif op_name == 'original_price_before_gain':\n return num_args[1] * 100 / (100 + num_args[0])\n elif op_name == 'permutation':\n n, m = min(num_args[0], num_args[1]), max(num_args[0], num_args[1])\n return math.factorial(int(m)) / math.factorial(int(m - n))\n elif op_name == 'power':\n return num_args[0]**min(num_args[1], 5)\n elif op_name == 'percent':\n return num_args[0] / 100 * num_args[1]\n elif op_name == 'price_after_gain' or op_name == 'p_after_gain':\n return (1 + num_args[0] / 100) * num_args[1]\n elif op_name == 'price_after_loss' or op_name == 'price_after_loss':\n return (1 - num_args[0] / 100) * num_args[1]\n elif op_name == 'quadrilateral_area':\n return num_args[0] * (num_args[1] + num_args[2]) / 2\n elif op_name == 'reminder':\n return num_args[0] % num_args[1]\n elif op_name == 'rectangle_area':\n return num_args[0] * num_args[1]\n elif op_name == 'rectangle_perimeter':\n return 2 * (num_args[0] + num_args[1])\n elif op_name == 'rhombus_area':\n return num_args[0] * num_args[1] / 2\n elif op_name == 'sine':\n return math.sin(num_args[0])\n elif op_name == 'sqrt':\n return math.sqrt(max(0, num_args[0]))\n elif op_name == 'subtract':\n return num_args[0] - num_args[1]\n elif op_name == 'square_edge_by_perimeter':\n return num_args[0] / 4\n elif op_name == 'square_edge_by_area':\n return math.sqrt(num_args[0])\n elif op_name == 'square_area':\n return 
num_args[0]**2\n elif op_name == 'surface_cube':\n return 6 * num_args[0]**2\n elif op_name == 'surface_rectangular_prism':\n return 2 * (\n num_args[0] * num_args[1] + num_args[0] * num_args[2] +\n num_args[1] * num_args[2])\n elif op_name == 'semi_circle_perimiter':\n return math.pi * num_args[0] + 2 * num_args[0]\n elif op_name == 'square_perimeter' or op_name == 'rhombus_perimeter':\n return 4 * num_args[0]\n elif op_name == 'surface_sphere':\n return 4 * math.pi * num_args[0]**2\n elif op_name == 'speed_ratio_steel_to_stream':\n return (num_args[0] + num_args[1]) / (num_args[0] - num_args[1])\n elif op_name == 'speed_in_still_water':\n return (num_args[0] + num_args[1]) / 2\n elif op_name == 'stream_speed':\n return (num_args[0] - num_args[1]) / 2\n elif op_name == 'trapezium_area':\n return num_args[0] * (num_args[1] + num_args[2]) / 2\n elif op_name == 'triangle_area':\n return num_args[0] * num_args[1] / 2\n elif op_name == 'triangle_perimeter':\n return num_args[0] + num_args[1] + num_args[2]\n elif op_name == 'triangle_area_three_edges':\n # Heron's formula\n s = (num_args[0] + num_args[1] + num_args[2]) / 2\n return math.sqrt(\n max(0,\n s * (s - num_args[0]) * (s - num_args[1]) * (s - num_args[2])))\n elif op_name == 'union_prob':\n return num_args[0] + num_args[1] - num_args[2]\n elif op_name == 'negate_prob':\n return 1 - num_args[0]\n elif op_name == 'volume_cube':\n return num_args[0]**3\n elif op_name == 'volume_cone':\n return math.pi * num_args[0]**2 * num_args[1] / 3\n elif op_name == 'volume_cylinder':\n return math.pi * num_args[0]**2 * num_args[1]\n elif op_name == 'volume_rectangular_prism':\n return num_args[0] * num_args[1] * num_args[2]\n elif op_name == 'volume_sphere':\n return 4 / 3 * math.pi * num_args[0]**3\n\n\ndef compute_result(list_op, list_num):\n \"\"\"Python execution of MathQA ops.\"\"\"\n # The last of temporary results is the final answer.\n temporary_results = []\n for op in list_op:\n op_name = op.split('(')[0]\n 
start_bracket = op.find('(')\n end_bracket = op.find(')')\n op_args = op[start_bracket + 1:end_bracket].split(',')\n num_args = []\n for arg in op_args:\n # The hash stands for a number stored in temporary_results.\n # For example #2 refers to the third temporary result.\n if arg[0] == '#':\n temp_index = int(\n re.findall(r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?',\n arg)[0])\n num_args.append(temporary_results[temp_index])\n # The n prefix stands for numbers which listed in list_num -\n # originally they were contained in the text.\n elif arg[0] == 'n':\n n_index = int(\n re.findall(r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?',\n arg)[0])\n num_args.append(list_num[n_index])\n elif arg[0] == 'c':\n if arg == 'const_pi':\n constant = math.pi\n elif arg == 'const_deg_to_rad':\n constant = math.pi / 180\n else:\n consts = re.findall(\n r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?', arg)\n if len(consts) == 1:\n constant = float(consts[0])\n else:\n constant1 = float(consts[0])\n constant2 = float('0.' 
+ consts[1])\n constant = constant1 + constant2\n num_args.append(constant)\n temporary_results.append(compute_single_result(op_name, num_args))\n return temporary_results\n\n\ndef single_op_to_python_command(op_name, num_args):\n \"\"\"An implementation of the most popular ops from the MathQA dataset.\"\"\"\n # See https://gitlab.cs.washington.edu/amini91/mathqa-categorization/\n # and specfically line 142 and following in new_DataStructure.py\n # for an implementation which covers more details.\n if op_name == 'add':\n return '{} + {}'.format(num_args[0], num_args[1])\n elif op_name == 'circle_arc':\n return '{} / 360 * math.pi * 2 * {}'.format(num_args[0], num_args[1])\n elif op_name == 'circle_area':\n return 'math.pi * {}**2'.format(num_args[0])\n elif op_name == 'circle_sector_area':\n return '{} / 360 * math.pi * ({}**2)'.format(num_args[1], num_args[0])\n elif op_name == 'circumface':\n return '2 * math.pi * {}'.format(num_args[0])\n elif op_name == 'choose':\n # Older versions of scipy may require scipy.misc.comb.\n return 'scipy.special.comb({}, {})'.format(num_args[0], num_args[1]) # pylint: disable=unreachable\n elif op_name == 'cosine':\n return 'math.cos({})'.format(num_args[0])\n elif op_name == 'cube_edge_by_volume':\n return '{}**(1 / 3)'.format(num_args[0])\n elif op_name == 'combined_work':\n return '1 / (min({}, 1 / {}) + min({}, 1 / {}))'.format(\n num_args[0], num_args[0], num_args[1], num_args[1])\n elif op_name == 'count_interval':\n return '{} - {} + 1'.format(num_args[0], num_args[1])\n elif op_name == 'diagonal':\n return 'math.sqrt({}**2 + {}**2)'.format(num_args[0], num_args[1])\n elif op_name == 'divide' or op_name == 'speed':\n # safe divide\n if num_args[1] != 0:\n return '{} / {}'.format(num_args[0], num_args[1])\n else:\n return '0'\n elif op_name == 'factorial':\n return 'math.factorial(min(15, int({})))'.format(num_args[0])\n elif op_name == 'floor':\n return 'math.floor({})'.format(num_args[0])\n elif op_name == 'find_work':\n 
return ('1 / (max(min({}, 1 / {}), min({}, 1 / {})) - min(min({}, 1 / {}), '\n 'min({}, 1 / {})))').format(num_args[0], num_args[0], num_args[1],\n num_args[1], num_args[0], num_args[0],\n num_args[1], num_args[1])\n elif op_name == 'from_percent':\n return '{} / 100'.format(num_args[0])\n elif op_name == 'gain_percent':\n return '100 + {}'.format(num_args[0])\n elif op_name == 'gcd':\n return 'scipy.gcd(int({}), int({}))'.format(num_args[0], num_args[1])\n elif op_name == 'inverse':\n # safe inverse\n if num_args[0] != 0:\n return '1 / {}'.format(num_args[0])\n else:\n return '0'\n elif op_name == 'lcm':\n return 'scipy.lcm(int({}), int({}))'.format(num_args[0], num_args[1])\n elif op_name == 'log':\n return 'math.log(max(1e-5, {}), 2)'.format(num_args[0])\n elif op_name == 'loss_percent':\n return '100 - {}'.format(num_args[0])\n elif op_name == 'max':\n return 'max({},{})'.format(num_args[0], num_args[1])\n elif op_name == 'multiply':\n return '{} * {}'.format(num_args[0], num_args[1])\n elif op_name == 'negate_percent':\n return '100 - {}'.format(num_args[0])\n elif op_name == 'negate':\n return '-{}'.format(num_args[0])\n elif op_name == 'original_price_before_loss':\n return '{} * 100 / (100 + 1e-5 - {}) # original price before loss'.format(\n num_args[1], num_args[0])\n elif op_name == 'original_price_before_gain':\n return '{} * 100 / (100 + {}) # original_price_before gain'.format(\n num_args[1], num_args[0])\n elif op_name == 'permutation':\n return ('math.factorial(int(max({}, {}))) / math.factorial(int(max({}, {}) '\n '- min({}, {}))) # find all permutations').format(\n num_args[0], num_args[1], num_args[0], num_args[1], num_args[0],\n num_args[1])\n elif op_name == 'power':\n return '{}**min({}, 5)'.format(num_args[0], num_args[1])\n elif op_name == 'percent':\n return '{} / 100 * {}'.format(num_args[0], num_args[1])\n elif op_name == 'price_after_gain' or op_name == 'p_after_gain':\n return '(1 + {} / 100) * {}'.format(num_args[0], num_args[1])\n elif 
op_name == 'price_after_loss' or op_name == 'price_after_loss':\n return '(1 - {} / 100) * {}'.format(num_args[0], num_args[1])\n elif op_name == 'quadrilateral_area':\n return '{} * ({} + {}) / 2 # quadrilateral area'.format(\n num_args[0], num_args[1], num_args[2])\n elif op_name == 'reminder':\n return '{} % {}'.format(num_args[0], num_args[1])\n elif op_name == 'rectangle_area':\n return '{} * {} # area of rectangle'.format(num_args[0], num_args[1])\n elif op_name == 'rectangle_perimeter':\n return '2 * ({} + {}) # perimetere of rectangle'.format(\n num_args[0], num_args[1])\n elif op_name == 'rhombus_area':\n return '{} * {} / 2'.format(num_args[0], num_args[1])\n elif op_name == 'sine':\n return 'math.sin({})'.format(num_args[0])\n elif op_name == 'sqrt':\n return 'math.sqrt(max(0, {}))'.format(num_args[0])\n elif op_name == 'subtract':\n return '{} - {}'.format(num_args[0], num_args[1])\n elif op_name == 'square_edge_by_perimeter':\n return '{} / 4. # square edge given perimeter'.format(num_args[0])\n elif op_name == 'square_edge_by_area':\n return 'math.sqrt({}) # square edge given area'.format(num_args[0])\n elif op_name == 'square_area':\n return '{}**2'.format(num_args[0])\n elif op_name == 'surface_cube':\n return '6 * {}**2 # surface of a cube'.format(num_args[0])\n elif op_name == 'surface_rectangular_prism':\n return '2 * ({} * {} + {} * {} + {} * {}) # surface of a rectangular prism'.format(\n num_args[0], num_args[1], num_args[0], num_args[2], num_args[1],\n num_args[2])\n elif op_name == 'semi_circle_perimiter':\n return 'math.pi * {} + 2 * {} # perimeter of a semi-circle'.format(\n num_args[0], num_args[0])\n elif op_name == 'square_perimeter' or op_name == 'rhombus_perimeter':\n return '4 * {}'.format(num_args[0])\n elif op_name == 'surface_sphere':\n return '4 * math.pi * {}**2'.format(num_args[0])\n elif op_name == 'speed_ratio_steel_to_stream':\n return '({} + {}) / ({} - {})'.format(num_args[0], num_args[1], num_args[0],\n num_args[1])\n 
elif op_name == 'speed_in_still_water':\n return '{} + {} / 2'.format(num_args[0], num_args[1])\n elif op_name == 'stream_speed':\n return '{} - {} / 2'.format(num_args[0], num_args[1])\n elif op_name == 'trapezium_area':\n return '{} * ({} + {}) / 2'.format(num_args[0], num_args[1], num_args[2])\n elif op_name == 'triangle_area':\n return '{} * {} / 2'.format(num_args[0], num_args[1])\n elif op_name == 'triangle_perimeter':\n return '{} + {} + {} # perimeter of a triangle'.format(\n num_args[0], num_args[1], num_args[2])\n elif op_name == 'triangle_area_three_edges':\n return (\"(lambda s, a, b, c: math.sqrt(max(0, s * (s - a) * (s - b) * (s - \"\n \"c))))(({} + {} + {}) / 2, {}, {}, {}) # Heron's formula\").format(\n num_args[0], num_args[1], num_args[2], num_args[0], num_args[1],\n num_args[2])\n elif op_name == 'union_prob':\n return '{} + {} - {}'.format(num_args[0], num_args[1], num_args[2])\n elif op_name == 'negate_prob':\n return '1 - {}'.format(num_args[0])\n elif op_name == 'volume_cube':\n return '{}**3'.format(num_args[0])\n elif op_name == 'volume_cone':\n return 'math.pi * {}**2 * {} / 3'.format(num_args[0], num_args[1])\n elif op_name == 'volume_cylinder':\n return 'math.pi * {}**2 * {}'.format(num_args[0], num_args[1])\n elif op_name == 'volume_rectangular_prism':\n return '{} * {} * {}'.format(num_args[0], num_args[1], num_args[2])\n elif op_name == 'volume_sphere':\n return '4 / 3 * math.pi * {}**3'.format(num_args[0])\n\n\ndef compute_program(list_op):\n \"\"\"Python execution of MathQA ops.\"\"\"\n # The last of temporary results is the final answer.\n temporary_results = []\n num_op = 0\n for op in list_op:\n op_name = op.split('(')[0]\n start_bracket = op.find('(')\n end_bracket = op.find(')')\n op_args = op[start_bracket + 1:end_bracket].split(',')\n num_args = []\n for arg in op_args:\n # The hash stands for a number stored in temporary_results.\n # For example #2 refers to the third temporary result.\n if arg[0] == '#':\n temp_index = 
int(
            re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?',
                       arg)[0])
        num_args.append('t{}'.format(temp_index))
      # The n prefix stands for numbers which listed in list_num -
      # originally they were contained in the text.
      elif arg[0] == 'n':
        # n_index = int(
        #     re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?',
        #         arg)[0])
        # The 'n<k>' token is kept verbatim: the generated program defines
        # variables named n0, n1, ... before these lines run.
        num_args.append(arg)
      elif arg[0] == 'c':
        # 'c' arguments are named constants, e.g. const_pi or const_3_13
        # (the underscore separates integer and fractional parts).
        if arg == 'const_pi':
          constant = math.pi
        elif arg == 'const_deg_to_rad':
          constant = math.pi / 180
        else:
          consts = re.findall(
              r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg)
          if len(consts) == 1:
            constant = float(consts[0])
          else:
            # Two numeric groups: integer part and fractional part.
            constant1 = float(consts[0])
            constant2 = float('0.' + consts[1])
            constant = constant1 + constant2
        num_args.append(str(constant))
    temporary_result = 't{} = {}'.format(
        num_op, single_op_to_python_command(op_name, num_args))
    temporary_results.append(temporary_result)
    num_op += 1
  return temporary_results


def compute_nums(question):
  """Finds numbers in a string and convert them to floats."""
  # The funny looking replace is needed to deal with numbers such as 4,000
  # TODO(henrykm) deal with numbers written as words "one", "two", ...
  return [
      float(num.replace(',', '')) for num in re.findall(
          r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', question)
  ]


def compute_ops(linear_formula):
  # Splits a linear_formula such as 'add(n0,n1)|divide(#0,n2)|' into the
  # list of individual operation strings.
  list_op = linear_formula.split('|')
  # In some cases the list of operations contains a superflous last element,
  # namely an empty string.
  if not list_op[-1]:
    list_op = list_op[:-1]
  return list_op


def process_single_mathqa_example(example):
  """Execute a single example and verify coherence of a MathQA problem.

  Args:
    example: a dictionary with the following fields: Problem - a natural
      language formulation of the problem Rationale - a natural language
      solution of the problem options - five possible answers ( a) b) c) d) 
and\n e) ) correct - the letter representing the correct answer\n annotated_formula - formula representing the full solution linear_formula\n - a string of operations separated by the | character, e.g.\n multiply(n2,const_100)|multiply(n0,n1)|divide(#0,#1)|\n multiply(#2,const_100)|divide(#3,#1)| category - a natural language\n description of the category to which a given problem belongs.\n\n Returns:\n answer_num: numerical answer contained in the example\n python_result: numerical answers computed in Python, including intermediate\n results. The answer_num should be close python_result[-1]\n list_op: list of arithmetic operations\n list_num: list of identified numbers in the text\n \"\"\"\n question = example['Problem']\n list_num = compute_nums(question)\n list_op = compute_ops(example['linear_formula'])\n answers = example['options']\n correct_answer = example['correct']\n index = answers.find('{} )'.format(correct_answer))\n answer_string = re.findall(\n r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?', answers[index:])\n # The if statement deals with empty lists - they are needed to treat\n # a correct non-numerical answer e) None of the above. 
Here we do not want\n # non-numerical answers, hence we return None.\n if answer_string:\n answer_num = float(\n re.findall(r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?',\n answers[index:])[0].replace(',', ''))\n else:\n return None\n # The if statements below deals with answers written as fractions e.g.\n # a ) 1 / 2 , b ) 1 / 3 , c ) 1 / 5 , d ) 10 / 30 , e ) 2 / 5 ?\n index_end_of_answer = index + len(str(answer_num)) + 3\n if index_end_of_answer < len(answers) and answers[index_end_of_answer] == '/':\n answer_denom = float(\n re.findall(r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?',\n answers[index_end_of_answer:])[0].replace(',', ''))\n answer_num /= answer_denom\n python_result = compute_result(list_op, list_num)\n python_program = compute_program(list_op)\n return answer_num, python_result, python_program, list_op, list_num\n\n\ndef convert_float_to_mathqa(number):\n floor = int(float(number))\n if floor == number:\n return 'const_' + str(floor)\n else:\n return 'const_' + str(floor) + '_' + str(number)[len(str(floor)) + 1:]\n\n\ndef convert_to_subtract(const_string):\n return 'subtract({},const_0)'.format(const_string)\n\n\ndef execute_mathqa_dsl_program(problem, dsl_code):\n \"\"\"Executes the DSL code for a given problem.\n\n Args:\n problem: problem formulation (needed to get parameters).\n dsl_code: DSL code.\n\n Returns:\n the result of executing of the DSL code.\n \"\"\"\n n0_loc = problem.find('n0')\n list_num = compute_nums(problem[n0_loc:])\n # The list contains _all_ numbers in the string, hence in particular\n # for n0 = 2.0 n1 = 3.0 we are getting list_num = [0.0, 2.0, 1.0, 3.0],\n # so that below we are filtering the odd occurrences.\n assert len(list_num) % 2 == 0\n list_num = [list_num[2 * i + 1] for i in range(int(len(list_num) / 2))]\n\n # dsl_code is a list of strings; since all DSL programs are single liners,\n # we need to guess the correct line. 
For now we use the same location as in\n # in the ground truth examples, that is the first line.\n list_op = compute_ops(dsl_code[0])\n\n try:\n results = compute_result(list_op, list_num)[-1]\n except: # pylint: disable=bare-except\n results = None\n return results\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except: # pylint: disable=bare-except\n return False\n\n\ndef execute_mathqa_program(problem, program):\n \"\"\"Executes the DSL code for a given problem.\n\n Args:\n problem: problem formulation (not needed, but we want the same API as\n in the DSL case).\n program: Python code.\n\n Returns:\n the result of executing of the Python code.\n \"\"\"\n del problem # problem only needed in the DSL version.\n # Programs are lists of strings. We need to concatenate them in order to exec.\n program = '\\n'.join(program)\n var_dict = {}\n try:\n # The logic of this is the following: if exec with timeout is working\n # without exceptions, then we can call exec again and gather the variables.\n exec(program, globals(), var_dict) # pylint: disable=exec-used\n if 'answer' in var_dict and is_number(var_dict['answer']):\n return float(var_dict['answer'])\n else:\n return None\n except: # pylint: disable=bare-except\n return None\n\n\n@gin.configurable(module='trax.data')\ndef CreateMathQAInputs( # pylint: disable=invalid-name\n dataset_path=None,\n train=True,\n test=False,\n challenge=False,\n tolerance=0.01,\n cumulative=True,\n python_code=False,\n full_dict=False,\n partial_results=True,\n nlp_rationale=False,\n correct_answer=False,\n answer_in_mathqa_format=True,\n correct_answer_given_reasoning=False,\n category=False,\n order_prediction=False,\n reduced_operation_name=True,\n qed=False):\n \"\"\"Prepares MathQA inputs.\n\n The generation procedure leaves a lot parameters to be set by the user.\n Currently we support only correct examples in the following sense:\n python execution agrees with the declared answer up to 1%.\n\n According to this criterion 
wrong examples such as\n problem: calculate 85184 ÷ ? = 352\n operations ['multiply(n0,n1)']\n are ignored (this should be divide(n0,n1) in this case).\n\n Args:\n dataset_path: a path with the MathQA dataset.\n train: if True, then generate training examples; if train, test and\n challenge are set to False generate validation examples.\n test: if train is set to False and test is set to True,\n then generate test examples.\n challenge: if train and test are set to False and challenge is set to True,\n then generate challenge examples.\n tolerance: if for a given example relative difference between Python result\n and the result declared in the dataset exceeds the level, then the example\n is dropped; tolerances ranging from 0.1 to 0.001 yield from 18K to 21K\n examples.\n cumulative: if set to True, then generate examples in the format input -\n problem + numbers + op1 + op2 + op3 target - op4 If set to False, then\n examples are in the format input - problem + numbers target - all\n operations.\n python_code: if set to True, then generates python code instead of\n MathQA commands.\n full_dict: if set to True, then Python examples are returned together with\n the DSL code and the NLP rationale.\n partial_results: if set to True, then partial results will be reported as\n part of the input, e.g. input - problem + numbers + op1 + #1 + op2 + #2 +\n op3 + #3, target - op4, where #k is the partial results from operation\n opk. Activated only in cumulative set to True.\n nlp_rationale: if set to True, then input is the problem and the target is\n the nlp rationale.\n correct_answer: if set to True, then input is the problem plus all possible\n answers and the target is the correct answer.\n answer_in_mathqa_format: if set to True, then convert numerical answer to\n the MathQA format and wrap it in the subtract operation.\n E.g. 
\"3.13\" is converted to \"subtract(const_3_13,const_0)\".\n correct_answer_given_reasoning: if set to True, then input is the problem\n plus linear formula plus all possible answers and the target is the\n correct answer.\n category: if set to True, then input is the problem and the target is its\n category.\n order_prediction: if set to True, then input is the problem and a list of\n all operations; with probability 0.5 two operations are swapped; the task\n consists in detecting whether the operations were swapped. See the\n order prediction task in CreateAquaInputs in this file.\n reduced_operation_name: If set to True, then in order prediction consider\n only the operation token without parameterers.\n qed: if set to True, then the reasoning is finished with an additional\n operation qed.\n\n Returns:\n mathqa_yield_examples: a generator of MathQA examples; the generator yields\n non-tokenized examples - they can be further processed using for example\n the tokenize function from this module\n \"\"\"\n if train:\n dataset_path = os.path.join(dataset_path, 'train.json')\n elif test:\n dataset_path = os.path.join(dataset_path, 'test.json')\n elif challenge:\n dataset_path = os.path.join(dataset_path, 'challenge_test.json')\n else:\n dataset_path = os.path.join(dataset_path, 'dev.json')\n # Opening with GFile allows to use remotely stored files, e.g.\n # in a gs bucket.\n dataset_handle = tf.io.gfile.GFile(dataset_path, 'r')\n dataset = json.load(dataset_handle)\n\n def mathqa_yield_examples(generator=None):\n del generator\n while True:\n for example in itertools.cycle(dataset):\n result = process_single_mathqa_example(example)\n # TODO(henrykm): Remove the first two ifs.\n if not result:\n continue\n answer_num, python_result, python_program, list_op, list_num = result\n if not answer_num or not python_result[-1]:\n continue\n if qed:\n list_op.append('qed')\n if math.isclose(answer_num, python_result[-1], rel_tol=tolerance):\n input_prefix = 
example['Problem']\n for i in range(len(list_num)):\n input_prefix += ' n{} = {}'.format(i, list_num[i])\n if cumulative:\n for i in range(len(list_op)):\n input_values = input_prefix\n target_values = list_op[i]\n input_prefix += ' ' + list_op[i]\n if partial_results:\n input_prefix += ' #{} = {}'.format(i, answer_num)\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif python_code:\n input_values = '# ' + input_prefix\n target_values = ''\n for command in python_program:\n if 'math' in command:\n target_values += 'import math\\n'\n break\n for command in python_program:\n if 'scipy' in command:\n target_values += 'import scipy\\n'\n break\n for i in range(len(list_num)):\n target_values += 'n{} = {}\\n'.format(i, list_num[i])\n target_values += '\\n'.join(python_program[:-1])\n final_line = python_program[-1].split('=')[1]\n target_values += '\\nanswer ={}'.format(final_line)\n var_dict = {}\n # We generate a python code and want to check whether the answer\n # is coorect.\n exec(target_values, globals(), var_dict) # pylint: disable=exec-used\n if math.isclose(answer_num, var_dict['answer'], rel_tol=tolerance):\n if full_dict:\n yield input_values, target_values, example[\n 'linear_formula'], example['Rationale']\n else:\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif nlp_rationale:\n input_values = 'infer full rationale: ' + input_prefix\n target_values = example['Rationale']\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif correct_answer:\n input_values = 'infer correct answer: ' + input_prefix\n input_values += ' ' + example['options']\n if answer_in_mathqa_format:\n target_values = str(answer_num)\n target_values = convert_to_subtract(\n convert_float_to_mathqa(target_values))\n else:\n target_values = example['correct']\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif correct_answer_given_reasoning:\n input_values = 'infer correct 
answer given reasoning: ' + input_prefix\n input_values += ' ' + ' '.join(list_op) + ' ' + example['options']\n target_values = example['correct']\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif category:\n input_values = 'infer category: ' + input_prefix\n target_values = example['category']\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif order_prediction:\n if np.random.uniform() < 0.5 and len(list_op) >= 2:\n idx = range(len(list_op))\n i1, i2 = random.sample(idx, 2)\n list_op[i1], list_op[i2] = list_op[i2], list_op[i1]\n target_values = 'not_ordered'\n else:\n target_values = 'ordered'\n if reduced_operation_name:\n list_op = [op.split('(')[0] for op in list_op]\n input_values = 'order prediction: ' + input_prefix + ' ' + ' '.join(\n list_op)\n yield input_values, target_values, np.array([1] *\n len(target_values))\n else:\n input_values = 'infer full calculation: ' + input_prefix\n target_values = example['linear_formula']\n yield input_values, target_values, np.array([1] *\n len(target_values))\n\n return mathqa_yield_examples\n\n\n@gin.configurable(module='trax.data')\ndef CreateAquaInputs( # pylint: disable=invalid-name\n dataset_path=None,\n train=True,\n cumulative=False,\n rationale=False,\n correct_answer=False,\n correct_answer_given_reasoning=False,\n partial_reasoning=True,\n order_prediction=False):\n \"\"\"Prepares Aqua inputs.\n\n Args:\n dataset_path: a path with the Aqua dataset.\n train: if True, then generate training examples, otherwhise generate\n validation examples (the dataset has also a test set).\n cumulative: if set to True, then generate examples in the format input -\n problem + step1 + step3 + step3 target - step4 If set to False, then\n examples are in the format input - problem, target - all operations.\n rationale: if set to True, then input is the problem and the target is the\n rationale.\n correct_answer: if set to True, then input is the problem plus all 
possible\n answers and the target is the correct answer.\n correct_answer_given_reasoning: if set to True, then input is the problem\n plus reasoning (aka rationale) plus all possible answers and the target is\n the correct answer.\n partial_reasoning: an additional option related to\n correct_answer_given_reasoning; if set to True, then we take a random\n prefix of the reasoning.\n order_prediction: if set to True, then input is the problem and a list of\n all operations; with probability 0.5 two operations are swapped; the task\n consists in detecting whether the operations were swapped. A similar\n additional task was considered in https://arxiv.org/pdf/1909.11942.pdf and\n in a recent work of Piotr Piękos, henrykm@ and mateuszm@.\n\n Returns:\n aqua_yield_examples: a generator of Aqua examples; the generator yields\n non-tokenized examples - they can be further processed using for example\n the tokenize function from this module\n \"\"\"\n if train:\n dataset_path = os.path.join(dataset_path, 'train.json')\n else:\n dataset_path = os.path.join(dataset_path, 'dev.json')\n # Opening with GFile allows to use remotely stored files, e.g.\n # in a gs bucket.\n dataset_handle = tf.io.gfile.GFile(dataset_path, 'r')\n dataset = []\n for line in dataset_handle:\n dataset.append(json.loads(line))\n\n def aqua_yield_examples(generator=None):\n del generator\n while True:\n for example in itertools.cycle(dataset):\n input_prefix = example['question']\n steps = example['rationale'].split('\\n')\n if cumulative:\n for i in range(len(steps)):\n input_values = 'infer cumulative rationale: ' + input_prefix\n target_values = steps[i]\n input_prefix += ' ' + steps[i]\n yield input_values, target_values, np.array([1] *\n len(target_values))\n elif rationale:\n input_values = 'infer full rationale: ' + input_prefix\n target_values = example['rationale']\n yield input_values, target_values, np.array([1] * len(target_values))\n elif correct_answer:\n input_values = 'infer correct 
answer: ' + input_prefix\n input_values += ' ' + ' '.join(example['options'])\n target_values = example['correct']\n yield input_values, target_values, np.array([1] * len(target_values))\n elif correct_answer_given_reasoning:\n input_values = 'infer correct answer given reasoning: ' + input_prefix\n if partial_reasoning:\n reasoning_list = example['rationale'].split('\\n')\n reasoning_list = reasoning_list[0:np.random\n .randint(0, len(reasoning_list))]\n reasoning = '\\n'.join(reasoning_list)\n else:\n reasoning = example['rationale']\n input_values += ' ' + example['rationale'] + ' ' + ' '.join(\n example['options'])\n target_values = example['correct']\n yield input_values, target_values, np.array([1] * len(target_values))\n elif order_prediction:\n if np.random.uniform() < 0.5 and len(steps) >= 2:\n idx = range(len(steps))\n i1, i2 = random.sample(idx, 2)\n steps[i1], steps[i2] = steps[i2], steps[i1]\n target_values = 'not_ordered'\n else:\n target_values = 'ordered'\n input_values = 'order prediction: ' + input_prefix + ' ' + '\\n'.join(\n steps)\n yield input_values, target_values, np.array([1] * len(target_values))\n else:\n raise ValueError(\n 'One of the boolean parameters of the Aqua generator must be set to True.'\n )\n\n return aqua_yield_examples\n\n\n@gin.configurable(module='trax.data')\ndef CreateDropInputs( # pylint: disable=invalid-name\n train=True, mathqa_format=False):\n \"\"\"Prepares Drop inputs.\n\n Args:\n train: if True, then generate training examples, otherwhise generate\n validation examples (the dataset has also a test set).\n mathqa_format: if True, then floats in targets are converted to the\n the MathQA convention and wrapped in the subtract operation.\n E.g. 
\"3.13\" is converted to \"subtract(const_3_13,const_0)\".\n\n Returns:\n drop_yield_examples: a generator of Drop examples; the generator yields\n non-tokenized examples - they can be further processed using for example\n the tokenize function from this module\n \"\"\"\n if train:\n dataset = tfds.load(name='drop', split='train')\n else:\n dataset = tfds.load(name='drop', split='dev')\n dataset = tfds.as_numpy(dataset)\n\n def drop_yield_examples(generator=None):\n del generator\n while True:\n for example in itertools.cycle(dataset):\n input_values = 'drop question: ' + example['passage'].decode(\n 'utf-8') + ' ' + example['question'].decode('utf-8')\n target_values = example['answer'].decode('utf-8')\n # Apparently the dataset has some empty \"target values\" -\n # when such a value is encountered, the Tokenizer decides to assign\n # to it a float32 tensor and the training fails.\n if not target_values:\n continue\n if mathqa_format:\n if target_values.replace('.', '', 1).isdigit():\n target_values = convert_to_subtract(\n convert_float_to_mathqa(target_values))\n yield input_values, target_values, np.array(\n [1] * len(target_values), dtype=np.int32)\n\n return drop_yield_examples\n\n\n@gin.configurable(module='trax.data')\ndef CreateAnnotatedDropInputs( # pylint: disable=invalid-name\n dataset_path=None,\n train=True,\n single_file=True,\n unique=False,\n total_number_of_samples=None,\n percentile=1.):\n r\"\"\"Prepares annotated Drop inputs.\n\n Example of an annotated input which can be used with this interface:\n\n {\n 'passage': 'The Armenian Prelature of Cyprus was established in 973 by\n Catholicos Khatchig I. Historically, the Prelature has been under the\n jurisdiction of the Catholicosate of the Great House of Cilicia, while today\n it is the oldest theme that falls under its jurisdiction. Since 2014 the\n Prelate, a Catholicosal Vicar General, has been Archbishop Nareg Alemezian.\n The parish priest in Nicosia is Fr. 
Momik Habeshian, while the parish priest\n in Larnaca and Limassol is Fr. Mashdots Ashkarian. For centuries, the\n Prelature building was located within the Armenian compound in Victoria\n street in walled Nicosia; when that area was taken over by Turkish-Cypriot\n extremists in 1963-1964, the Prelature was temporarily housed in Aram\n Ouzounian street and, later on, in Kyriakos Matsis street in Ayios\n Dhometios. Thanks to the efforts of Bishop Zareh Aznavorian and with\n financial aid from the Evangelical Church of Westphalia, the new Prelature\n building was erected in 1983, next to the Virgin Mary church and the Nareg\n school in Nicosia, by architects Athos Dikaios & Alkis Dikaios; it was\n officially inaugurated on 4 March 1984, during the pastoral visit of\n Catholicos Karekin II. By initiative of Archbishop Varoujan Hergelian, in\n 1998 the basement of the building was renovated and the \"Vahram Utidjian\"\n Hall was formed; previously a store room, it became a reality from the\n proceeds of the auction in 1994 of the art collection that Vahram Utidjian\n had donated to the Prelature in 1954. It was inaugurated on 3 February 1999\n by Catholicos Aram I; numerous charity, communal and cultural events take\n place there. The Prelature\\'s consistory houses a collection of\n ecclesiastical relics, some of which were previously in the old Virgin Mary\n church or the Magaravank.',\n 'question': 'How many years after the Vahram Utidjian was donated to the\n Prelature was it sold at an auction?',\n 'answer': 40,\n 'calculation': 'subtract(n8,n9)'\n }\n\n In this example the calculation is formulated using the notation from the\n MathQA dataset, but this is not required. subtract(n8,n9) means that the\n answer 40 can be obtained through the substraction of the 9th and and the 10th\n number in the input. The input consists of the passage concatened with the\n question. 
The annotations can be generated using, for example, a method\n from the paper https://arxiv.org/abs/1909.00109.\n\n Args:\n dataset_path: a path with the Aqua dataset.\n train: if True, then generate training examples, otherwhise generate\n validation examples (the dataset has also a test set).\n single_file: if True, then look just for one file. If False, read all\n json files in a given directory and assume that each file contains one\n example. Applied only to training data.\n unique: if set to True, then the generator will provide at most one question\n per passage.\n total_number_of_samples: if set to a positive integer, then the total number\n of unique samples will be bounded total_number_of_samples.\n percentile: the percentile of the train dataset used for training; default\n set to 1., though setting to a lower value can be interesting when\n combined train is combined with another source of data.\n\n Returns:\n drop_annotated_yield_examples: a generator of annotated Drop examples;\n the generator yields non-tokenized examples - they can be further processed\n using for example the tokenize function from this module.\n \"\"\"\n if train:\n if single_file:\n dataset_path = os.path.join(dataset_path, 'train_annotated.json')\n else:\n dataset_path = os.path.join(dataset_path, 'dev_annotated.json')\n\n def load_dataset():\n dataset = []\n if single_file:\n # Opening with GFile allows to use remotely stored files, e.g.\n # in a gs bucket.\n dataset_handle = tf.io.gfile.GFile(dataset_path, 'r')\n for line in dataset_handle:\n dataset.append(json.loads(line))\n else:\n all_files = tf.io.gfile.listdir(dataset_path)\n for filename in all_files:\n if 'json' in filename:\n print('Loading data from file {}'.format(filename))\n with tf.io.gfile.GFile(os.path.join(dataset_path, filename)) as f:\n for line in f:\n dataset.append(json.loads(line))\n print('The total size of the dataset {}'.format(len(dataset)))\n return dataset[:int(len(dataset) * percentile)]\n\n def 
drop_annotated_yield_examples(generator=None):\n del generator\n while True:\n passages = set()\n unique_examples = set()\n # Notice that below we enable a poor man RL loop\n # aka the DAgger algorithm: https://arxiv.org/pdf/1011.0686.pdf\n # tl;dr: after parsing all examples we re-load the dataset - this\n # may become handy if a prediction service generates new examples.\n dataset = load_dataset()\n for example in dataset:\n # If total_number_of_samples is not None and we have reached this\n # number of samples, then we re-load the dataset.\n if total_number_of_samples:\n if len(unique_examples) >= total_number_of_samples:\n break\n # Do we have a pre-calculated input in the example?\n if 'input' in example.keys():\n question = example['input']\n # Remove the old prompt\n question = question[question.find(':') + 2:]\n else:\n # If input is not present, then we expect that this is an\n # original drop example.\n if unique and example['passage'] in passages:\n continue\n passages.add(example['passage'])\n question = example['passage'] + ' ' + example['question']\n list_num = [\n float(num.replace(',', '').rstrip('.').lstrip('.')) # pylint: disable=g-complex-comprehension\n for num in re.findall(\n r'[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?',\n question)\n ]\n for i in range(len(list_num)):\n question += ' n{} = {}'.format(i, list_num[i])\n input_values = 'drop annotated question: ' + question\n target_values = example['calculation']\n unique_examples.add((input_values, target_values))\n yield input_values, target_values, np.array(\n [1] * len(target_values), dtype=np.int32)\n\n return drop_annotated_yield_examples\n"
] |
[
[
"tensorflow.compat.v1.test.get_temp_dir",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.nest.pack_sequence_as",
"tensorflow.image.resize_with_crop_or_pad",
"tensorflow.io.gfile.GFile",
"numpy.random.choice",
"tensorflow.image.random_flip_left_right",
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.zeros_like",
"numpy.random.random",
"tensorflow.strings.unicode_decode",
"tensorflow.cast",
"tensorflow.compat.v1.gfile.GFile",
"numpy.concatenate",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.nest.flatten",
"tensorflow.io.gfile.listdir",
"numpy.random.randint",
"tensorflow.squeeze",
"numpy.arange",
"numpy.int32",
"scipy.special.comb",
"numpy.logical_or",
"numpy.array",
"tensorflow.zeros",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.io.gfile.makedirs",
"tensorflow.size",
"tensorflow.image.random_crop",
"tensorflow.print",
"numpy.random.uniform",
"tensorflow.device"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.