Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
ChenChunShenG19/PyTorch-StackGAN
[ "1aadc6488aafa4aaf3a883667215f4684a684f71" ]
[ "src/trainer.py" ]
[ "from __future__ import print_function\nfrom six.moves import range\nfrom PIL import Image\n\nimport torch.backends.cudnn as cudnn\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport os\nimport time\n\nimport numpy as np\nimport torchfile\n\nfrom miscc.config import cfg\nfrom miscc.utils import mkdir_p\nfrom miscc.utils import weights_init\nfrom miscc.utils import save_img_results, save_model\nfrom miscc.utils import KL_loss\nfrom miscc.utils import compute_discriminator_loss, compute_generator_loss\n\nfrom torch.utils.tensorboard import summary\nfrom torch.utils.tensorboard import FileWriter\n\n\nclass GANTrainer(object):\n def __init__(self, output_dir):\n if cfg.TRAIN.FLAG:\n self.model_dir = os.path.join(output_dir, 'Model')\n self.image_dir = os.path.join(output_dir, 'Image')\n self.log_dir = os.path.join(output_dir, 'Log')\n mkdir_p(self.model_dir)\n mkdir_p(self.image_dir)\n mkdir_p(self.log_dir)\n self.summary_writer = FileWriter(self.log_dir)\n\n self.max_epoch = cfg.TRAIN.MAX_EPOCH\n self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL\n\n s_gpus = cfg.GPU_ID.split(',')\n self.gpus = [int(ix) for ix in s_gpus]\n self.num_gpus = len(self.gpus)\n self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus\n torch.cuda.set_device(self.gpus[0])\n cudnn.benchmark = True\n\n # ############# For training stageI GAN #############\n def load_network_stageI(self):\n from model import STAGE1_G, STAGE1_D\n netG = STAGE1_G()\n netG.apply(weights_init)\n print(netG)\n netD = STAGE1_D()\n netD.apply(weights_init)\n print(netD)\n\n if cfg.NET_G != '':\n state_dict = \\\n torch.load(cfg.NET_G,\n map_location=lambda storage, loc: storage)\n netG.load_state_dict(state_dict)\n print('Load from: ', cfg.NET_G)\n if cfg.NET_D != '':\n state_dict = \\\n torch.load(cfg.NET_D,\n map_location=lambda storage, loc: storage)\n netD.load_state_dict(state_dict)\n print('Load from: ', cfg.NET_D)\n if cfg.CUDA:\n netG.cuda()\n netD.cuda()\n return netG, netD\n\n # ############# For training stageII GAN #############\n def load_network_stageII(self):\n from model import STAGE1_G, STAGE2_G, STAGE2_D\n\n Stage1_G = STAGE1_G()\n netG = STAGE2_G(Stage1_G)\n netG.apply(weights_init)\n print(netG)\n if cfg.NET_G != '':\n state_dict = \\\n torch.load(cfg.NET_G,\n map_location=lambda storage, loc: storage)\n netG.load_state_dict(state_dict)\n print('Load from: ', cfg.NET_G)\n elif cfg.STAGE1_G != '':\n state_dict = \\\n torch.load(cfg.STAGE1_G,\n map_location=lambda storage, loc: storage)\n netG.STAGE1_G.load_state_dict(state_dict)\n print('Load from: ', cfg.STAGE1_G)\n else:\n print(\"Please give the Stage1_G path\")\n return\n\n netD = STAGE2_D()\n netD.apply(weights_init)\n if cfg.NET_D != '':\n state_dict = \\\n torch.load(cfg.NET_D,\n map_location=lambda storage, loc: storage)\n netD.load_state_dict(state_dict)\n print('Load from: ', cfg.NET_D)\n print(netD)\n\n if cfg.CUDA:\n netG.cuda()\n netD.cuda()\n return netG, netD\n\n def train(self, data_loader, stage=1):\n if stage == 1:\n netG, netD = self.load_network_stageI()\n else:\n netG, netD = self.load_network_stageII()\n\n nz = cfg.Z_DIM\n batch_size = self.batch_size\n noise = Variable(torch.FloatTensor(batch_size, nz))\n fixed_noise = \\\n Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1),\n volatile=True)\n real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))\n fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))\n if cfg.CUDA:\n noise, fixed_noise = noise.cuda(), 
fixed_noise.cuda()\n real_labels, fake_labels = real_labels.cuda(), fake_labels.cuda()\n\n generator_lr = cfg.TRAIN.GENERATOR_LR\n discriminator_lr = cfg.TRAIN.DISCRIMINATOR_LR\n lr_decay_step = cfg.TRAIN.LR_DECAY_EPOCH\n optimizerD = \\\n optim.Adam(netD.parameters(),\n lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999))\n netG_para = []\n for p in netG.parameters():\n if p.requires_grad:\n netG_para.append(p)\n optimizerG = optim.Adam(netG_para,\n lr=cfg.TRAIN.GENERATOR_LR,\n betas=(0.5, 0.999))\n count = 0\n for epoch in range(self.max_epoch):\n start_t = time.time()\n if epoch % lr_decay_step == 0 and epoch > 0:\n generator_lr *= 0.5\n for param_group in optimizerG.param_groups:\n param_group['lr'] = generator_lr\n discriminator_lr *= 0.5\n for param_group in optimizerD.param_groups:\n param_group['lr'] = discriminator_lr\n\n for i, data in enumerate(data_loader, 0):\n ######################################################\n # (1) Prepare training data\n ######################################################\n real_img_cpu, txt_embedding = data\n real_imgs = Variable(real_img_cpu)\n txt_embedding = Variable(txt_embedding)\n if cfg.CUDA:\n real_imgs = real_imgs.cuda()\n txt_embedding = txt_embedding.cuda()\n\n #######################################################\n # (2) Generate fake images\n ######################################################\n noise.data.normal_(0, 1)\n inputs = (txt_embedding, noise)\n _, fake_imgs, mu, logvar = \\\n nn.parallel.data_parallel(netG, inputs, self.gpus)\n\n ############################\n # (3) Update D network\n ###########################\n netD.zero_grad()\n errD, errD_real, errD_wrong, errD_fake = \\\n compute_discriminator_loss(netD, real_imgs, fake_imgs,\n real_labels, fake_labels,\n mu, self.gpus)\n errD.backward()\n optimizerD.step()\n ############################\n # (2) Update G network\n ###########################\n netG.zero_grad()\n errG = compute_generator_loss(netD, fake_imgs,\n real_labels, mu, self.gpus)\n kl_loss = KL_loss(mu, logvar)\n errG_total = errG + kl_loss * cfg.TRAIN.COEFF.KL\n errG_total.backward()\n optimizerG.step()\n\n count = count + 1\n if i % 100 == 0:\n summary_D = summary.scalar('D_loss', errD.data[0])\n summary_D_r = summary.scalar('D_loss_real', errD_real)\n summary_D_w = summary.scalar('D_loss_wrong', errD_wrong)\n summary_D_f = summary.scalar('D_loss_fake', errD_fake)\n summary_G = summary.scalar('G_loss', errG.data[0])\n summary_KL = summary.scalar('KL_loss', kl_loss.data[0])\n\n self.summary_writer.add_summary(summary_D, count)\n self.summary_writer.add_summary(summary_D_r, count)\n self.summary_writer.add_summary(summary_D_w, count)\n self.summary_writer.add_summary(summary_D_f, count)\n self.summary_writer.add_summary(summary_G, count)\n self.summary_writer.add_summary(summary_KL, count)\n\n # save the image result for each epoch\n inputs = (txt_embedding, fixed_noise)\n lr_fake, fake, _, _ = \\\n nn.parallel.data_parallel(netG, inputs, self.gpus)\n save_img_results(real_img_cpu, fake, epoch, self.image_dir)\n if lr_fake is not None:\n save_img_results(None, lr_fake, epoch, self.image_dir)\n end_t = time.time()\n print('''[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f Loss_KL: %.4f\n Loss_real: %.4f Loss_wrong:%.4f Loss_fake %.4f\n Total Time: %.2fsec\n '''\n % (epoch, self.max_epoch, i, len(data_loader),\n errD.data[0], errG.data[0], kl_loss.data[0],\n errD_real, errD_wrong, errD_fake, (end_t - start_t)))\n if epoch % self.snapshot_interval == 0:\n save_model(netG, netD, epoch, self.model_dir)\n #\n 
save_model(netG, netD, self.max_epoch, self.model_dir)\n #\n self.summary_writer.close()\n\n def sample(self, datapath, stage=1):\n if stage == 1:\n netG, _ = self.load_network_stageI()\n else:\n netG, _ = self.load_network_stageII()\n netG.eval()\n\n # Load text embeddings generated from the encoder\n t_file = torchfile.load(datapath)\n captions_list = t_file.raw_txt\n embeddings = np.concatenate(t_file.fea_txt, axis=0)\n num_embeddings = len(captions_list)\n print('Successfully load sentences from: ', datapath)\n print('Total number of sentences:', num_embeddings)\n print('num_embeddings:', num_embeddings, embeddings.shape)\n # path to save generated samples\n save_dir = cfg.NET_G[:cfg.NET_G.find('.pth')]\n mkdir_p(save_dir)\n\n batch_size = np.minimum(num_embeddings, self.batch_size)\n nz = cfg.Z_DIM\n noise = Variable(torch.FloatTensor(batch_size, nz))\n if cfg.CUDA:\n noise = noise.cuda()\n count = 0\n while count < num_embeddings:\n if count > 3000:\n break\n iend = count + batch_size\n if iend > num_embeddings:\n iend = num_embeddings\n count = num_embeddings - batch_size\n embeddings_batch = embeddings[count:iend]\n # captions_batch = captions_list[count:iend]\n txt_embedding = Variable(torch.FloatTensor(embeddings_batch))\n if cfg.CUDA:\n txt_embedding = txt_embedding.cuda()\n\n #######################################################\n # (2) Generate fake images\n ######################################################\n noise.data.normal_(0, 1)\n inputs = (txt_embedding, noise)\n _, fake_imgs, mu, logvar = \\\n nn.parallel.data_parallel(netG, inputs, self.gpus)\n for i in range(batch_size):\n save_name = '%s/%d.png' % (save_dir, count + i)\n im = fake_imgs[i].data.cpu().numpy()\n im = (im + 1.0) * 127.5\n im = im.astype(np.uint8)\n # print('im', im.shape)\n im = np.transpose(im, (1, 2, 0))\n # print('im', im.shape)\n im = Image.fromarray(im)\n im.save(save_name)\n count += batch_size\n\n" ]
[ [ "torch.optim.Adam", "torch.utils.tensorboard.FileWriter", "numpy.minimum", "torch.cuda.set_device", "torch.load", "torch.nn.parallel.data_parallel", "torch.utils.tensorboard.summary.scalar", "numpy.concatenate", "torch.FloatTensor", "numpy.transpose", "torch.autograd.Variable" ] ]
Bhaskers-Blu-Org2/PDP-Solver
[ "e5fa96802500f8e38525a47e1276497cba08b835" ]
[ "src/satyr.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nMain script to run a trained PDP solver against a test dataset.\n\"\"\"\n\n# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE.md file\n# in the project root for full license information.\n\nimport argparse\nimport yaml, os, logging, sys\nimport numpy as np\nimport torch\nfrom datetime import datetime\n\nimport dimacs2json\n\nfrom pdp.trainer import SatFactorGraphTrainer\n\n\ndef run(config, logger, output):\n \"Runs the prediction engine.\"\n \n np.random.seed(config['random_seed'])\n torch.manual_seed(config['random_seed'])\n\n if config['verbose']:\n logger.info(\"Building the computational graph...\")\n\n predicter = SatFactorGraphTrainer(config=config, use_cuda=not config['cpu_mode'], logger=logger)\n\n if config['verbose']:\n logger.info(\"Starting the prediction phase...\")\n\n predicter._counter = 0\n if output == '':\n predicter.predict(test_list=config['test_path'], out_file=sys.stdout, import_path_base=config['model_path'], \n post_processor=predicter._post_process_predictions, batch_replication=config['batch_replication'])\n else:\n with open(output, 'w') as file:\n predicter.predict(test_list=config['test_path'], out_file=file, import_path_base=config['model_path'], \n post_processor=predicter._post_process_predictions, batch_replication=config['batch_replication'])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('model_config', help='The model configuration yaml file')\n parser.add_argument('test_path', help='The input test path')\n parser.add_argument('test_recurrence_num', help='The number of iterations for the PDP', type=int)\n parser.add_argument('-b', '--batch_replication', help='Batch replication factor', type=int, default=1)\n parser.add_argument('-z', '--batch_size', help='Batch size', type=int, default=5000)\n parser.add_argument('-m', '--max_cache_size', help='Maximum cache size', type=int, default=100000)\n parser.add_argument('-l', '--test_batch_limit', help='Memory limit for mini-batches', type=int, default=40000000)\n parser.add_argument('-w', '--local_search_iteration', help='Number of iterations for post-processing local search', type=int, default=100)\n parser.add_argument('-e', '--epsilon', help='Epsilon probablity for post-processing local search', type=float, default=0.5)\n parser.add_argument('-v', '--verbose', help='Verbose', action='store_true')\n parser.add_argument('-c', '--cpu_mode', help='Run on CPU', action='store_true')\n parser.add_argument('-d', '--dimacs', help='The input folder contains DIMACS files', action='store_true')\n parser.add_argument('-s', '--random_seed', help='Random seed', type=int, default=int(datetime.now().microsecond))\n parser.add_argument('-o', '--output', help='The JSON output file', default='')\n\n args = vars(parser.parse_args())\n\n # Load the model config\n with open(args['model_config'], 'r') as f:\n model_config = yaml.load(f)\n\n # Set the logger\n format = '[%(levelname)s] %(asctime)s - %(name)s: %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=format)\n logger = logging.getLogger(model_config['model_name'])\n\n # Convert DIMACS input files into JSON\n if args['dimacs']:\n if args['verbose']:\n logger.info(\"Converting DIMACS files into JSON...\")\n temp_file_name = 'temp_problem_file.json'\n \n if os.path.isfile(args['test_path']):\n head, _ = os.path.split(args['test_path'])\n temp_file_name = os.path.join(head, temp_file_name)\n dimacs2json.convert_file(args['test_path'], 
temp_file_name, False)\n else:\n temp_file_name = os.path.join(args['test_path'], temp_file_name)\n dimacs2json.convert_directory(args['test_path'], temp_file_name, False)\n\n args['test_path'] = temp_file_name\n\n # Merge model config and other arguments into one config dict\n config = {**model_config, **args}\n\n if config['model_type'] == 'p-d-p' or config['model_type'] == 'walk-sat' or config['model_type'] == 'reinforce':\n config['model_path'] = None\n config['hidden_dim'] = 3\n\n if config['model_type'] == 'walk-sat':\n config['local_search_iteration'] = config['test_recurrence_num']\n\n config['dropout'] = 0\n config['error_dim'] = 1\n config['exploration'] = 0\n\n # Run the prediction engine\n run(config, logger, config['output'])\n\n if args['dimacs']:\n os.remove(temp_file_name)\n\n print('')\n" ]
[ [ "torch.manual_seed", "numpy.random.seed" ] ]
rudsonramon/machine_learning_udacity_ud120-projects
[ "b3254e3b053c84283a779005d3dcc4f84bfae4b5" ]
[ "svm/svm_author_id.py" ]
[ "#!/usr/bin/python\n\n\"\"\" \n This is the code to accompany the Lesson 2 (SVM) mini-project.\n\n Use a SVM to identify emails from the Enron corpus by their authors: \n Sara has label 0\n Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nimport matplotlib.pyplot as plt\nfrom email_preprocess import preprocess\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\n\n\n#XXX: features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n#One way to speed up an algorithm is to train it on a smaller training dataset.\n#The tradeoff is that the accuracy almost always goes down when you do this.\n#Let’s explore this more concretely: add in the following two lines immediately before training your classifier. \n#features_train = features_train[:len(features_train)/100]\n#labels_train = labels_train[:len(labels_train)/100] \n\"\"\"\n REDUCING THE DATA TO IMPROVE THE PERFORMANCE\n\"\"\"\n### organize the feature to avoid any error message\n#qtd_features = len(features_train)\n#qtd_features = qtd_features/100\n#qtd_features = int(qtd_features)\n#### the same with the label\n#qtd_labels = len(labels_train)\n#qtd_labels = qtd_labels/100\n#qtd_labels = int(qtd_labels)\n##print('qtd_features: ==>> ', qtd_features)\n##print('qtd_labels : ==>> ', qtd_labels )#features_train = features_train[:qtd_features]\n#labels_train = labels_train[:qtd_labels] \n\n#print(features_train[:qtd_features])\n#########################################################\n### your code goes here ###\n# clf = SVC(kernel=\"linear\")\n\n#clf = SVC(kernel='linear')\n#clf = SVC(kernel='rbf')\n#clf = SVC(kernel='rbf', C=10.0) ## SVM accuracy: 0.6160409556313993\n#clf = SVC(kernel='rbf', C=100) ## SVM accuracy: 0.6160409556313993\n#clf = SVC(kernel='rbf', C=1000) ## SVM accuracy: 0.8213879408418657\nclf = SVC(kernel='rbf', C=10000) ## SVM accuracy: 0.8924914675767918\nt0 = time()\n\n\"\"\"\n\tOne way to speed up an algorithm is to train it on a smaller training \n\tdataset. The tradeoff is that the accuracy almost always goes down when \n\tyou do this. Let's explore this more concretely:\n\t\n\toriginal (linear):\n\ttraining time: 188.996 s \n\tpredict time: 20.275 s \n\tSVM accuracy: 0.98407281001137659 \n\tThese lines effectively slice the training dataset down to 1 percent of its \n\toriginal size, tossing out 99 percent of the training data. 
\n\tSliced (linear):\n\ttraining time: 0.09 s\n\tpredict time: 0.961 s\n\taccuracy: 0.88452787258248011\n\tIf speed is a major consideration (and for many real-time machine learning \n\tapplications, it certainly is) then you may want to sacrifice a bit of \n\taccuracy if it means you can train/predict faster.\n\tDifferent Kernel:\n\tclf = SVC(kernel=\"rbf\", C=10000)\n\tAlso, C is very effective in this assignment, try to change it and see.\n\t\tSliced data set:\n\t\ttraining time: 0.098 s\n\t\tpredict time: 0.833 s\n\t\taccuracy: 0.89249146757679176\n\t\tFull sized data set:\n\t\ttraining time: 118.729 s\n\t\tpredict time: 13.075 s\n\t\taccuracy: 0.99089874857792948 #FTW\n\"\"\"\n# comment out those two lines if you want to see original one\n#features_train = features_train[:len(features_train)/100] \n#labels_train = labels_train[:len(labels_train)/100] \n\nclf.fit(features_train, labels_train)\nprint(\"training time:\", round(time()-t0, 3), \"s\")\nt0 = time()\npred = clf.predict(features_test)\nprint(\"predict time:\", round(time()-t0, 3), \"s\")\n\n# originally: 0.98407281001137659 acc... FTW but it takes time\nprint(\"SVM accuracy: %r\" % accuracy_score(pred, labels_test))\n\n\"\"\"\n\tWhat class does your SVM (0 or 1, corresponding to Sara and Chris respectively) \n\tpredict for element 10 of the test set? The 26th? The 50th? \n\t(Use the RBF kernel, C=10000, and 1% of the training set. Normally you'd get \n\tthe best results using the full training set, but we found that using 1% sped up \n\tthe computation considerably and did not change our results--so feel free to use \n\tthat shortcut here.)\n\"\"\"\nprint(\"10th: %r, 26th: %r, 50th: %r\" % (pred[10], pred[26], pred[50]))\n\n# There are over 1700 test events, how many are predicted to be in the \"Chris\" (1) class?\nprint(\"No. of predicted to be in the 'Chris'(1): %r\" % sum(pred))\n\n\"\"\"\n\tHopefully it's becoming clearer what they told us about the Naive Bayes -- is \n\tgreat for text -- it's faster and generally gives better performance than an SVM \n\tfor this particular problem. Of course, there are plenty of other problems where \n\tan SVM might work better. Knowing which one to try when you're tackling a problem \n\tfor the first time is part of the art and science of machine learning. In addition \n\tto picking your algorithm, depending on which one you try, there are parameter \n\ttunes to worry about as well, and the possibility of overfitting (especially if \n\tyou don't have lots of training data).\n\tOur general suggestion is to try a few different algorithms for each problem. \n\tTuning the parameters can be a lot of work, but just sit tight for now--toward \n\tthe end of the class we will introduce you to GridCV, a great sklearn tool that \n\tcan find an optimal parameter tune almost automatically.\n\"\"\"\n\n#predicts = clf.predict(features_test)\n#accuracy = accuracy_score(labels_test, predicts)\n#print(\"accuracy=\", accuracy )\n\n#########################################################\n\n########## \n########## RESULTS c/ parametro c=150000\n########## \n#no. of Chris training emails: 7936\n#no. of Sara training emails: 7884\n#training time: 245.032 s\n#predict time: 22.848 s\n#SVM accuracy: 0.9943117178612059\n#10th: 1, 26th: 0, 50th: 1\n#No. of predicted to be in the 'Chris'(1): 871\n\n########## \n########## RESULTS c/ parametro c=10000\n########## \n#no. of Chris training emails: 7936\n#no. 
of Sara training emails: 7884\n#training time: 309.46 s\n#predict time: 26.414 s\n#SVM accuracy: 0.9908987485779295\n#10th: 1, 26th: 0, 50th: 1\n#No. of predicted to be in the 'Chris'(1): 877\n\n########## ANSWER ACEPTED\n########## RESULTS c/ parametro \"\"clf = SVC(kernel=\"linear\", gamma=1.0)\"\"\n########## \n#no. of Chris training emails: 7936\n#no. of Sara training emails: 7884\n#training time: 432.789 s\n#predict time: 43.84 s\n#SVM accuracy: 0.9840728100113766\n#10th: 1, 26th: 0, 50th: 1\n#No. of predicted to be in the 'Chris'(1): 881\n\n########## \n########## RESULTS c/ parametro \"\"clf = SVC(kernel='linear')\"\"\n########## \n\n" ]
[ [ "sklearn.svm.SVC", "sklearn.metrics.accuracy_score" ] ]
gokhankuscu/CNN-layer-reuse
[ "1dd8d42bf58442c9530dd660fabe054d434008ed" ]
[ "models/mobilenet.py" ]
[ "'''MobileNet in PyTorch.\n\nSee the paper \"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications\" for more details.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU(inplace=True)\n )\n\n\nclass Block(nn.Module):\n '''Depthwise conv + Pointwise conv'''\n def __init__(self, in_planes, out_planes, stride=1):\n super(Block, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n return out\n\n\nclass MobileNet(nn.Module):\n def __init__(self,\n num_classes=10,\n width_mult=1,\n init_ch=3):\n super(MobileNet, self).__init__()\n\n input_channel = 32\n last_channel = 1024\n input_channel = int(input_channel * width_mult)\n last_channel = int(last_channel * width_mult)\n cfg = [\n # c, n, s\n [64, 1, 2],\n [128, 2, 2],\n [256, 2, 1], # for CIFAR-10, stride is 1\n [512, 6, 2],\n [1024, 2, 1],\n ]\n\n self.features = [conv_bn(init_ch, input_channel, 1)]\n # building inverted residual blocks\n for c, n, s in cfg:\n output_channel = int(c * width_mult)\n for i in range(n):\n if i == 0:\n self.features.append(Block(input_channel, output_channel, s))\n else:\n self.features.append(Block(input_channel, output_channel, 1))\n input_channel = output_channel\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(last_channel, num_classes),\n )\n\n\n def forward(self, x):\n x = self.features(x)\n x = F.avg_pool2d(x, x.data.size()[-2:])\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\nif __name__ == '__main__':\n net = MobileNet(num_classes=10, width_mult=1, init_ch=3)\n x = torch.randn(8, 3, 32, 32)\n out = net(x)\n print(out.shape)\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.randn", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
ilyankou/leeds-gis-python-practicals
[ "0d861ba88fe832201665353ff0afcac972e4b2e2" ]
[ "model/__main__.py" ]
[ "import random\nimport operator\nimport csv\nimport sys\n\nimport tkinter\nimport requests\nimport bs4\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot\nimport matplotlib.animation\n\nimport agentframework\n\n\ndef total_stored(agents):\n \"\"\"Calculate total stored by all agents and append to stored.txt file\"\"\"\n total_stored = 0\n for a in agents:\n total_stored += a.store\n\n with open('output/stored.txt', 'a') as f:\n f.write( \"{}\\n\".format( str(total_stored) ) )\n\n\ndef create_environment(path):\n \"\"\"\n Construct the environment form a CSV-like file given in `path`.\n Return a 2D list with floats that define the environment\n \"\"\"\n env = []\n with open(path, 'r') as csvfile:\n reader = csv.reader(csvfile)\n\n for row in reader:\n env.append( list( map(float, row) ) ) # Convert ints to floats for matplotlib\n\n return env\n\n\ndef save_environment(filename):\n \"\"\"Dump current environment into a CSV file given in `filename`\"\"\"\n with open(filename, 'w') as csvfile:\n writer = csv.writer(csvfile)\n\n for row in environment:\n writer.writerow(map(int, row))\n\n\nif __name__ == '__main__':\n\n # Read arguments from the Terminal\n assert len(sys.argv) >= 4, \"\"\"\n To run model.py, you need to specify at least 3 integer arguments:\n number of agents, number of iterations, and neighbourhood\n \"\"\"\n\n # Make sure arguments are integers\n try:\n num_of_agents, num_of_iterations, neighbourhood = map(int, sys.argv[1:4])\n show_visual = False if (len(sys.argv) == 5 and sys.argv[4] == 'nodisplay') else True\n\n except:\n print( \"Could not convert arguments into integers. Aborting.\" )\n sys.exit()\n\n # Construct the environment from csv\n environment = create_environment('in.txt')\n\n # Read the data from the web with BeautifulSoup\n data = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')\n soup = bs4.BeautifulSoup(data.text, 'html.parser')\n\n # Create a list of tuples of (y, x) coordinates\n ys = [int(y.text) for y in soup.find_all(attrs={'class': 'y'})]\n xs = [int(x.text) for x in soup.find_all(attrs={'class': 'x'})]\n coords = list( zip(ys, xs) )\n\n # Generate a list of agents\n agents = []\n for i in range(num_of_agents):\n y, x = coords[i] if i < len(coords) else (None, None)\n agents.append(agentframework.Agent(environment, agents, y, x))\n\n\n def single_iteration():\n \"\"\"\n Update all `agents` one step forward (move-eat-share steps).\n Shuffle list of agents to avoid model artifacts.\n \"\"\"\n random.shuffle(agents)\n\n # Process each agent (move-eat-share)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n\n\n def update(x):\n \"\"\" Runs a single iteration and updates the frame \"\"\"\n single_iteration()\n\n # Display the new environment\n fig.clear()\n matplotlib.pyplot.imshow(environment)\n\n # Draw each agent on top of the new environment\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\n def run():\n \"\"\" Starts the animation (called by GUI) \"\"\"\n animation = matplotlib.animation.FuncAnimation(fig, update, frames=num_of_iterations, repeat=False)\n # Use canvas.draw(), not canvas.show(), to avoid\n # AttributeError: 'FigureCanvasTkAgg' object has no attribute 'show'\n canvas.draw()\n\n\n # Choose how to run the program: quietly with the output as text, or interactively\n if show_visual:\n\n # Build the main GUI window\n root = tkinter.Tk()\n 
root.wm_title(\"Environment Model\") # Set window's title\n\n # Add canvas to the GUI window\n fig = matplotlib.pyplot.figure(figsize=(7, 7))\n ax = fig.add_axes([0, 0, 300, 300])\n\n canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)\n canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\n # Add menu\n menu = tkinter.Menu(root)\n root.config(menu=menu)\n model_menu = tkinter.Menu(menu)\n menu.add_cascade(label=\"Model\", menu=model_menu)\n model_menu.add_command(label=\"Run model\", command=run)\n\n # Wait for GUI events\n tkinter.mainloop() \n\n else:\n for i in range(num_of_iterations):\n single_iteration()\n\n # Output the sum the environment to `output/stored.txt`\n total_stored(agents)\n\n # Save the resulting environment\n save_environment('output/{}-{}-{}.txt'.format(\n num_of_agents,\n num_of_iterations,\n neighbourhood)\n )" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.scatter", "matplotlib.use", "matplotlib.pyplot.figure", "matplotlib.animation.FuncAnimation", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
rdocking/bcbio-nextgen
[ "858c4e02dbf0b03418a51741d62f7ab2dbcc8431" ]
[ "bcbio/qc/multiqc.py" ]
[ "\"\"\"High level summaries of samples and programs with MultiQC.\n\nhttps://github.com/ewels/MultiQC\n\"\"\"\nimport collections\nimport glob\nimport io\nimport json\nimport mimetypes\nimport os\nimport pandas as pd\nimport shutil\nimport numpy as np\nfrom collections import OrderedDict\n\nimport pybedtools\nimport six\nimport toolz as tz\nimport yaml\n\nfrom bcbio import utils\nfrom bcbio.cwl import cwlutils\nfrom bcbio.distributed.transaction import file_transaction, tx_tmpdir\nfrom bcbio.log import logger\nfrom bcbio.provenance import do, programs\nfrom bcbio.provenance import data as provenancedata\nfrom bcbio.pipeline import datadict as dd\nfrom bcbio.pipeline import config_utils\nfrom bcbio.bam import ref\nfrom bcbio.qc.qsignature import get_qsig_multiqc_files\nfrom bcbio.structural import annotate\nfrom bcbio.utils import walk_json\nfrom bcbio.variation import bedutils\nfrom bcbio.qc.variant import get_active_vcinfo\nfrom bcbio.upload import get_all_upload_paths_from_sample\nfrom bcbio.variation import coverage\nfrom bcbio.chipseq import atac\n\ndef summary(*samples):\n \"\"\"Summarize all quality metrics together\"\"\"\n samples = list(utils.flatten(samples))\n work_dir = dd.get_work_dir(samples[0])\n multiqc = config_utils.get_program(\"multiqc\", samples[0][\"config\"])\n if not multiqc:\n logger.debug(\"multiqc not found. Update bcbio_nextgen.py tools to fix this issue.\")\n out_dir = utils.safe_makedir(os.path.join(work_dir, \"qc\", \"multiqc\"))\n out_data = os.path.join(out_dir, \"multiqc_data\")\n out_file = os.path.join(out_dir, \"multiqc_report.html\")\n file_list = os.path.join(out_dir, \"list_files.txt\")\n work_samples = cwlutils.unpack_tarballs([utils.deepish_copy(x) for x in samples], samples[0])\n work_samples = _summarize_inputs(work_samples, out_dir)\n if not utils.file_exists(out_file):\n with tx_tmpdir(samples[0], work_dir) as tx_out:\n in_files = _get_input_files(work_samples, out_dir, tx_out)\n in_files += _merge_metrics(work_samples, out_dir)\n if _one_exists(in_files):\n with utils.chdir(out_dir):\n _create_config_file(out_dir, work_samples)\n input_list_file = _create_list_file(in_files, file_list)\n if dd.get_tmp_dir(samples[0]):\n export_tmp = \"export TMPDIR=%s && \" % dd.get_tmp_dir(samples[0])\n else:\n export_tmp = \"\"\n locale_export = utils.locale_export()\n path_export = utils.local_path_export()\n other_opts = config_utils.get_resources(\"multiqc\", samples[0][\"config\"]).get(\"options\", [])\n other_opts = \" \".join([str(x) for x in other_opts])\n cmd = (\"{path_export}{export_tmp}{locale_export} \"\n \"{multiqc} -f -l {input_list_file} {other_opts} -o {tx_out}\")\n do.run(cmd.format(**locals()), \"Run multiqc\")\n if utils.file_exists(os.path.join(tx_out, \"multiqc_report.html\")):\n shutil.move(os.path.join(tx_out, \"multiqc_report.html\"), out_file)\n shutil.move(os.path.join(tx_out, \"multiqc_data\"), out_data)\n samples = _group_by_sample_and_batch(samples)\n if utils.file_exists(out_file) and samples:\n data_files = set()\n for i, data in enumerate(samples):\n data_files.add(os.path.join(out_dir, \"report\", \"metrics\", dd.get_sample_name(data) + \"_bcbio.txt\"))\n data_files.add(os.path.join(out_dir, \"report\", \"metrics\", \"target_info.yaml\"))\n data_files.add(os.path.join(out_dir, \"multiqc_config.yaml\"))\n [data_files.add(f) for f in glob.glob(os.path.join(out_dir, \"multiqc_data\", \"*\"))]\n data_files = [f for f in data_files if f and utils.file_exists(f)]\n if \"summary\" not in samples[0]:\n samples[0][\"summary\"] = {}\n 
samples[0][\"summary\"][\"multiqc\"] = {\"base\": out_file, \"secondary\": data_files}\n\n data_json = os.path.join(out_dir, \"multiqc_data\", \"multiqc_data.json\")\n data_json_final = _save_uploaded_data_json(samples, data_json, os.path.join(out_dir, \"multiqc_data\"))\n if data_json_final:\n samples[0][\"summary\"][\"multiqc\"][\"secondary\"].append(data_json_final)\n\n # Prepare final file list and inputs for downstream usage\n file_list_final = _save_uploaded_file_list(samples, file_list, out_dir)\n if file_list_final:\n samples[0][\"summary\"][\"multiqc\"][\"secondary\"].append(file_list_final)\n if any([cwlutils.is_cwl_run(d) for d in samples]):\n for indir in [\"inputs\", \"report\"]:\n tarball = os.path.join(out_dir, \"multiqc-%s.tar.gz\" % (indir))\n if not utils.file_exists(tarball):\n with utils.chdir(out_dir):\n cmd = [\"tar\", \"-czvpf\", tarball, indir]\n do.run(cmd, \"Compress multiqc inputs: %s\" % indir)\n samples[0][\"summary\"][\"multiqc\"][\"secondary\"].append(tarball)\n\n if any([cwlutils.is_cwl_run(d) for d in samples]):\n samples = _add_versions(samples)\n\n return [[data] for data in samples]\n\ndef _add_versions(samples):\n \"\"\"Add tool and data versions to the summary.\n \"\"\"\n samples[0][\"versions\"] = {\"tools\": programs.write_versions(samples[0][\"dirs\"], samples[0][\"config\"]),\n \"data\": provenancedata.write_versions(samples[0][\"dirs\"], samples)}\n return samples\n\ndef _summarize_inputs(samples, out_dir):\n \"\"\"Summarize inputs for MultiQC reporting in display.\n \"\"\"\n logger.info(\"summarize target information\")\n if samples[0].get(\"analysis\", \"\").lower() in [\"variant\", \"variant2\"]:\n metrics_dir = utils.safe_makedir(os.path.join(out_dir, \"report\", \"metrics\"))\n samples = _merge_target_information(samples, metrics_dir)\n\n logger.info(\"summarize fastqc\")\n out_dir = utils.safe_makedir(os.path.join(out_dir, \"report\", \"fastqc\"))\n with utils.chdir(out_dir):\n _merge_fastqc(samples)\n\n preseq_samples = [s for s in samples if tz.get_in([\"config\", \"algorithm\", \"preseq\"], s)]\n if preseq_samples:\n logger.info(\"summarize preseq\")\n out_dir = utils.safe_makedir(os.path.join(out_dir, \"report\", \"preseq\"))\n with utils.chdir(out_dir):\n _merge_preseq(preseq_samples)\n return samples\n\ndef _save_uploaded_data_json(samples, data_json_work, out_dir):\n \"\"\" Fixes all absolute work-rooted paths to relative final-rooted paths\n \"\"\"\n if not utils.file_exists(data_json_work):\n return None\n\n upload_path_mapping = dict()\n for sample in samples:\n upload_path_mapping.update(get_all_upload_paths_from_sample(sample))\n if not upload_path_mapping:\n return data_json_work\n\n with io.open(data_json_work, encoding=\"utf-8\") as f:\n data = json.load(f, object_pairs_hook=OrderedDict)\n upload_base = samples[0][\"upload\"][\"dir\"]\n data = walk_json(data, lambda s: _work_path_to_rel_final_path(s, upload_path_mapping, upload_base))\n\n data_json_final = os.path.join(out_dir, \"multiqc_data_final.json\")\n with io.open(data_json_final, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, indent=4)\n return data_json_final\n\ndef _save_uploaded_file_list(samples, file_list_work, out_dir):\n \"\"\" Fixes all absolute work-rooted paths to relative final-rooted paths\n\n For CWL, prepare paths relative to output directory.\n \"\"\"\n if not utils.file_exists(file_list_work):\n return None\n\n if any([cwlutils.is_cwl_run(d) for d in samples]):\n upload_paths = []\n with open(file_list_work) as f:\n for p in (l.strip() for l in 
f.readlines() if os.path.exists(l.strip())):\n if p.startswith(out_dir):\n upload_paths.append(p.replace(out_dir + \"/\", \"\"))\n else:\n upload_path_mapping = dict()\n for sample in samples:\n upload_path_mapping.update(get_all_upload_paths_from_sample(sample))\n if not upload_path_mapping:\n return None\n\n with open(file_list_work) as f:\n paths = [l.strip() for l in f.readlines() if os.path.exists(l.strip())]\n upload_paths = [p for p in [\n _work_path_to_rel_final_path(path, upload_path_mapping, samples[0][\"upload\"][\"dir\"])\n for path in paths\n ] if p]\n if not upload_paths:\n return None\n\n file_list_final = os.path.join(out_dir, \"list_files_final.txt\")\n with open(file_list_final, \"w\") as f:\n for path in upload_paths:\n f.write(path + '\\n')\n return file_list_final\n\ndef _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):\n \"\"\" Check if `path` is a work-rooted path, and convert to a relative final-rooted path\n \"\"\"\n if not path or not isinstance(path, str):\n return path\n upload_path = None\n\n # First, check in the mapping: if it's there is a direct reference and\n # it's a file, we immediately return it (saves lots of iterations)\n if upload_path_mapping.get(path) is not None and os.path.isfile(path):\n upload_path = upload_path_mapping[path]\n else:\n # Not a file: check for elements in the mapping that contain\n # it\n paths_to_check = [key for key in upload_path_mapping\n if path.startswith(key)]\n\n if paths_to_check:\n for work_path in paths_to_check:\n if os.path.isdir(work_path):\n final_path = upload_path_mapping[work_path]\n upload_path = path.replace(work_path, final_path)\n break\n\n if upload_path is not None:\n return os.path.relpath(upload_path, upload_base_dir)\n else:\n return None\n\ndef _one_exists(input_files):\n \"\"\"\n at least one file must exist for multiqc to run properly\n \"\"\"\n for f in input_files:\n if os.path.exists(f):\n return True\n return False\n\ndef _get_input_files(samples, base_dir, tx_out_dir):\n \"\"\"Retrieve input files, keyed by sample and QC method name.\n\n Stages files into the work directory to ensure correct names for\n MultiQC sample assessment when running with CWL.\n \"\"\"\n in_files = collections.defaultdict(list)\n for data in samples:\n sum_qc = tz.get_in([\"summary\", \"qc\"], data, {})\n if sum_qc in [None, \"None\"]:\n sum_qc = {}\n elif isinstance(sum_qc, six.string_types):\n sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}\n elif not isinstance(sum_qc, dict):\n raise ValueError(\"Unexpected summary qc: %s\" % sum_qc)\n for program, pfiles in sum_qc.items():\n if isinstance(pfiles, dict):\n pfiles = [pfiles[\"base\"]] + pfiles.get(\"secondary\", [])\n # CWL: presents output files as single file plus associated secondary files\n elif isinstance(pfiles, six.string_types):\n if os.path.exists(pfiles):\n pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames]\n else:\n pfiles = []\n in_files[(dd.get_sample_name(data), program)].extend(pfiles)\n staged_files = []\n for (sample, program), files in in_files.items():\n cur_dir = utils.safe_makedir(os.path.join(base_dir, \"inputs\", sample, program))\n for f in files:\n if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):\n if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]):\n staged_f = os.path.join(cur_dir, os.path.basename(f))\n shutil.copy(f, staged_f)\n staged_files.append(staged_f)\n else:\n staged_files.append(f)\n 
staged_files.extend(get_qsig_multiqc_files(samples))\n # Back compatible -- to migrate to explicit specifications in input YAML\n if not any([cwlutils.is_cwl_run(d) for d in samples]):\n staged_files += [\"trimmed\", \"htseq-count/*summary\"]\n # Add in created target_info file\n if os.path.isfile(os.path.join(base_dir, \"report\", \"metrics\", \"target_info.yaml\")):\n staged_files += [os.path.join(base_dir, \"report\", \"metrics\", \"target_info.yaml\")]\n return sorted(list(set(staged_files)))\n\ndef _in_temp_directory(f):\n return any(x.startswith(\"tmp\") for x in f.split(\"/\"))\n\ndef _get_batches(data):\n batches = dd.get_batch(data) or dd.get_sample_name(data)\n if not isinstance(batches, (list, tuple)):\n batches = [batches]\n return batches\n\ndef _group_by_sample_and_batch(samples):\n \"\"\"Group samples split by QC method back one per sample-batch.\n \"\"\"\n out = collections.defaultdict(list)\n for data in samples:\n out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data)\n return [xs[0] for xs in out.values()]\n\ndef _create_list_file(paths, out_file):\n with open(out_file, \"w\") as f:\n for path in paths:\n f.write(path + '\\n')\n return out_file\n\ndef _create_config_file(out_dir, samples):\n \"\"\"Provide configuration file hiding duplicate columns.\n\n Future entry point for providing top level configuration of output reports.\n \"\"\"\n out_file = os.path.join(out_dir, \"multiqc_config.yaml\")\n out = {\"table_columns_visible\": dict()}\n\n # Avoid duplicated bcbio columns with qualimap\n if any((\"qualimap\" in dd.get_tools_on(d) or \"qualimap_full\" in dd.get_tools_on(d)) for d in samples):\n # Hiding metrics duplicated by Qualimap\n out[\"table_columns_visible\"][\"bcbio\"] = {\"Average_insert_size\": False}\n out[\"table_columns_visible\"][\"FastQC\"] = {\"percent_gc\": False}\n\n # Setting up thresholds for Qualimap depth cutoff calculations, based on sample avg depths\n avg_depths = [tz.get_in([\"summary\", \"metrics\", \"Avg_coverage\"], s) for s in samples]\n avg_depths = [x for x in avg_depths if x]\n # Picking all thresholds up to the highest sample average depth\n thresholds = [t for t in coverage.DEPTH_THRESHOLDS if not avg_depths or t <= max(avg_depths)]\n # ...plus one more\n if len(thresholds) < len(coverage.DEPTH_THRESHOLDS):\n thresholds.append(coverage.DEPTH_THRESHOLDS[len(thresholds)])\n\n # Showing only thresholds surrounding any of average depths\n thresholds_hidden = []\n for i, t in enumerate(thresholds):\n if t > 20: # Not hiding anything below 20x\n if any(thresholds[i-1] <= c < thresholds[i] for c in avg_depths if c and i-1 >= 0) or \\\n any(thresholds[i] <= c < thresholds[i+1] for c in avg_depths if c and i+1 < len(thresholds)):\n pass\n else:\n thresholds_hidden.append(t)\n\n # Hide coverage unless running full qualimap, downsampled inputs are confusing\n if not any((\"qualimap_full\" in dd.get_tools_on(d)) for d in samples):\n thresholds_hidden = thresholds + thresholds_hidden\n thresholds_hidden.sort()\n thresholds = []\n out['qualimap_config'] = {\n 'general_stats_coverage': [str(t) for t in thresholds],\n 'general_stats_coverage_hidden': [str(t) for t in thresholds_hidden]}\n\n # Avoid confusing peddy outputs, sticking to ancestry and sex prediction\n out[\"table_columns_visible\"][\"Peddy\"] = {\"family_id\": False, \"sex_het_ratio\": False,\n \"error_sex_check\": False}\n\n # Setting the module order\n module_order = []\n module_order.extend([\n \"bcbio\",\n \"samtools\",\n 
\"goleft_indexcov\",\n \"peddy\"\n ])\n out['bcftools'] = {'write_separate_table': True}\n # if germline calling was performed:\n if any(\"germline\" in (get_active_vcinfo(s) or {}) or # tumor-only somatic with germline extraction\n dd.get_phenotype(s) == \"germline\" or # or paired somatic with germline calling for normal\n _has_bcftools_germline_stats(s) # CWL organized statistics\n for s in samples):\n # Split somatic and germline variant stats into separate multiqc submodules,\n # with somatic going into General Stats, and germline going into a separate table:\n module_order.extend([{\n 'bcftools': {\n 'name': 'Bcftools (somatic)',\n 'info': 'Bcftools stats for somatic variant calls only.',\n 'path_filters': ['*_bcftools_stats.txt'],\n 'write_general_stats': True,\n }},\n {'bcftools': {\n 'name': 'Bcftools (germline)',\n 'info': 'Bcftools stats for germline variant calls only.',\n 'path_filters': ['*_bcftools_stats_germline.txt'],\n 'write_general_stats': False\n }},\n ])\n else:\n module_order.append(\"bcftools\")\n module_order.extend([\n \"salmon\",\n \"star\",\n \"picard\",\n \"qualimap\",\n \"snpeff\",\n \"fastqc\",\n \"preseq\",\n ])\n out[\"module_order\"] = module_order\n\n preseq_samples = [s for s in samples if tz.get_in([\"config\", \"algorithm\", \"preseq\"], s)]\n if preseq_samples:\n out[\"preseq\"] = _make_preseq_multiqc_config(preseq_samples)\n\n with open(out_file, \"w\") as out_handle:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)\n return out_file\n\ndef _has_bcftools_germline_stats(data):\n \"\"\"Check for the presence of a germline stats file, CWL compatible.\n \"\"\"\n stats_file = tz.get_in([\"summary\", \"qc\"], data)\n if isinstance(stats_file, dict):\n stats_file = tz.get_in([\"variants\", \"base\"], stats_file)\n if not stats_file:\n stats_file = \"\"\n return stats_file.find(\"bcftools_stats_germline\") > 0\n\ndef _check_multiqc_input(path):\n \"\"\"Check if file exists, and return empty if it doesn't\"\"\"\n if utils.file_exists(path):\n return path\n\n# ## report and coverage\n\ndef _is_good_file_for_multiqc(fpath):\n \"\"\"Returns False if the file is binary or image.\"\"\"\n # Use mimetypes to exclude binary files where possible\n (ftype, encoding) = mimetypes.guess_type(fpath)\n if encoding is not None:\n return False\n if ftype is not None and ftype.startswith('image'):\n return False\n return True\n\ndef _parse_disambiguate(disambiguatestatsfilename):\n \"\"\"Parse disambiguation stats from given file.\n \"\"\"\n disambig_stats = [0, 0, 0]\n with open(disambiguatestatsfilename, \"r\") as in_handle:\n for i, line in enumerate(in_handle):\n fields = line.strip().split(\"\\t\")\n if i == 0:\n assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']\n else:\n disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]\n return disambig_stats\n\ndef _add_disambiguate(sample):\n # check if disambiguation was run\n if \"disambiguate\" in sample:\n if utils.file_exists(sample[\"disambiguate\"][\"summary\"]):\n disambigStats = _parse_disambiguate(sample[\"disambiguate\"][\"summary\"])\n sample[\"summary\"][\"metrics\"][\"Disambiguated %s reads\" % str(sample[\"genome_build\"])] = disambigStats[0]\n disambigGenome = (sample[\"config\"][\"algorithm\"][\"disambiguate\"][0]\n if isinstance(sample[\"config\"][\"algorithm\"][\"disambiguate\"], (list, tuple))\n else sample[\"config\"][\"algorithm\"][\"disambiguate\"])\n sample[\"summary\"][\"metrics\"][\"Disambiguated %s 
reads\" % disambigGenome] = disambigStats[1]\n sample[\"summary\"][\"metrics\"][\"Disambiguated ambiguous reads\"] = disambigStats[2]\n return sample\n\ndef _add_atac(sample):\n atac_metrics = atac.calculate_encode_complexity_metrics(sample)\n if not atac_metrics:\n return sample\n sample[\"summary\"][\"metrics\"] = tz.merge(atac_metrics, sample[\"summary\"][\"metrics\"])\n return sample\n\ndef _fix_duplicated_rate(dt):\n \"\"\"Get RNA duplicated rate if exists and replace by samtools metric\"\"\"\n if \"Duplication_Rate_of_Mapped\" in dt:\n dt[\"Duplicates_pct\"] = 100.0 * dt[\"Duplication_Rate_of_Mapped\"]\n return dt\n\ndef _merge_metrics(samples, out_dir):\n \"\"\"Merge metrics from multiple QC steps\n \"\"\"\n logger.info(\"summarize metrics\")\n out_dir = utils.safe_makedir(os.path.join(out_dir, \"report\", \"metrics\"))\n sample_metrics = collections.defaultdict(dict)\n for s in samples:\n s = _add_disambiguate(s)\n s = _add_atac(s)\n m = tz.get_in(['summary', 'metrics'], s)\n if isinstance(m, six.string_types):\n m = json.loads(m)\n if m:\n for me in list(m.keys()):\n if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):\n m.pop(me, None)\n sample_metrics[dd.get_sample_name(s)].update(m)\n out = []\n for sample_name, m in sample_metrics.items():\n sample_file = os.path.join(out_dir, \"%s_bcbio.txt\" % sample_name)\n with file_transaction(samples[0], sample_file) as tx_out_file:\n dt = pd.DataFrame(m, index=['1'])\n dt.columns = [k.replace(\" \", \"_\").replace(\"(\", \"\").replace(\")\", \"\") for k in dt.columns]\n dt['sample'] = sample_name\n dt['rRNA_rate'] = m.get('rRNA_rate', \"NA\")\n dt['RiP_pct'] = \"%.3f\" % (int(m.get(\"RiP\", 0)) / float(m.get(\"Total_reads\", 1)) * 100)\n dt = _fix_duplicated_rate(dt)\n dt.transpose().to_csv(tx_out_file, sep=\"\\t\", header=False)\n out.append(sample_file)\n return out\n\ndef _merge_fastqc(samples):\n \"\"\"\n merge all fastqc samples into one by module\n \"\"\"\n fastqc_list = collections.defaultdict(list)\n seen = set()\n for data in samples:\n name = dd.get_sample_name(data)\n if name in seen:\n continue\n seen.add(name)\n fns = glob.glob(os.path.join(dd.get_work_dir(data), \"qc\", dd.get_sample_name(data), \"fastqc\") + \"/*\")\n for fn in fns:\n if fn.endswith(\"tsv\"):\n metric = os.path.basename(fn)\n fastqc_list[metric].append([name, fn])\n\n for metric in fastqc_list:\n dt_by_sample = []\n for fn in fastqc_list[metric]:\n dt = pd.read_csv(fn[1], sep=\"\\t\")\n dt['sample'] = fn[0]\n dt_by_sample.append(dt)\n dt = utils.rbind(dt_by_sample)\n dt.to_csv(metric, sep=\"\\t\", index=False, mode ='w')\n return samples\n\ndef _merge_preseq(samples):\n metrics = [utils.get_in(s, (\"summary\", \"metrics\")) for s in samples]\n real_counts_file = os.path.abspath(os.path.join(\"preseq_real_counts.txt\"))\n with file_transaction(samples[0], real_counts_file) as tx_out_file:\n with open(tx_out_file, \"w\") as f:\n for s, m in zip(samples, metrics):\n line = dd.get_sample_name(s) + \"\\t\" + str(m[\"Preseq_read_count\"])\n if m.get(\"Preseq_unique_count\") is not None:\n line += \"\\t\" + str(m[\"Preseq_unique_count\"])\n line += \"\\n\"\n f.write(line)\n samples[0][\"summary\"][\"qc\"][\"preseq\"][\"secondary\"] = [real_counts_file]\n\ndef _make_preseq_multiqc_config(samples):\n metrics = [utils.get_in(s, (\"summary\", \"metrics\")) for s in samples]\n out = {\"read_length\": float(np.median([m[\"Preseq_read_length\"] for m in metrics]))}\n\n genome_sizes = list(set(m[\"Preseq_genome_size\"] for m in metrics))\n 
if len(genome_sizes) == 1:\n out[\"genome_size\"] = genome_sizes[0]\n\n return out\n\ndef _merge_target_information(samples, metrics_dir):\n out_file = os.path.abspath(os.path.join(metrics_dir, \"target_info.yaml\"))\n if utils.file_exists(out_file):\n return samples\n\n genomes = set(dd.get_genome_build(data) for data in samples)\n coverage_beds = set(dd.get_coverage(data) for data in samples)\n original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)\n\n data = samples[0]\n info = {}\n\n # Reporting in MultiQC only if the genome is the same across all samples\n if len(genomes) == 1:\n info[\"genome_info\"] = {\n \"name\": dd.get_genome_build(data),\n \"size\": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data[\"config\"])]),\n }\n\n # Reporting in MultiQC only if the target is the same across all samples\n vcr_orig = None\n if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:\n vcr_orig = list(original_variant_regions)[0]\n vcr_clean = bedutils.clean_file(vcr_orig, data)\n info[\"variants_regions_info\"] = {\n \"bed\": vcr_orig,\n \"size\": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),\n \"regions\": pybedtools.BedTool(vcr_clean).count(),\n }\n gene_num = annotate.count_genes(vcr_clean, data)\n if gene_num is not None:\n info[\"variants_regions_info\"][\"genes\"] = gene_num\n else:\n info[\"variants_regions_info\"] = {\n \"bed\": \"callable regions\",\n }\n # Reporting in MultiQC only if the target is the same across samples\n if len(coverage_beds) == 1:\n cov_bed = list(coverage_beds)[0]\n if cov_bed not in [None, \"None\"]:\n if vcr_orig and vcr_orig == cov_bed:\n info[\"coverage_bed_info\"] = info[\"variants_regions_info\"]\n else:\n clean_bed = bedutils.clean_file(cov_bed, data, prefix=\"cov-\", simple=True)\n info[\"coverage_bed_info\"] = {\n \"bed\": cov_bed,\n \"size\": pybedtools.BedTool(cov_bed).total_coverage(),\n \"regions\": pybedtools.BedTool(clean_bed).count(),\n }\n gene_num = annotate.count_genes(clean_bed, data)\n if gene_num is not None:\n info[\"coverage_bed_info\"][\"genes\"] = gene_num\n else:\n info[\"coverage_bed_info\"] = info[\"variants_regions_info\"]\n\n coverage_intervals = set(data[\"config\"][\"algorithm\"][\"coverage_interval\"] for data in samples)\n if len(coverage_intervals) == 1:\n info[\"coverage_interval\"] = list(coverage_intervals)[0]\n\n if info:\n with open(out_file, \"w\") as out_handle:\n yaml.safe_dump(info, out_handle)\n\n return samples\n" ]
[ [ "numpy.median", "pandas.read_csv", "pandas.DataFrame" ] ]
csukuangfj/lhotse
[ "9b12055ca75718914c5457b33e498d1c8e8b86d8" ]
[ "lhotse/dataset/vis.py" ]
[ "from typing import Any, Mapping\n\n\ndef plot_batch(batch: Mapping[str, Any], supervisions: bool = True, text=True):\n import matplotlib.pyplot as plt\n\n batch_size = _get_one_of(batch, 'features', 'audio', 'inputs').shape[0]\n fig, axes = plt.subplots(batch_size, figsize=(16, batch_size), sharex=True)\n\n def _plot_features(key):\n feats = batch[key]\n feat_actors = []\n for idx in range(batch_size):\n feat_actors.append(axes[idx].imshow(feats[idx].numpy().transpose()))\n fig.tight_layout(h_pad=2)\n fig.colorbar(feat_actors[-1], ax=axes)\n\n if 'features' in batch:\n _plot_features(key='features')\n\n if 'audio' in batch:\n raise NotImplementedError(\"Plotting audio for batches is not supported yet.\")\n\n if 'inputs' in batch:\n # For now, assume it is features and not multi-channel audio...\n if len(batch['inputs'].shape) == 3:\n _plot_features(key='inputs')\n else:\n raise NotImplementedError(\"We could not infer what does the key 'inputs' represent yet.\")\n\n if supervisions and 'supervisions' in batch:\n sups = batch['supervisions']\n for idx in range(len(sups['sequence_idx'])):\n seq_idx = sups['sequence_idx'][idx]\n if all(k in sups for k in ('start_frame', 'num_frames')):\n start, end = sups['start_frame'][idx], sups['start_frame'][idx] + sups['num_frames'][idx]\n elif all(k in sups for k in ('start_sample', 'num_samples')):\n start, end = sups['start_sample'][idx], sups['start_sample'][idx] + sups['num_samples'][idx]\n else:\n raise ValueError(\n \"Cannot plot supervisions: missing 'start_frame/sample' and 'num_frames/samples' fields.\"\n )\n axes[seq_idx].axvspan(start, end, fill=False, edgecolor='red', linestyle='--', linewidth=4)\n if text and 'text' in sups:\n axes[seq_idx].text(start, -3, sups['text'][idx])\n\n\ndef _get_one_of(d, *keys):\n for k in keys:\n if k in d:\n return d[k]\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
WeChatCV/up-detr
[ "1d953f2c3c9e3343dea4fb128046488869f87709" ]
[ "datasets/selfdet.py" ]
[ "# ------------------------------------------------------------------------\n# UP-DETR\n# Copyright (c) Tencent, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\nfrom torch.utils.data import Dataset\nimport os\nfrom PIL import Image\nimport torch\nimport numpy as np\nimport datasets.transforms as T\nfrom torchvision.transforms import transforms\nfrom PIL import ImageFilter\nimport random\n\ndef get_random_patch_from_img(img, min_pixel=8):\n \"\"\"\n :param img: original image\n :param min_pixel: min pixels of the query patch\n :return: query_patch,x,y,w,h\n \"\"\"\n w, h = img.size\n min_w, max_w = min_pixel, w - min_pixel\n min_h, max_h = min_pixel, h - min_pixel\n sw, sh = np.random.randint(min_w, max_w + 1), np.random.randint(min_h, max_h + 1)\n x, y = np.random.randint(w - sw) if sw != w else 0, np.random.randint(h - sh) if sh != h else 0\n patch = img.crop((x, y, x + sw, y + sh))\n return patch, x, y, sw, sh\n\n\nclass SelfDet(Dataset):\n \"\"\"\n SelfDet is a dataset class which implements random query patch detection.\n It randomly crops patches as queries from the given image with the corresponding bounding box.\n The format of the bounding box is same to COCO.\n \"\"\"\n def __init__(self, root, detection_transform, query_transform, num_patches=10):\n super(SelfDet, self).__init__()\n self.root = root\n self.detection_transform = detection_transform\n self.query_transform = query_transform\n self.files = []\n self.num_patches = num_patches\n for (troot, _, files) in os.walk(root, followlinks=True):\n for f in files:\n path = os.path.join(troot, f)\n self.files.append(path)\n print(f'num of files:{len(self.files)}')\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, item):\n img_path = self.files[item]\n img = Image.open(img_path).convert(\"RGB\")\n w, h = img.size\n # the format of the dataset is same with COCO.\n target = {'orig_size': torch.as_tensor([int(h), int(w)]), 'size': torch.as_tensor([int(h), int(w)])}\n iscrowd = []\n labels = []\n boxes = []\n area = []\n patches = []\n while len(area) < self.num_patches:\n patch, x, y, sw, sh = get_random_patch_from_img(img)\n boxes.append([x, y, x + sw, y + sh])\n area.append(sw * sh)\n iscrowd.append(0)\n labels.append(1)\n patches.append(self.query_transform(patch))\n target['iscrowd'] = torch.tensor(iscrowd)\n target['labels'] = torch.tensor(labels)\n target['boxes'] = torch.tensor(boxes)\n target['area'] = torch.tensor(area)\n img, target = self.detection_transform(img, target)\n return img, torch.stack(patches, dim=0), target\n\n\ndef make_self_det_transforms(image_set):\n normalize = T.Compose([\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n # The image of ImageNet is relatively small.\n scales = [320, 336, 352, 368, 400, 416, 432, 448, 464, 480]\n\n if image_set == 'train':\n return T.Compose([\n # T.RandomHorizontalFlip(), HorizontalFlip may cause the pretext too difficult, so we remove it\n T.RandomResize(scales, max_size=600),\n normalize,\n ])\n\n if image_set == 'val':\n return T.Compose([\n T.RandomResize([480], max_size=600),\n normalize,\n ])\n\n raise ValueError(f'unknown {image_set}')\n\n\n\n\nclass GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.]):\n self.sigma = sigma\n\n def __call__(self, x):\n sigma = random.uniform(self.sigma[0], self.sigma[1])\n x = 
x.filter(ImageFilter.GaussianBlur(radius=sigma))\n return x\n\n\ndef get_query_transforms(image_set):\n if image_set == 'train':\n # SimCLR style augmentation\n return transforms.Compose([\n transforms.Resize((128, 128)),\n transforms.RandomApply([\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened\n ], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\n transforms.ToTensor(),\n # transforms.RandomHorizontalFlip(), HorizontalFlip may cause the pretext too difficult, so we remove it\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n if image_set == 'val':\n return transforms.Compose([\n transforms.Resize((128, 128)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n raise ValueError(f'unknown {image_set}')\n\n\ndef build_selfdet(image_set, args):\n return SelfDet(args.imagenet_path, detection_transform=make_self_det_transforms(image_set),\n query_transform=get_query_transforms(image_set), num_patches=args.num_patches)\n" ]
[ [ "torch.stack", "torch.tensor", "numpy.random.randint" ] ]
LukePeltier/cassiopeia
[ "3abdc3e6aab69996344a05da6212f83070439dd0" ]
[ "cassiopeia/core/match.py" ]
[ "import functools\nimport arrow\nimport datetime\nimport itertools\nfrom collections import Counter\nfrom typing import List, Dict, Union, Generator\n\nfrom datapipelines import NotFoundError\nfrom merakicommons.cache import lazy, lazy_property\nfrom merakicommons.container import searchable, SearchableList, SearchableLazyList, SearchableDictionary\n\nfrom .. import configuration\nfrom .staticdata import Versions\nfrom ..data import Region, Platform, Continent, Tier, GameType, GameMode, MatchType, Queue, Side, Season, Lane, Role, Key, SummonersRiftArea, Tower\nfrom .common import CoreData, CoreDataList, CassiopeiaObject, CassiopeiaGhost, CassiopeiaLazyList, ghost_load_on\nfrom ..dto import match as dto\nfrom .patch import Patch\nfrom .summoner import Summoner\nfrom .staticdata.champion import Champion\nfrom .staticdata.rune import Rune\nfrom .staticdata.summonerspell import SummonerSpell\nfrom .staticdata.item import Item\nfrom .staticdata.map import Map\n\n\ndef load_match_on_attributeerror(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except AttributeError: # teamId\n # The match has only partially loaded this participant and it doesn't have all it's data, so load the full match\n match = getattr(self, \"_{}__match\".format(self.__class__.__name__))\n if not match._Ghost__is_loaded(MatchData):\n match.__load__(MatchData)\n match._Ghost__set_loaded(MatchData)\n if isinstance(self, Participant):\n old_participant = self\n elif isinstance(self, ParticipantStats):\n old_participant = getattr(self, \"_{}__participant\".format(self.__class__.__name__))\n else:\n raise RuntimeError(\"Impossible!\")\n for participant in match.participants:\n if participant.summoner.name == old_participant.summoner.name:\n if isinstance(self, Participant):\n self._data[ParticipantData] = participant._data[ParticipantData]\n elif isinstance(self, ParticipantStats):\n self._data[ParticipantStatsData] = participant.stats._data[ParticipantStatsData]\n return method(self, *args, **kwargs)\n return method(self, *args, **kwargs)\n return wrapper\n\n\n_staticdata_to_version_mapping = {}\ndef _choose_staticdata_version(match):\n # If we want to pull the data for the correct version, we need to pull the entire match data.\n # However, we can use the creation date (which comes with a matchref) and get the ~ patch and therefore extract the version from the patch.\n if configuration.settings.version_from_match is None or configuration.settings.version_from_match == \"latest\":\n return None # Rather than pick the latest version here, let the obj handle it so it knows which endpoint within the realms data to use\n\n if configuration.settings.version_from_match == \"version\" or hasattr(match._data[MatchData], \"version\"):\n majorminor = match.patch.major + \".\" + match.patch.minor\n elif configuration.settings.version_from_match == \"patch\":\n patch = Patch.from_date(match.creation, region=match.region)\n majorminor = patch.majorminor\n else:\n raise ValueError(\"Unknown value for setting `version_from_match`:\", configuration.settings.version_from_match)\n\n try:\n version = _staticdata_to_version_mapping[majorminor]\n except KeyError:\n if int(match.patch.major) >= 10:\n versions = Versions(region=match.region)\n # use the first major.minor.x matching occurrence from the versions list\n version = next(x for x in versions if \".\".join(x.split(\".\")[:2]) == majorminor)\n else:\n version = majorminor + \".1\" # use major.minor.1\n 
_staticdata_to_version_mapping[majorminor] = version\n return version\n\n##############\n# Data Types #\n##############\n\n\nclass MatchListData(CoreDataList):\n _dto_type = dto.MatchListDto\n _renamed = {\"champion\": \"championIds\", \"queue\": \"queues\", \"season\": \"seasons\"}\n\n\nclass PositionData(CoreData):\n _renamed = {}\n\n\nclass EventData(CoreData):\n _renamed = {\"eventType\": \"type\", \"teamId\": \"side\", \"pointCaptured\": \"capturedPoint\", \"assistingParticipantIds\": \"assistingParticipants\", \"skillSlot\": \"skill\"}\n\n def __call__(self, **kwargs):\n if \"position\" in kwargs:\n self.position = PositionData(**kwargs.pop(\"position\"))\n super().__call__(**kwargs)\n return self\n\n\nclass ParticipantFrameData(CoreData):\n _renamed = {\"totalGold\": \"goldEarned\", \"minionsKilled\": \"creepScore\", \"xp\": \"experience\", \"jungleMinionsKilled\": \"neutralMinionsKilled\"}\n\n def __call__(self, **kwargs):\n if \"position\" in kwargs:\n self.position = PositionData(**kwargs.pop(\"position\"))\n super().__call__(**kwargs)\n return self\n\n\nclass FrameData(CoreData):\n _renamed = {}\n\n def __call__(self, **kwargs):\n if \"events\" in kwargs:\n self.events = [EventData(**event) for event in kwargs.pop(\"events\")]\n if \"participantFrames\" in kwargs:\n self.participantFrames = {int(key): ParticipantFrameData(**pframe) for key, pframe in kwargs.pop(\"participantFrames\").items()}\n super().__call__(**kwargs)\n return self\n\n\nclass TimelineData(CoreData):\n _dto_type = dto.TimelineDto\n _renamed = {\"matchId\": \"id\", \"frameInterval\": \"frame_interval\"}\n\n def __call__(self, **kwargs):\n if \"frames\" in kwargs:\n self.frames = [FrameData(**frame) for frame in kwargs.pop(\"frames\")]\n super().__call__(**kwargs)\n return self\n\n\nclass ParticipantTimelineData(CoreData):\n _renamed = {\"participantId\": \"id\"}\n\n def __call__(self, **kwargs):\n #timeline.setCreepScore(getStatTotals(item.getCreepsPerMinDeltas(), durationInSeconds));\n #timeline.setCreepScoreDifference(getStatTotals(item.getCsDiffPerMinDeltas(), durationInSeconds));\n #timeline.setDamageTaken(getStatTotals(item.getDamageTakenPerMinDeltas(), durationInSeconds));\n #timeline.setDamageTakenDifference(getStatTotals(item.getDamageTakenDiffPerMinDeltas(), durationInSeconds));\n #timeline.setExperience(getStatTotals(item.getXpPerMinDeltas(), durationInSeconds));\n #timeline.setExperienceDifference(getStatTotals(item.getXpDiffPerMinDeltas(), durationInSeconds));\n super().__call__(**kwargs)\n return self\n\n\nclass ParticipantStatsData(CoreData):\n _renamed = {}\n\n\nclass ParticipantData(CoreData):\n _renamed = {\"summoner1Id\": \"summonerSpellDId\", \"summoner2Id\": \"summonerSpellFId\", \"bot\": \"isBot\", \"profileIcon\": \"profileIconId\", \"gameEndedInEarlySurrender\": \"endedInEarlySurrender\", \"gameEndedInSurrender\": \"endedInSurrender\"}\n\n def __call__(self, **kwargs):\n perks = kwargs.pop(\"perks\", {})\n stat_perks = perks.pop(\"statPerks\", {})\n # We're going to drop some info about the perks here because that info is already available from the static data\n styles = perks.pop(\"styles\", [])\n selections = list(itertools.chain(*[s.get(\"selections\", []) for s in styles]))\n self.perks = {s[\"perk\"]: [s.pop(\"var1\"), s.pop(\"var2\"), s.pop(\"var3\")] for s in selections}\n self.stat_perks = stat_perks\n non_stats = {\n \"championId\": kwargs.get(\"championId\", None),\n \"championName\": kwargs.get(\"championName\", None),\n \"gameEndedInEarlySurrender\": 
kwargs.get(\"gameEndedInEarlySurrender\", None),\n \"gameEndedInSurrender\": kwargs.get(\"gameEndedInSurrender\", None),\n \"individualPosition\": kwargs.get(\"individualPosition\", None),\n \"participantId\": kwargs.get(\"participantId\", None),\n \"profileIcon\": kwargs.get(\"profileIcon\", None),\n \"puuid\": kwargs.get(\"puuid\", None),\n \"riotIdName\": kwargs.get(\"riotIdName\", None),\n \"riotIdTagLine\": kwargs.get(\"riotIdTagline\", None),\n \"summoner1Id\": kwargs.get(\"summoner1Id\", None),\n \"summoner2Id\": kwargs.get(\"summoner2Id\", None),\n \"summonerId\": kwargs.get(\"summonerId\", None),\n \"summonerLevel\": kwargs.pop(\"summonerLevel\", None),\n \"summonerName\": kwargs.get(\"summonerName\", None),\n \"teamEarlySurrendered\": kwargs.get(\"teamEarlySurrendered\", None),\n \"teamId\": kwargs.get(\"teamId\", None),\n \"teamPosition\": kwargs.get(\"teamPosition\", None),\n }\n stats = {\n \"assists\": kwargs.pop(\"assists\", None),\n \"baronKills\": kwargs.pop(\"baronKills\", None),\n \"bountyLevel\": kwargs.pop(\"bountyLevel\", None),\n \"champExperience\": kwargs.pop(\"champExperience\", None),\n \"champLevel\": kwargs.pop(\"champLevel\", None),\n \"championTransform\": kwargs.pop(\"championTransform\", None),\n \"consumablesPurchased\": kwargs.pop(\"consumablesPurchased\", None),\n \"damageDealtToBuildings\": kwargs.pop(\"damageDealtToBuildings\", None),\n \"damageDealtToObjectives\": kwargs.pop(\"damageDealtToObjectives\", None),\n \"damageDealtToTurrets\": kwargs.pop(\"damageDealtToTurrets\", None),\n \"damageSelfMitigated\": kwargs.pop(\"damageSelfMitigated\", None),\n \"deaths\": kwargs.pop(\"deaths\", None),\n \"detectorWardsPlaced\": kwargs.pop(\"detectorWardsPlaced\", None),\n \"doubleKills\": kwargs.pop(\"doubleKills\", None),\n \"dragonKills\": kwargs.pop(\"dragonKills\", None),\n \"firstBloodAssist\": kwargs.pop(\"firstBloodAssist\", None),\n \"firstBloodKill\": kwargs.pop(\"firstBloodKill\", None),\n \"firstTowerAssist\": kwargs.pop(\"firstTowerAssist\", None),\n \"firstTowerKill\": kwargs.pop(\"firstTowerKill\", None),\n \"goldEarned\": kwargs.pop(\"goldEarned\", None),\n \"goldSpent\": kwargs.pop(\"goldSpent\", None),\n \"inhibitorKills\": kwargs.pop(\"inhibitorKills\", None),\n \"inhibitorTakedowns\": kwargs.pop(\"inhibitorTakedowns\", None),\n \"inhibitorsLost\": kwargs.pop(\"inhibitorsLost\", None),\n \"item0\": kwargs.pop(\"item0\", None),\n \"item1\": kwargs.pop(\"item1\", None),\n \"item2\": kwargs.pop(\"item2\", None),\n \"item3\": kwargs.pop(\"item3\", None),\n \"item4\": kwargs.pop(\"item4\", None),\n \"item5\": kwargs.pop(\"item5\", None),\n \"item6\": kwargs.pop(\"item6\", None),\n \"itemsPurchased\": kwargs.pop(\"itemsPurchased\", None),\n \"killingSprees\": kwargs.pop(\"killingSprees\", None),\n \"kills\": kwargs.pop(\"kills\", None),\n \"lane\": kwargs.pop(\"lane\", None),\n \"largestCriticalStrike\": kwargs.pop(\"largestCriticalStrike\", None),\n \"largestKillingSpree\": kwargs.pop(\"largestKillingSpree\", None),\n \"largestMultiKill\": kwargs.pop(\"largestMultiKill\", None),\n \"longestTimeSpentLiving\": kwargs.pop(\"longestTimeSpentLiving\", None),\n \"magicDamageDealt\": kwargs.pop(\"magicDamageDealt\", None),\n \"magicDamageDealtToChampions\": kwargs.pop(\"magicDamageDealtToChampions\", None),\n \"magicDamageTaken\": kwargs.pop(\"magicDamageTaken\", None),\n \"neutralMinionsKilled\": kwargs.pop(\"neutralMinionsKilled\", None),\n \"nexusKills\": kwargs.pop(\"nexusKills\", None),\n \"nexusLost\": kwargs.pop(\"nexusLost\", None),\n 
\"nexusTakedowns\": kwargs.pop(\"nexusTakedowns\", None),\n \"objectivesStolen\": kwargs.pop(\"objectivesStolen\", None),\n \"objectivesStolenAssists\": kwargs.pop(\"objectivesStolenAssists\", None),\n \"pentaKills\": kwargs.pop(\"pentaKills\", None),\n \"physicalDamageDealt\": kwargs.pop(\"physicalDamageDealt\", None),\n \"physicalDamageDealtToChampions\": kwargs.pop(\"physicalDamageDealtToChampions\", None),\n \"physicalDamageTaken\": kwargs.pop(\"physicalDamageTaken\", None),\n \"quadraKills\": kwargs.pop(\"quadraKills\", None),\n \"role\": kwargs.pop(\"role\", None),\n \"sightWardsBoughtInGame\": kwargs.pop(\"sightWardsBoughtInGame\", None),\n \"spell1Casts\": kwargs.pop(\"spell1Casts\", None),\n \"spell2Casts\": kwargs.pop(\"spell2Casts\", None),\n \"spell3Casts\": kwargs.pop(\"spell3Casts\", None),\n \"spell4Casts\": kwargs.pop(\"spell4Casts\", None),\n \"summoner1Casts\": kwargs.pop(\"summoner1Casts\", None),\n \"summoner2Casts\": kwargs.pop(\"summoner2Casts\", None),\n \"timeCCingOthers\": kwargs.pop(\"timeCCingOthers\", None),\n \"timePlayed\": kwargs.pop(\"timePlayed\", None),\n \"totalDamageDealt\": kwargs.pop(\"totalDamageDealt\", None),\n \"totalDamageDealtToChampions\": kwargs.pop(\"totalDamageDealtToChampions\", None),\n \"totalDamageShieldedOnTeammates\": kwargs.pop(\"totalDamageShieldedOnTeammates\", None),\n \"totalDamageTaken\": kwargs.pop(\"totalDamageTaken\", None),\n \"totalHeal\": kwargs.pop(\"totalHeal\", None),\n \"totalHealsOnTeammates\": kwargs.pop(\"totalHealsOnTeammates\", None),\n \"totalMinionsKilled\": kwargs.pop(\"totalMinionsKilled\", None),\n \"totalTimeCCDealt\": kwargs.pop(\"totalTimeCCDealt\", None),\n \"totalTimeSpentDead\": kwargs.pop(\"totalTimeSpentDead\", None),\n \"totalUnitsHealed\": kwargs.pop(\"totalUnitsHealed\", None),\n \"tripleKills\": kwargs.pop(\"tripleKills\", None),\n \"trueDamageDealt\": kwargs.pop(\"trueDamageDealt\", None),\n \"trueDamageDealtToChampions\": kwargs.pop(\"trueDamageDealtToChampions\", None),\n \"trueDamageTaken\": kwargs.pop(\"trueDamageTaken\", None),\n \"turretKills\": kwargs.pop(\"turretKills\", None),\n \"turretTakedowns\": kwargs.pop(\"turretTakedowns\", None),\n \"turretsLost\": kwargs.pop(\"turretsLost\", None),\n \"unrealKills\": kwargs.pop(\"unrealKills\", None),\n \"visionScore\": kwargs.pop(\"visionScore\", None),\n \"visionWardsBoughtInGame\": kwargs.pop(\"visionWardsBoughtInGame\", None),\n \"wardsKilled\": kwargs.pop(\"wardsKilled\", None),\n \"wardsPlaced\": kwargs.pop(\"wardsPlaced\", None),\n \"win\": kwargs.pop(\"win\", None),\n }\n self.stats = ParticipantStatsData(**stats)\n\n # TODO: I don't think this is supported anymore, same for the attributes relying on this (role, lane, ...)\n if \"timeline\" in kwargs:\n self.timeline = ParticipantTimelineData(**kwargs.pop(\"timeline\"))\n if \"teamId\" in kwargs:\n self.side = Side(kwargs.pop(\"teamId\"))\n\n super().__call__(**kwargs)\n return self\n\n\nclass BanData(CoreData):\n _renamed = {}\n\n\nclass ObjectiveData(CoreData):\n _renamed = {}\n\n\nclass TeamData(CoreData):\n _renamed = {\"dominionVictoryScore\": \"dominionScore\", \"firstBaron\": \"firstBaronKiller\", \"firstBlood\": \"firstBloodKiller\", \"firstDragon\": \"firstDragonKiller\", \"firstInhibitor\": \"firstInhibitorKiller\", \"firstRiftHerald\": \"firstRiftHeraldKiller\", \"firstTower\": \"firstTowerKiller\"}\n\n def __call__(self, **kwargs):\n self.bans = [BanData(**ban) for ban in kwargs.pop(\"bans\", [])]\n self.objectives = {key: ObjectiveData(**obj) for key, obj in 
kwargs.pop(\"objectives\", {}).items()}\n if \"win\" in kwargs:\n self.isWinner = kwargs.pop(\"win\")\n if \"teamId\" in kwargs:\n self.side = Side(kwargs.pop(\"teamId\"))\n super().__call__(**kwargs)\n return self\n\n\nclass MatchReferenceData(CoreData):\n _renamed = {\"matchId\": \"id\"}\n\n\nclass MatchData(CoreData):\n _dto_type = dto.MatchDto\n _renamed = {\"gameId\": \"id\", \"gameVersion\": \"version\", \"gameMode\": \"mode\", \"gameType\": \"type\", \"gameName\": \"name\", \"queueId\": \"queue\"}\n\n def __call__(self, **kwargs):\n if \"gameCreation\" in kwargs:\n self.creation = arrow.get(kwargs[\"gameCreation\"] / 1000)\n if \"gameDuration\" in kwargs:\n self.duration = datetime.timedelta(seconds=kwargs[\"gameDuration\"] / 1000)\n if \"gameStartTimestamp\" in kwargs:\n self.start = arrow.get(kwargs[\"gameStartTimestamp\"] / 1000)\n\n participants = kwargs.pop(\"participants\", [])\n puuids = set([p.get(\"puuid\", None) for p in participants])\n self.privateGame = False\n if len(puuids) == 1:\n self.privateGame = True\n self.participants = []\n for participant in participants:\n participant = ParticipantData(**participant, platformId=kwargs[\"platformId\"])\n self.participants.append(participant)\n\n teams = kwargs.pop(\"teams\", [])\n self.teams = []\n for team in teams:\n team_side = Side(team[\"teamId\"])\n participants = []\n for participant in self.participants:\n if participant.side is team_side:\n participants.append(participant)\n self.teams.append(TeamData(**team, participants=participants))\n\n super().__call__(**kwargs)\n return self\n\n\n##############\n# Core Types #\n##############\n\n\nclass MatchHistory(CassiopeiaLazyList): # type: List[Match]\n \"\"\"The match history for a summoner. By default, this will return the entire match history.\"\"\"\n _data_types = {MatchListData}\n\n def __init__(self, *, puuid: str, continent: Continent = None, region: Region = None, platform: Platform = None, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queue: Queue = None, type: MatchType = None):\n assert end_index is None or end_index > begin_index\n if begin_time is not None and end_time is not None and begin_time > end_time:\n raise ValueError(\"`end_time` should be greater than `begin_time`\")\n kwargs = {\"continent\": continent, \"puuid\": puuid, \"queue\": queue, \"type\": type, \"begin_index\": begin_index, \"end_index\": end_index}\n if begin_time is not None and not isinstance(begin_time, (int, float)):\n begin_time = begin_time.int_timestamp * 1000\n kwargs[\"begin_time\"] = begin_time\n if end_time is not None and not isinstance(end_time, (int, float)):\n end_time = end_time.int_timestamp * 1000\n kwargs[\"end_time\"] = end_time\n CassiopeiaObject.__init__(self, **kwargs)\n\n @classmethod\n def __get_query_from_kwargs__(cls, *, continent: Continent, puuid: str, region: Region = None, platform: Platform = None, begin_index: int = None, end_index: int = None, begin_time: arrow.Arrow = None, end_time: arrow.Arrow = None, queue: Queue = None, type: MatchType = None):\n query = {\"continent\": continent, \"puuid\": puuid}\n\n if begin_index is not None:\n query[\"beginIndex\"] = begin_index\n\n if end_index is not None:\n query[\"endIndex\"] = end_index\n\n if begin_time is not None:\n if isinstance(begin_time, arrow.Arrow):\n begin_time = begin_time.int_timestamp * 1000\n query[\"beginTime\"] = begin_time\n\n if end_time is not None:\n if isinstance(end_time, arrow.Arrow):\n end_time = end_time.int_timestamp * 
1000\n query[\"endTime\"] = end_time\n\n if queue is not None:\n query[\"queue\"] = queue\n\n if type is not None:\n query[\"type\"] = type\n\n return query\n\n @classmethod\n def from_generator(cls, generator: Generator, **kwargs):\n self = cls.__new__(cls)\n CassiopeiaLazyList.__init__(self, generator=generator, **kwargs)\n return self\n\n def __call__(self, **kwargs) -> \"MatchHistory\":\n kwargs.setdefault(\"begin_index\", self.begin_index)\n kwargs.setdefault(\"end_index\", self.end_index)\n kwargs.setdefault(\"begin_time\", self.begin_time)\n kwargs.setdefault(\"end_time\", self.end_time)\n kwargs.setdefault(\"queue\", self.queue)\n kwargs.setdefault(\"type\", self.match_type)\n return MatchHistory(**kwargs)\n\n def continent(self) -> Continent:\n return Continent(self._data[MatchListData].continent)\n\n @lazy_property\n def region(self) -> Region:\n return Region(self._data[MatchListData].region)\n\n @lazy_property\n def platform(self) -> Platform:\n return self.region.platform\n\n def queue(self) -> Queue:\n return Queue(self._data[MatchListData].queue)\n\n def match_type(self) -> MatchType:\n return MatchType(self._data[MatchData].type)\n\n @property\n def begin_index(self) -> Union[int, None]:\n try:\n return self._data[MatchListData].beginIndex\n except AttributeError:\n return None\n\n @property\n def end_index(self) -> Union[int, None]:\n try:\n return self._data[MatchListData].endIndex\n except AttributeError:\n return None\n\n @property\n def begin_time(self) -> arrow.Arrow:\n time = self._data[MatchListData].begin_time\n if time is not None:\n return arrow.get(time / 1000)\n\n @property\n def end_time(self) -> arrow.Arrow:\n time = self._data[MatchListData].end_time\n if time is not None:\n return arrow.get(time / 1000)\n\n\nclass Position(CassiopeiaObject):\n _data_types = {PositionData}\n\n def __str__(self):\n return \"<Position ({}, {})>\".format(self.x, self.y)\n\n @property\n def x(self) -> int:\n return self._data[PositionData].x\n\n @property\n def y(self) -> int:\n return self._data[PositionData].y\n\n @property\n def location(self) -> SummonersRiftArea:\n return SummonersRiftArea.from_position(self)\n\n\n@searchable({str: [\"type\", \"tower_type\", \"ascended_type\", \"ward_type\", \"monster_type\", \"type\", \"monster_sub_type\", \"lane_type\", \"building_type\"]})\nclass Event(CassiopeiaObject):\n _data_types = {EventData}\n\n @property\n def tower_type(self) -> Tower:\n return Tower(self._data[EventData].towerType)\n\n @property\n def side(self) -> Side:\n return Side(self._data[EventData].side)\n\n @property\n def ascended_type(self) -> str:\n return self._data[EventData].ascendedType\n\n @property\n def killer_id(self) -> int:\n return self._data[EventData].killerId\n\n @property\n def level_up_type(self) -> str:\n return self._data[EventData].levelUpType\n\n @property\n def captured_point(self) -> str:\n return self._data[EventData].capturedPoint\n\n @property\n def assisting_participants(self) -> List[int]:\n return self._data[EventData].assistingParticipants\n\n @property\n def ward_type(self) -> str:\n return self._data[EventData].wardType\n\n @property\n def monster_type(self) -> str:\n return self._data[EventData].monsterType\n\n @property\n def type(self) -> List[str]:\n \"\"\"Legal values: CHAMPION_KILL, WARD_PLACED, WARD_KILL, BUILDING_KILL, ELITE_MONSTER_KILL, ITEM_PURCHASED, ITEM_SOLD, ITEM_DESTROYED, ITEM_UNDO, SKILL_LEVEL_UP, ASCENDED_EVENT, CAPTURE_POINT, PORO_KING_SUMMON\"\"\"\n return self._data[EventData].type\n\n @property\n def skill(self) 
-> int:\n return self._data[EventData].skill\n\n @property\n def victim_id(self) -> int:\n return self._data[EventData].victimId\n\n @property\n def timestamp(self) -> datetime.timedelta:\n return datetime.timedelta(seconds=self._data[EventData].timestamp/1000)\n\n @property\n def after_id(self) -> int:\n return self._data[EventData].afterId\n\n @property\n def monster_sub_type(self) -> str:\n return self._data[EventData].monsterSubType\n\n @property\n def lane_type(self) -> str:\n return self._data[EventData].laneType\n\n @property\n def item_id(self) -> int:\n return self._data[EventData].itemId\n\n @property\n def participant_id(self) -> int:\n return self._data[EventData].participantId\n\n @property\n def building_type(self) -> str:\n return self._data[EventData].buildingType\n\n @property\n def creator_id(self) -> int:\n return self._data[EventData].creatorId\n\n @property\n def position(self) -> Position:\n return Position.from_data(self._data[EventData].position)\n\n @property\n def before_id(self) -> int:\n return self._data[EventData].beforeId\n\n\nclass ParticipantFrame(CassiopeiaObject):\n _data_types = {ParticipantFrameData}\n\n @property\n def gold_earned(self) -> int:\n return self._data[ParticipantFrameData].goldEarned\n\n @property\n def team_score(self) -> int:\n return self._data[ParticipantFrameData].teamScore\n\n @property\n def participant_id(self) -> int:\n return self._data[ParticipantFrameData].participantId\n\n @property\n def level(self) -> int:\n return self._data[ParticipantFrameData].level\n\n @property\n def current_gold(self) -> int:\n return self._data[ParticipantFrameData].currentGold\n\n @property\n def creep_score(self) -> int:\n return self._data[ParticipantFrameData].creepScore\n\n @property\n def dominion_score(self) -> int:\n return self._data[ParticipantFrameData].dominionScore\n\n @property\n def position(self) -> Position:\n return Position.from_data(self._data[ParticipantFrameData].position)\n\n @property\n def experience(self) -> int:\n return self._data[ParticipantFrameData].experience\n\n @property\n def neutral_minions_killed(self) -> int:\n return self._data[ParticipantFrameData].neutralMinionsKilled\n\n\nclass Frame(CassiopeiaObject):\n _data_types = {FrameData}\n\n @property\n def timestamp(self) -> datetime.timedelta:\n return datetime.timedelta(seconds=self._data[FrameData].timestamp/1000)\n\n @property\n def participant_frames(self) -> Dict[int, ParticipantFrame]:\n return SearchableDictionary({k: ParticipantFrame.from_data(frame) for k, frame in self._data[FrameData].participantFrames.items()})\n\n @property\n def events(self) -> List[Event]:\n return SearchableList([Event.from_data(event) for event in self._data[FrameData].events])\n\n\nclass Timeline(CassiopeiaGhost):\n _data_types = {TimelineData}\n\n def __init__(self, *, id: int = None, continent: Continent = None, region: Union[Region, str] = None, platform: Platform = None):\n kwargs = {\"id\": id}\n if continent is not None:\n kwargs[\"continent\"] = continent\n elif region is not None:\n kwargs[\"continent\"] = region.continent\n elif platform is not None:\n kwargs[\"continent\"] = platform.continent\n super().__init__(**kwargs)\n\n def __get_query__(self):\n return {\"continent\": self.continent, \"id\": self.id}\n\n @property\n def id(self):\n return self._data[TimelineData].id\n\n @property\n def continent(self) -> Continent:\n return Continent(self._data[TimelineData].continent)\n\n @property\n def region(self) -> Region:\n return Region(self._data[TimelineData].region)\n\n 
@property\n def platform(self) -> Platform:\n return self.region.platform\n\n @CassiopeiaGhost.property(TimelineData)\n @ghost_load_on\n def frames(self) -> List[Frame]:\n return SearchableList([Frame.from_data(frame) for frame in self._data[TimelineData].frames])\n\n @CassiopeiaGhost.property(TimelineData)\n @ghost_load_on\n def frame_interval(self) -> int:\n return self._data[TimelineData].frame_interval\n\n @property\n def first_tower_fallen(self) -> Event:\n for frame in self.frames:\n for event in frame.events:\n if event.type == \"BUILDING_KILL\" and event.building_type == \"TOWER_BUILDING\":\n return event\n\n\nclass ParticipantTimeline(object):\n _data_types = {ParticipantTimelineData}\n\n @classmethod\n def from_data(cls, match: \"Match\"):\n self = cls()\n self.__match = match\n return self\n\n @property\n def frames(self):\n these = []\n for frame in self.__match.timeline.frames:\n for pid, pframe in frame.participant_frames.items():\n pframe.timestamp = frame.timestamp\n if pframe.participant_id == self.id:\n these.append(pframe)\n return these\n\n @property\n def events(self):\n my_events = []\n timeline = self.__match.timeline\n for frame in timeline.frames:\n for event in frame.events:\n try:\n if event.participant_id == self.id:\n my_events.append(event)\n except AttributeError:\n pass\n try:\n if event.creator_id == self.id:\n my_events.append(event)\n except AttributeError:\n pass\n try:\n if event.killer_id == self.id:\n my_events.append(event)\n except AttributeError:\n pass\n try:\n if event.victim_id == self.id:\n my_events.append(event)\n except AttributeError:\n pass\n try:\n if self.id in event.assisting_participants:\n my_events.append(event)\n except AttributeError:\n pass\n return SearchableList(my_events)\n\n @property\n def champion_kills(self):\n return self.events.filter(lambda event: event.type == \"CHAMPION_KILL\" and event.killer_id == self.id)\n\n @property\n def champion_deaths(self):\n return self.events.filter(lambda event: event.type == \"CHAMPION_KILL\" and event.victim_id == self.id)\n\n @property\n def champion_assists(self):\n return self.events.filter(lambda event: event.type == \"CHAMPION_KILL\" and self.id in event.assisting_participants)\n\n\nclass CumulativeTimeline:\n def __init__(self, id: int, participant_timeline: ParticipantTimeline):\n self._id = id\n self._timeline = participant_timeline\n\n def __getitem__(self, time: Union[datetime.timedelta, str]) -> \"ParticipantState\":\n if isinstance(time, str):\n time = time.split(\":\")\n time = datetime.timedelta(minutes=int(time[0]), seconds=int(time[1]))\n state = ParticipantState(id=self._id, time=time, participant_timeline=self._timeline)\n for event in self._timeline.events:\n if event.timestamp > time:\n break\n state._process_event(event)\n return state\n\n\nclass ParticipantState:\n \"\"\"The state of a participant at a given point in the timeline.\"\"\"\n def __init__(self, id: int, time: datetime.timedelta, participant_timeline: ParticipantTimeline):\n self._id = id\n self._time = time\n #self._timeline = participant_timeline\n # Try to get info from the most recent participant timeline object\n latest_frame = None\n for frame in participant_timeline.frames:\n # Round to the nearest second for the frame timestamp because it's off by a few ms\n rounded_frame_timestamp = datetime.timedelta(seconds=frame.timestamp.seconds)\n if rounded_frame_timestamp > self._time:\n break\n latest_frame = frame\n self._latest_frame = latest_frame\n self._item_state = _ItemState()\n self._skills = 
Counter()\n self._kills = 0\n self._deaths = 0\n self._assists = 0\n self._objectives = 0\n self._level = 1\n self._processed_events = []\n\n def _process_event(self, event: Event):\n if \"ITEM\" in event.type:\n self._item_state.process_event(event)\n elif \"CHAMPION_KILL\" == event.type:\n if event.killer_id == self._id:\n self._kills += 1\n elif event.victim_id == self._id:\n self._deaths += 1\n else:\n assert self._id in event.assisting_participants\n self._assists += 1\n elif \"SKILL_LEVEL_UP\" == event.type:\n if event.level_up_type == \"NORMAL\":\n self._skills[event.skill] += 1\n self._level += 1\n elif event.type in (\"WARD_PLACED\", \"WARD_KILL\"):\n return\n elif event.type in (\"ELITE_MONSTER_KILL\", \"BUILDING_KILL\"):\n self._objectives += 1\n else:\n #print(f\"Did not process event {event.to_dict()}\")\n pass\n self._processed_events.append(event)\n\n @property\n def items(self) -> SearchableList:\n return SearchableList([Item(id=id_, region=\"NA\") for id_ in self._item_state._items])\n\n @property\n def skills(self) -> Dict[Key, int]:\n skill_keys = {1: Key.Q, 2: Key.W, 3: Key.E, 4: Key.R}\n skills = {skill_keys[skill]: level for skill, level in self._skills.items()}\n return skills\n\n @property\n def kills(self) -> int:\n return self._kills\n\n @property\n def deaths(self) -> int:\n return self._deaths\n\n @property\n def assists(self) -> int:\n return self._assists\n\n @property\n def kda(self) -> float:\n return (self.kills + self.assists) / (self.deaths or 1)\n\n @property\n def objectives(self) -> int:\n \"\"\"Number of objectives assisted in.\"\"\"\n return self._objectives\n\n @property\n def level(self) -> int:\n return self._level\n\n @property\n def gold_earned(self) -> int:\n return self._latest_frame.gold_earned\n\n @property\n def team_score(self) -> int:\n return self._latest_frame.team_score\n\n @property\n def current_gold(self) -> int:\n return self._latest_frame.current_gold\n\n @property\n def creep_score(self) -> int:\n return self._latest_frame.creep_score\n\n @property\n def dominion_score(self) -> int:\n return self._latest_frame.dominion_score\n\n @property\n def position(self) -> Position:\n # The latest position is either from the latest event or from the participant timeline frame\n latest_frame_ts = self._latest_frame.timestamp\n latest_event_with_ts = [(getattr(event, 'timestamp', None), getattr(event, 'position', None)) for event in self._processed_events]\n latest_event_with_ts = [(ts, p) for ts, p in latest_event_with_ts if ts is not None and p is not None]\n latest_event_ts = sorted(latest_event_with_ts)[-1]\n if latest_frame_ts > latest_event_ts[0]:\n return self._latest_frame.position\n else:\n return latest_event_ts[1]\n\n @property\n def experience(self) -> int:\n return self._latest_frame.experience\n\n @property\n def neutral_minions_killed(self) -> int:\n return self._latest_frame.neutral_minions_killed\n\n\nclass _ItemState:\n def __init__(self, *args):\n self._items = []\n self._events = []\n\n def __str__(self):\n return str(self._items)\n\n def process_event(self, event):\n items_to_ignore = (2010, 3599, 3520, 3513, 2422, 2052)\n # 2422 is Slightly Magical Boots... 
I could figure out how to add those and Biscuits to the inventory based on runes but it would be manual...\n # 2052 is Poro-Snax, which gets added to inventory eventless\n upgradable_items = {\n 3850: 3851, 3851: 3853, # Spellthief's Edge -> Frostfang -> Shard of True Ice\n 3854: 3855, 3855: 3857, # Steel Shoulderguards -> Runesteel Spaulders -> Pauldrons of Whiterock\n 3858: 3859, 3859: 3860, # Relic Shield -> Targon's Buckler -> Bulwark of the Mountain\n 3862: 3863, 3863: 3864, # Spectral Sickle -> Harrowing Crescent -> Black Mist Scythe\n }\n item_id = getattr(event, 'item_id', getattr(event, 'before_id', None))\n assert item_id is not None\n if item_id in items_to_ignore:\n return\n if event.type == \"ITEM_PURCHASED\":\n self.add(event.item_id)\n self._events.append(event)\n elif event.type == \"ITEM_DESTROYED\":\n self.destroy(event.item_id)\n if event.item_id in upgradable_items:\n # add the upgraded item\n self.add(upgradable_items[event.item_id])\n self._events.append(event)\n elif event.type == \"ITEM_SOLD\":\n self.destroy(event.item_id)\n self._events.append(event)\n elif event.type == \"ITEM_UNDO\":\n self.undo(event)\n else:\n raise ValueError(f\"Unexpected event type {event.type}\")\n\n def add(self, item: int):\n self._items.append(item)\n\n def destroy(self, item: int):\n self._items.reverse()\n try:\n self._items.remove(item)\n except ValueError as error:\n if item in (3340, 3364, 2319, 2061, 2062, 2056, 2403, 2419, 3400, 2004, 2058, 3200, 2011, 2423, 2055, 2057, 2424, 2059, 2060, 2013, 2421, 3600): # Something weird can happen with trinkets and klepto items\n pass\n else:\n raise error\n self._items.reverse()\n\n def undo(self, event: Event):\n assert event.after_id == 0 or event.before_id == 0\n item_id = event.before_id or event.after_id\n prev = None\n while prev is None or prev.item_id != item_id:\n prev = self._events.pop()\n if prev.type == \"ITEM_PURCHASED\":\n self.destroy(prev.item_id)\n elif prev.type == \"ITEM_DESTROYED\":\n self.add(prev.item_id)\n elif prev.type == \"ITEM_SOLD\":\n self.add(prev.item_id)\n else:\n raise TypeError(f\"Unexpected event type {prev.type}\")\n\n\n@searchable({str: [\"items\"], Item: [\"items\"]})\nclass ParticipantStats(CassiopeiaObject):\n _data_types = {ParticipantStatsData}\n\n @classmethod\n def from_data(cls, data: ParticipantStatsData, match: \"Match\", participant: \"Participant\"):\n self = super().from_data(data)\n self.__match = match\n self.__participant = participant\n return self\n\n @property\n @load_match_on_attributeerror\n def kda(self) -> float:\n return (self.kills + self.assists) / (self.deaths or 1)\n\n @property\n @load_match_on_attributeerror\n def deaths(self) -> int:\n return self._data[ParticipantStatsData].deaths\n\n @property\n @load_match_on_attributeerror\n def assists(self) -> int:\n return self._data[ParticipantStatsData].assists\n\n @property\n @load_match_on_attributeerror\n def kills(self) -> int:\n return self._data[ParticipantStatsData].kills\n \n @property\n @load_match_on_attributeerror\n def baron_kills(self) -> int:\n return self._data[ParticipantStatsData].baronKills\n \n @property\n @load_match_on_attributeerror\n def bounty_level(self) -> int:\n return self._data[TeamData].bountyLevel\n\n @property\n @load_match_on_attributeerror\n def champion_experience(self) -> int:\n return self._data[TeamData].championExperience\n\n @property\n @load_match_on_attributeerror\n def level(self) -> int:\n return self._data[ParticipantStatsData].champLevel\n\n @load_match_on_attributeerror\n @property\n 
def champion_transform(self) -> int:\n return self._data[TeamData].championTransform\n\n @property\n @load_match_on_attributeerror\n def consumables_purchased(self) -> int:\n return self._data[ParticipantStatsData].consumablesPurchased\n\n @property\n @load_match_on_attributeerror\n def damage_dealt_to_buildings(self) -> int:\n return self._data[ParticipantStatsData].damageDealtToBuildings\n\n @property\n @load_match_on_attributeerror\n def damage_dealt_to_objectives(self) -> int:\n return self._data[ParticipantStatsData].damageDealtToObjectives\n\n @property\n @load_match_on_attributeerror\n def damage_dealt_to_turrets(self) -> int:\n return self._data[ParticipantStatsData].damageDealtToTurrets\n\n @property\n @load_match_on_attributeerror\n def damage_self_mitigated(self) -> int:\n return self._data[ParticipantStatsData].damageSelfMitigated\n\n @property\n @load_match_on_attributeerror\n def vision_wards_bought(self) -> int:\n return self._data[ParticipantStatsData].visionWardsBoughtInGame\n\n @property\n @load_match_on_attributeerror\n def vision_wards_placed(self) -> int:\n return self._data[ParticipantStatsData].detectorWardsPlaced\n\n @property\n @load_match_on_attributeerror\n def double_kills(self) -> int:\n return self._data[ParticipantStatsData].doubleKills\n\n @property\n @load_match_on_attributeerror\n def dragon_kills(self) -> int:\n return self._data[ParticipantStatsData].dragonKills\n\n @property\n @load_match_on_attributeerror\n def first_blood_assist(self) -> bool:\n return self._data[ParticipantStatsData].firstBloodAssist\n\n @property\n @load_match_on_attributeerror\n def first_blood_kill(self) -> bool:\n return self._data[ParticipantStatsData].firstBloodKill\n\n @property\n @load_match_on_attributeerror\n def first_tower_assist(self) -> bool:\n return self._data[ParticipantStatsData].firstTowerAssist\n\n @property\n @load_match_on_attributeerror\n def first_tower_kill(self) -> bool:\n return self._data[ParticipantStatsData].firstTowerKill\n\n @property\n @load_match_on_attributeerror\n def gold_earned(self) -> int:\n return self._data[ParticipantStatsData].goldEarned\n\n @property\n @load_match_on_attributeerror\n def gold_spent(self) -> int:\n return self._data[ParticipantStatsData].goldSpent\n\n @property\n @load_match_on_attributeerror\n def inhibitor_kills(self) -> int:\n return self._data[ParticipantStatsData].inhibitorKills\n\n @property\n @load_match_on_attributeerror\n def inhibitor_takedowns(self) -> int:\n return self._data[ParticipantStatsData].inhibitorTakedowns\n\n @property\n @load_match_on_attributeerror\n def inhibitors_lost(self) -> int:\n return self._data[ParticipantStatsData].inhibitorsLost\n\n @lazy_property\n @load_match_on_attributeerror\n def items(self) -> List[Item]:\n ids = [self._data[ParticipantStatsData].item0,\n self._data[ParticipantStatsData].item1,\n self._data[ParticipantStatsData].item2,\n self._data[ParticipantStatsData].item3,\n self._data[ParticipantStatsData].item4,\n self._data[ParticipantStatsData].item5,\n self._data[ParticipantStatsData].item6\n ]\n version = _choose_staticdata_version(self.__match)\n return SearchableList([Item(id=id, version=version, region=self.__match.region) if id else None for id in ids])\n\n @property\n @load_match_on_attributeerror\n def items_purchased(self) -> int:\n return self._data[ParticipantStatsData].itemsPurchased\n\n @property\n @load_match_on_attributeerror\n def killing_sprees(self) -> int:\n return self._data[ParticipantStatsData].killingSprees\n\n @property\n 
@load_match_on_attributeerror\n def largest_critical_strike(self) -> int:\n return self._data[ParticipantStatsData].largestCriticalStrike\n\n @property\n @load_match_on_attributeerror\n def largest_killing_spree(self) -> int:\n return self._data[ParticipantStatsData].largestKillingSpree\n\n @property\n @load_match_on_attributeerror\n def largest_multi_kill(self) -> int:\n return self._data[ParticipantStatsData].largestMultiKill\n\n @property\n @load_match_on_attributeerror\n def longest_time_spent_living(self) -> int:\n return self._data[ParticipantStatsData].longestTimeSpentLiving\n\n @property\n @load_match_on_attributeerror\n def magic_damage_dealt(self) -> int:\n return self._data[ParticipantStatsData].magicDamageDealt\n\n @property\n @load_match_on_attributeerror\n def magic_damage_dealt_to_champions(self) -> int:\n return self._data[ParticipantStatsData].magicDamageDealtToChampions\n\n @property\n @load_match_on_attributeerror\n def magic_damage_taken(self) -> int:\n return self._data[ParticipantStatsData].magicDamageTaken\n\n @property\n @load_match_on_attributeerror\n def neutral_minions_killed(self) -> int:\n return self._data[ParticipantStatsData].neutralMinionsKilled\n\n @property\n @load_match_on_attributeerror\n def nexus_kills(self) -> int:\n return self._data[ParticipantStatsData].nexusKills\n\n @property\n @load_match_on_attributeerror\n def nexus_lost(self) -> int:\n return self._data[ParticipantStatsData].nexusLost\n\n @property\n @load_match_on_attributeerror\n def nexus_takedowns(self) -> int:\n return self._data[ParticipantStatsData].nexusTakedowns\n\n @property\n @load_match_on_attributeerror\n def objectives_stolen(self) -> int:\n return self._data[ParticipantStatsData].objectivesStolen\n\n @property\n @load_match_on_attributeerror\n def objectives_stolen_assists(self) -> int:\n return self._data[ParticipantStatsData].objectivesStolenAssists\n\n @property\n @load_match_on_attributeerror\n def penta_kills(self) -> int:\n return self._data[ParticipantStatsData].pentaKills\n\n @property\n @load_match_on_attributeerror\n def physical_damage_dealt(self) -> int:\n return self._data[ParticipantStatsData].physicalDamageDealt\n\n @property\n @load_match_on_attributeerror\n def physical_damage_dealt_to_champions(self) -> int:\n return self._data[ParticipantStatsData].physicalDamageDealtToChampions\n\n @property\n @load_match_on_attributeerror\n def physical_damage_taken(self) -> int:\n return self._data[ParticipantStatsData].physicalDamageTaken\n\n @property\n @load_match_on_attributeerror\n def quadra_kills(self) -> int:\n return self._data[ParticipantStatsData].quadraKills\n\n @property\n @load_match_on_attributeerror\n def sight_wards_bought(self) -> int:\n return self._data[ParticipantStatsData].sightWardsBoughtInGame\n\n @property\n @load_match_on_attributeerror\n def spell_1_casts(self) -> int:\n return self._data[ParticipantStatsData].spell1Casts\n\n @property\n @load_match_on_attributeerror\n def spell_2_casts(self) -> int:\n return self._data[ParticipantStatsData].spell2Casts\n\n @property\n @load_match_on_attributeerror\n def spell_3_casts(self) -> int:\n return self._data[ParticipantStatsData].spell3Casts\n\n @property\n @load_match_on_attributeerror\n def spell_4_casts(self) -> int:\n return self._data[ParticipantStatsData].spell4Casts\n\n @property\n @load_match_on_attributeerror\n def summoner_spell_1_casts(self) -> int:\n return self._data[ParticipantStatsData].summoner1Casts\n\n @property\n @load_match_on_attributeerror\n def summoner_spell_2_casts(self) -> 
int:\n return self._data[ParticipantStatsData].summoner2Casts\n\n @property\n @load_match_on_attributeerror\n def time_CCing_others(self) -> int:\n return self._data[ParticipantStatsData].timeCCingOthers\n\n @property\n @load_match_on_attributeerror\n def time_played(self) -> int:\n return self._data[ParticipantStatsData].timePlayed\n\n @property\n @load_match_on_attributeerror\n def total_damage_dealt(self) -> int:\n return self._data[ParticipantStatsData].totalDamageDealt\n\n @property\n @load_match_on_attributeerror\n def total_damage_dealt_to_champions(self) -> int:\n return self._data[ParticipantStatsData].totalDamageDealtToChampions\n\n @property\n @load_match_on_attributeerror\n def total_damage_shielded_on_teammates(self) -> int:\n return self._data[ParticipantStatsData].totalDamageshieldedOnTeammates\n\n @property\n @load_match_on_attributeerror\n def total_damage_taken(self) -> int:\n return self._data[ParticipantStatsData].totalDamageTaken\n\n @property\n @load_match_on_attributeerror\n def total_heal(self) -> int:\n return self._data[ParticipantStatsData].totalHeal\n\n @property\n @load_match_on_attributeerror\n def total_heals_on_teammates(self) -> int:\n return self._data[ParticipantStatsData].totalHealsOnTeammates\n\n @property\n @load_match_on_attributeerror\n def total_minions_killed(self) -> int:\n return self._data[ParticipantStatsData].totalMinionsKilled\n\n @property\n @load_match_on_attributeerror\n def total_time_cc_dealt(self) -> int:\n return self._data[ParticipantStatsData].totalTimeCCDealt\n\n @property\n @load_match_on_attributeerror\n def total_time_spent_dead(self) -> int:\n return self._data[ParticipantStatsData].totalTimeSpentDead\n\n @property\n @load_match_on_attributeerror\n def total_units_healed(self) -> int:\n return self._data[ParticipantStatsData].totalUnitsHealed\n\n @property\n @load_match_on_attributeerror\n def triple_kills(self) -> int:\n return self._data[ParticipantStatsData].tripleKills\n\n @property\n @load_match_on_attributeerror\n def true_damage_dealt(self) -> int:\n return self._data[ParticipantStatsData].trueDamageDealt\n\n @property\n @load_match_on_attributeerror\n def true_damage_dealt_to_champions(self) -> int:\n return self._data[ParticipantStatsData].trueDamageDealtToChampions\n\n @property\n @load_match_on_attributeerror\n def true_damage_taken(self) -> int:\n return self._data[ParticipantStatsData].trueDamageTaken\n\n @property\n @load_match_on_attributeerror\n def turret_kills(self) -> int:\n return self._data[ParticipantStatsData].turretKills\n\n @property\n @load_match_on_attributeerror\n def turret_takedowns(self) -> int:\n return self._data[ParticipantStatsData].turretTakedowns\n\n @property\n @load_match_on_attributeerror\n def turrets_lost(self) -> int:\n return self._data[ParticipantStatsData].turretsLost\n\n @property\n @load_match_on_attributeerror\n def unreal_kills(self) -> int:\n return self._data[ParticipantStatsData].unrealKills\n\n @property\n @load_match_on_attributeerror\n def vision_score(self) -> int:\n return self._data[ParticipantStatsData].visionScore\n\n @property\n @load_match_on_attributeerror\n def wards_killed(self) -> int:\n return self._data[ParticipantStatsData].wardsKilled\n\n @property\n @load_match_on_attributeerror\n def wards_placed(self) -> int:\n return self._data[ParticipantStatsData].wardsPlaced\n\n @property\n @load_match_on_attributeerror\n def win(self) -> bool:\n return self._data[ParticipantStatsData].win\n\n\n@searchable({str: [\"summoner\", \"champion\", \"stats\", \"runes\", 
\"side\", \"summoner_spell_d\", \"summoner_spell_f\"], Summoner: [\"summoner\"], Champion: [\"champion\"], Side: [\"side\"], Rune: [\"runes\"], SummonerSpell: [\"summoner_spell_d\", \"summoner_spell_f\"]})\nclass Participant(CassiopeiaObject):\n _data_types = {ParticipantData}\n\n @classmethod\n def from_data(cls, data: CoreData, match: \"Match\"):\n self = super().from_data(data)\n self.__match = match\n return self\n\n @property\n def version(self) -> str:\n version = self.__match.version\n version = version.split(\".\")[0:2]\n version = \".\".join(version) + \".1\" # Always use x.x.1 because I don't know how to figure out what the last version number should be.\n return version\n\n @property\n def individual_position(self) -> Lane:\n return Lane.from_match_naming_scheme(self._data[ParticipantData].individualPosition)\n\n @property\n def team_position(self) -> Lane:\n return Lane.from_match_naming_scheme(self._data[ParticipantData].teamPosition)\n\n @property\n def lane(self) -> Lane:\n return Lane.from_match_naming_scheme(self._data[ParticipantData].timeline.lane)\n\n @property\n def role(self) -> Role:\n return Role.from_match_naming_scheme(self._data[ParticipantData].timeline.role)\n\n @property\n def skill_order(self) -> List[Key]:\n skill_events = self.timeline.events.filter(lambda event: event.type == \"SKILL_LEVEL_UP\")\n skill_events.sort(key=lambda event: event.timestamp)\n skills = [event.skill - 1 for event in skill_events]\n spells = [self.champion.spells[Key(\"Q\")], self.champion.spells[Key(\"W\")], self.champion.spells[Key(\"E\")], self.champion.spells[Key(\"R\")]]\n skills = [spells[skill] for skill in skills]\n return skills\n\n @property\n def ended_in_early_surrender(self) -> bool:\n return self._data[ParticipantData].endedInEarlySurrender\n\n @lazy_property\n @load_match_on_attributeerror\n def stats(self) -> ParticipantStats:\n return ParticipantStats.from_data(self._data[ParticipantData].stats, match=self.__match, participant=self)\n\n @lazy_property\n @load_match_on_attributeerror\n def id(self) -> int:\n if self._data[ParticipantData].participantId is None:\n raise AttributeError\n return self._data[ParticipantData].participantId\n\n @lazy_property\n @load_match_on_attributeerror\n def is_bot(self) -> bool:\n return self._data[ParticipantData].isBot\n\n @lazy_property\n @load_match_on_attributeerror\n def runes(self) -> Dict[Rune, int]:\n version = _choose_staticdata_version(self.__match)\n runes = SearchableDictionary({Rune(id=rune_id, version=version, region=self.__match.region): perk_vars\n for rune_id, perk_vars in self._data[ParticipantData].perks.items()})\n\n def keystone(self):\n for rune in self:\n if rune.is_keystone:\n return rune\n # The bad thing about calling this here is that the runes won't be lazy loaded, so if the user only want the\n # rune ids then there will be a needless call. 
That said, it's pretty nice functionality to have and without\n # making a custom RunePage class, I believe this is the only option.\n runes.keystone = keystone(runes)\n return runes\n\n @lazy_property\n @load_match_on_attributeerror\n def stat_runes(self) -> List[Rune]:\n version = _choose_staticdata_version(self.__match)\n runes = SearchableList([Rune(id=rune_id, version=version, region=self.__match.region)\n for rune_id in self._data[ParticipantData].stat_perks.values()])\n return runes\n\n @lazy_property\n @load_match_on_attributeerror\n def timeline(self) -> ParticipantTimeline:\n timeline = ParticipantTimeline.from_data(match=self.__match)\n timeline.id = self.id\n return timeline\n\n @property\n def cumulative_timeline(self) -> CumulativeTimeline:\n return CumulativeTimeline(id=self.id, participant_timeline=self.timeline)\n\n @lazy_property\n @load_match_on_attributeerror\n def side(self) -> Side:\n return Side(self._data[ParticipantData].side)\n\n @lazy_property\n @load_match_on_attributeerror\n def summoner_spell_d(self) -> SummonerSpell:\n version = _choose_staticdata_version(self.__match)\n return SummonerSpell(id=self._data[ParticipantData].summonerSpellDId, version=version, region=self.__match.region)\n\n @lazy_property\n @load_match_on_attributeerror\n def summoner_spell_f(self) -> SummonerSpell:\n version = _choose_staticdata_version(self.__match)\n return SummonerSpell(id=self._data[ParticipantData].summonerSpellFId, version=version, region=self.__match.region)\n\n @lazy_property\n @load_match_on_attributeerror\n def rank_last_season(self) -> Tier:\n return Tier(self._data[ParticipantData].rankLastSeason)\n\n @property\n @load_match_on_attributeerror\n def match_history_uri(self) -> str:\n return self._data[ParticipantData].matchHistoryUri\n\n @lazy_property\n @load_match_on_attributeerror\n def champion(self) -> \"Champion\":\n # See ParticipantStats for info\n version = _choose_staticdata_version(self.__match)\n return Champion(id=self._data[ParticipantData].championId, version=version, region=self.__match.region)\n\n # All the summoner data from the match endpoint is passed through to the Summoner class.\n @lazy_property\n def summoner(self) -> Summoner:\n if self.__match._data[MatchData].privateGame:\n return None\n kwargs = {}\n try:\n kwargs[\"id\"] = self._data[ParticipantData].summonerId\n except AttributeError:\n pass\n try:\n kwargs[\"name\"] = self._data[ParticipantData].summonerName\n except AttributeError:\n pass\n kwargs[\"puuid\"] = self._data[ParticipantData].puuid\n kwargs[\"region\"] = Platform(self._data[ParticipantData].platformId).region\n summoner = Summoner(**kwargs)\n try:\n summoner(profileIconId=self._data[ParticipantData].profileIconId)\n except AttributeError:\n pass\n return summoner\n\n @property\n def team(self) -> \"Team\":\n if self.side == Side.blue:\n return self.__match.blue_team\n else:\n return self.__match.red_team\n\n @property\n def enemy_team(self) -> \"Team\":\n if self.side == Side.blue:\n return self.__match.red_team\n else:\n return self.__match.blue_team\n\n\n@searchable({str: [\"participants\"], bool: [\"win\"], Champion: [\"participants\"], Summoner: [\"participants\"], SummonerSpell: [\"participants\"]})\nclass Team(CassiopeiaObject):\n _data_types = {TeamData}\n\n @classmethod\n def from_data(cls, data: CoreData, match: \"Match\"):\n self = super().from_data(data)\n self.__match = match\n return self\n\n @property\n def first_dragon(self) -> bool:\n return self._data[TeamData].objectives['dragon'].first\n\n @property\n def 
first_inhibitor(self) -> bool:\n return self._data[TeamData].objectives['inhibitor'].first\n\n @property\n def first_rift_herald(self) -> bool:\n return self._data[TeamData].objectives['riftHerald'].first\n\n @property\n def first_baron(self) -> bool:\n return self._data[TeamData].objectives['baron'].first\n\n @property\n def first_tower(self) -> bool:\n return self._data[TeamData].objectives['tower'].first\n\n @property\n def first_blood(self) -> bool:\n return self._data[TeamData].objectives['champion'].first\n\n @property\n def bans(self) -> List[\"Champion\"]:\n version = _choose_staticdata_version(self.__match)\n return [Champion(id=ban.championId, version=version, region=self.__match.region) if ban.championId != -1 else None for ban in self._data[TeamData].bans]\n\n @property\n def rift_herald_kills(self) -> int:\n return self._data[TeamData].objectives['riftHerald'].kills\n\n @property\n def baron_kills(self) -> int:\n return self._data[TeamData].objectives['baron'].kills\n\n @property\n def inhibitor_kills(self) -> int:\n return self._data[TeamData].objectives['inhibitor'].kills\n\n @property\n def tower_kills(self) -> int:\n return self._data[TeamData].objectives['tower'].kills\n\n @property\n def dragon_kills(self) -> int:\n return self._data[TeamData].objectives['dragon'].kills\n\n @property\n def side(self) -> Side:\n return self._data[TeamData].side\n\n @property\n def dominion_score(self) -> int:\n return self._data[TeamData].dominionScore\n\n @property\n def win(self) -> bool:\n return self._data[TeamData].isWinner\n\n @lazy_property\n def participants(self) -> List[Participant]:\n return SearchableList([Participant.from_data(p, match=self.__match) for p in self._data[TeamData].participants])\n\n\n@searchable({str: [\"participants\", \"continent\", \"queue\", \"mode\", \"map\", \"type\"], Continent: [\"continent\"], Queue: [\"queue\"], MatchType: [\"type\"], GameMode: [\"mode\"], Map: [\"map\"], GameType: [\"type\"], Item: [\"participants\"], Patch: [\"patch\"], Summoner: [\"participants\"], SummonerSpell: [\"participants\"]})\nclass Match(CassiopeiaGhost):\n _data_types = {MatchData}\n\n def __init__(self, *, id: int = None, continent: Union[Continent, str] = None, region: Union[Region, str] = None, platform: Union[Platform, str] = None):\n if isinstance(region, str):\n region = Region(region)\n if region is not None:\n continent = region.continent\n kwargs = {\"continent\": continent, \"id\": id}\n super().__init__(**kwargs)\n self.__participants = [] # For lazy-loading the participants in a special way\n self._timeline = None\n\n def __get_query__(self):\n return {\"continent\": self.continent, \"id\": self.id}\n\n @classmethod\n def from_match_reference(cls, ref: MatchReferenceData):\n instance = cls(id=ref.id, continent=ref.continent)\n instance._timeline = None\n return instance\n\n def __eq__(self, other: \"Match\"):\n if not isinstance(other, Match) or self.continent != other.continent:\n return False\n return self.id == other.id\n\n def __str__(self):\n return f\"Match(id={self.id}, region='{self.continent.value}')\"\n\n __hash__ = CassiopeiaGhost.__hash__\n\n @lazy_property\n def continent(self) -> Continent:\n \"\"\"The continent for this match.\"\"\"\n return Continent(self._data[MatchData].continent)\n\n @lazy_property\n def region(self) -> Region:\n \"\"\"The region for this match.\"\"\"\n return self.platform.region\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def platform(self) -> Platform:\n \"\"\"The platform for this match.\"\"\"\n 
return Platform(self._data[MatchData].platformId)\n\n @property\n def id(self) -> int:\n return self._data[MatchData].id\n\n @lazy_property\n def timeline(self) -> Timeline:\n if self._timeline is None:\n self._timeline = Timeline(id=self.id, continent=self.continent)\n return self._timeline\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def queue(self) -> Queue:\n return Queue.from_id(self._data[MatchData].queue)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def type(self) -> MatchType:\n # TODO: this is wrong as type refers to the GameType, we could infer it from the queue\n return MatchType(self._data[MatchData].type)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n def participants(self) -> List[Participant]:\n if hasattr(self._data[MatchData], \"participants\"):\n if not self._Ghost__is_loaded(MatchData):\n self.__load__(MatchData)\n self._Ghost__set_loaded(MatchData) # __load__ doesn't trigger __set_loaded.\n # TODO: this is probably not the way to go, but that prevents participants being reappened every time match.participants is called\n if len(self.__participants) == 0:\n for p in self._data[MatchData].participants:\n participant = Participant.from_data(p, match=self)\n self.__participants.append(participant)\n\n else:\n self.__participants = []\n\n return SearchableList(self.__participants)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def teams(self) -> List[Team]:\n return [Team.from_data(t, match=self) for i, t in enumerate(self._data[MatchData].teams)]\n\n @property\n def red_team(self) -> Team:\n if self.teams[0].side is Side.red:\n return self.teams[0]\n else:\n return self.teams[1]\n\n @property\n def blue_team(self) -> Team:\n if self.teams[0].side is Side.blue:\n return self.teams[0]\n else:\n return self.teams[1]\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n def version(self) -> str:\n return self._data[MatchData].version\n\n @property\n def patch(self) -> Patch:\n if hasattr(self._data[MatchData], \"version\"):\n version = \".\".join(self.version.split(\".\")[:2])\n patch = Patch.from_str(version, region=self.region)\n else:\n date = self.creation\n patch = Patch.from_date(date, region=self.region)\n return patch\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def mode(self) -> GameMode:\n return GameMode(self._data[MatchData].mode)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def map(self) -> Map:\n version = _choose_staticdata_version(self)\n return Map(id=self._data[MatchData].mapId, region=self.region, version=version)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def game_type(self) -> GameType:\n return GameType(self._data[MatchData].type)\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def duration(self) -> datetime.timedelta:\n return self._data[MatchData].duration\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def creation(self) -> arrow.Arrow:\n return self._data[MatchData].creation\n\n @CassiopeiaGhost.property(MatchData)\n @ghost_load_on\n @lazy\n def start(self) -> arrow.Arrow:\n return self._data[MatchData].start\n\n @property\n def is_remake(self) -> bool:\n # TODO: not sure how this should be handled, it feels like the early surrender state should belong the the match itself, not the participants\n if self.__participants[0] is not None:\n return self.__participants[0].ended_in_early_surrender or self.duration < datetime.timedelta(minutes=5)\n else:\n 
return self.duration < datetime.timedelta(minutes=5)\n\n @property\n def exists(self) -> bool:\n try:\n if not self._Ghost__all_loaded:\n self.__load__()\n self.type # Make sure we can access this attribute\n return True\n except (AttributeError, NotFoundError):\n return False\n\n def kills_heatmap(self):\n if self.map.name == \"Summoner's Rift\":\n rx0, ry0, rx1, ry1 = 0, 0, 14820, 14881\n elif self.map.name == \"Howling Abyss\":\n rx0, ry0, rx1, ry1 = -28, -19, 12849, 12858\n else:\n raise NotImplementedError\n\n imx0, imy0, imx1, imy1 = self.map.image.image.getbbox()\n\n def position_to_map_image_coords(position):\n x, y = position.x, position.y\n x -= rx0\n x /= (rx1 - rx0)\n x *= (imx1 - imx0)\n y -= ry0\n y /= (ry1 - ry0)\n y *= (imy1 - imy0)\n return x, y\n\n import matplotlib.pyplot as plt\n size = 8\n plt.figure(figsize=(size, size))\n plt.imshow(self.map.image.image.rotate(-90))\n for p in self.participants:\n for kill in p.timeline.champion_kills:\n x, y = position_to_map_image_coords(kill.position)\n if p.team.side == Side.blue:\n plt.scatter([x], [y], c=\"b\", s=size * 10)\n else:\n plt.scatter([x], [y], c=\"r\", s=size * 10)\n plt.axis('off')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure" ] ]
bootml/agent
[ "84235db931d6e4ef956962961c619994898ebdd5" ]
[ "utilities/architectures/mlp.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom .architecture import Architecture\n\n__author__ = 'cnheider'\n\n'''\nDescription: Multi Layer Perceptron\nAuthor: Christian Heider Nielsen\n'''\nimport torch\nfrom torch import nn, Tensor\nfrom torch.nn import functional as F\n\n\nclass MLP(Architecture):\n '''\nOOOO input_size\n|XX| fc1\nOOOO hidden_layer_size * (Weights,Biases)\n|XX| fc2\nOOOO hidden_layer_size * (Weights,Biases)\n|XX| fc3\n0000 output_size * (Weights,Biases)\n'''\n\n def __init__(self, input_size, hidden_layers, output_size, activation, use_bias):\n super().__init__()\n\n self._input_size = input_size\n self._hidden_layers = hidden_layers\n self._activation = activation\n self._output_size = output_size\n self._use_bias = use_bias\n\n previous_layer_size = self._input_size[0]\n\n self.num_of_layer = len(self._hidden_layers)\n if self.num_of_layer > 0:\n for i in range(1, self.num_of_layer + 1):\n layer = nn.Linear(\n previous_layer_size, self._hidden_layers[i - 1], bias=self._use_bias\n )\n # fan_in_init(layer.weight)\n setattr(self, f'fc{i}', layer)\n previous_layer_size = self._hidden_layers[i - 1]\n\n self.head = nn.Linear(\n previous_layer_size, self._output_size[0], bias=self._use_bias\n )\n\n def forward(self, x, **kwargs):\n '''\n\n:param x:\n:return output:\n'''\n assert type(x) is Tensor\n\n # if hasattr(self, 'num_of_layer'): # Safer but slower\n # for i in range(1, self.num_of_layer + 1):\n # if hasattr(self, 'fc' + str(i)):\n # layer = getattr(self, 'fc' + str(i))\n # x = F.relu(layer(x))\n\n for i in range(1, self.num_of_layer + 1):\n layer = getattr(self, f'fc{i}')\n x = layer(x)\n x = self._activation(x)\n\n return self.head(x)\n\n\nclass CategoricalMLP(MLP):\n\n def forward(self, x, **kwargs):\n x = super().forward(x, **kwargs)\n return F.softmax(x, dim=1)\n\n\nclass MultiHeadedMLP(MLP):\n\n def __init__(self, *, heads, **kwargs):\n super().__init__(**kwargs)\n\n self._heads = heads\n\n self.num_of_heads = len(self._heads)\n if self.num_of_heads > 0:\n for i in range(self.num_of_heads):\n head = nn.Linear(self._output_size[0], self._heads[i])\n # fan_in_init(layer.weight)\n setattr(self, f'subhead{str(i + 1)}', head)\n else:\n raise ValueError('Number of head must be >0')\n\n def forward(self, x, **kwargs):\n x = super().forward(x, **kwargs)\n\n output = []\n for i in range(1, self.num_of_heads + 1):\n head = getattr(self, 'subhead' + str(i))\n sub_res = head(x)\n if type(sub_res) is not list:\n sub_res = [sub_res]\n output.append(sub_res)\n\n return output\n\n\nclass DistributionMLP(MultiHeadedMLP):\n def __init__(self, **kwargs):\n heads = [1, 1]\n\n super().__init__(heads=heads, **kwargs)\n\n\nclass RecurrentCategoricalMLP(MLP):\n\n def __init__(self, r_hidden_layers=10, **kwargs):\n super().__init__(**kwargs)\n self._r_hidden_layers = r_hidden_layers\n self._r_input_size = self._output_size[0] + r_hidden_layers\n\n self.hidden = nn.Linear(self._r_input_size, r_hidden_layers)\n self.out = nn.Linear(self._r_input_size, r_hidden_layers)\n\n self._prev_hidden_x = torch.zeros(r_hidden_layers)\n\n def forward(self, x, **kwargs):\n x = super().forward(x, **kwargs)\n combined = torch.cat((x, self._prev_hidden_x), 1)\n out_x = self.out(combined)\n hidden_x = self.hidden(combined)\n self._prev_hidden_x = hidden_x\n\n return F.softmax(out_x, dim=1)\n\n\nclass ExposedRecurrentCategoricalMLP(RecurrentCategoricalMLP):\n\n def forward(self, x, hidden_x, **kwargs):\n self._prev_hidden_x = hidden_x\n out_x = super().forward(x, **kwargs)\n\n return 
F.softmax(out_x, dim=1), self._prev_hidden_x\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.softmax", "torch.cat", "torch.zeros" ] ]
huynhngoc/head-neck-analysis
[ "f723e81509545c13c65c88b41d8b0465a35b017e" ]
[ "run_test.py" ]
[ "\"\"\"\nExample of running a single experiment of unet in the head and neck data.\nThe json config of the main model is 'examples/json/unet-sample-config.json'\nAll experiment outputs are stored in '../../hn_perf/logs'.\nAfter running 3 epochs, the performance of the training process can be accessed\nas log file and perforamance plot.\nIn addition, we can peek the result of 42 first images from prediction set.\n\"\"\"\n\nfrom deoxys.experiment import Experiment, ExperimentPipeline\n# from deoxys.utils import read_file\nimport argparse\nimport os\n# from pathlib import Path\n# from comet_ml import Experiment as CometEx\nimport tensorflow as tf\nimport customize_obj\n\nif __name__ == '__main__':\n gpus = tf.config.list_physical_devices('GPU')\n if not gpus:\n raise RuntimeError(\"GPU Unavailable\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"log_folder\")\n parser.add_argument(\"--best_epoch\", default=0, type=int)\n parser.add_argument(\"--temp_folder\", default='', type=str)\n parser.add_argument(\"--analysis_folder\",\n default='', type=str)\n parser.add_argument(\"--epochs\", default=500, type=int)\n parser.add_argument(\"--model_checkpoint_period\", default=5, type=int)\n parser.add_argument(\"--prediction_checkpoint_period\", default=5, type=int)\n parser.add_argument(\"--meta\", default='patient_idx,slice_idx', type=str)\n parser.add_argument(\"--monitor\", default='', type=str)\n parser.add_argument(\"--memory_limit\", default=0, type=int)\n\n args, unknown = parser.parse_known_args()\n\n if args.memory_limit:\n # Restrict TensorFlow to only allocate X-GB of memory on the first GPU\n try:\n tf.config.set_logical_device_configuration(\n gpus[0],\n [tf.config.LogicalDeviceConfiguration(\n memory_limit=1024 * args.memory_limit)])\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(\n logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n print(e)\n\n if 'patch' in args.log_folder:\n analysis_folder = args.analysis_folder\n else:\n analysis_folder = ''\n\n if '2d' in args.log_folder:\n meta = args.meta\n else:\n meta = args.meta.split(',')[0]\n # # 42 images for 2d, 15 images for 3d\n # img_num = 42\n\n # if '3d' in args.log_folder:\n # img_num = 40\n\n # best_model = customize_obj.PostProcessor(\n # args.log_folder,\n # temp_base_path=args.temp_folder).get_best_model(args.monitor)\n\n # (\n # customize_obj.AnalysisExperiment(\n # log_base_path=args.log_folder,\n # temp_base_path=args.temp_folder)\n # .from_file(best_model)\n # .run_test(masked_images=[i for i in range(img_num)])\n # )\n\n # if '2d' in args.log_folder:\n # customize_obj.PostProcessor(\n # args.log_folder,\n # temp_base_path=args.temp_folder,\n # map_meta_data=args.meta,\n # run_test=True\n # ).map_2d_meta_data().calculate_fscore_single().merge_2d_slice(\n # ).calculate_fscore()\n # else:\n # customize_obj.PostProcessor(\n # args.log_folder,\n # temp_base_path=args.temp_folder,\n # analysis_base_path=args.analysis_folder,\n # map_meta_data=args.meta,\n # run_test=True\n # ).merge_3d_patches().calculate_fscore()\n\n ex = ExperimentPipeline(\n log_base_path=args.log_folder,\n temp_base_path=args.temp_folder\n )\n if args.best_epoch == 0:\n try:\n ex = ex.load_best_model(\n recipe='auto',\n analysis_base_path=analysis_folder,\n map_meta_data=meta,\n )\n except Exception as e:\n print(e)\n else:\n print(f'Loading model from epoch {args.best_epoch}')\n 
ex.from_file(args.log_folder +\n f'/model/model.{args.best_epoch:03d}.h5')\n ex.run_test(\n ).apply_post_processors(\n recipe='auto',\n analysis_base_path=analysis_folder,\n map_meta_data=meta,\n run_test=True\n ).plot_3d_test_images(best_num=2, worst_num=2)\n" ]
[ [ "tensorflow.config.LogicalDeviceConfiguration", "tensorflow.config.experimental.list_logical_devices", "tensorflow.config.list_physical_devices" ] ]
TripleRD/Pulser
[ "7405d8cd7463782891cbbf40335f163dc8f284cc" ]
[ "pulser/simulation/simulation.py" ]
[ "# Copyright 2020 Pulser Development Team\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contains the Simulation class, used for simulation of a Sequence.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Optional, Union, cast, Any\nfrom collections.abc import Mapping\nimport itertools\nfrom collections import Counter\nfrom copy import deepcopy\nfrom dataclasses import asdict\nimport warnings\n\nimport qutip\nimport numpy as np\nfrom numpy.typing import ArrayLike\nimport matplotlib.pyplot as plt\n\nfrom pulser import Pulse, Sequence\nfrom pulser.register import QubitId\nfrom pulser.simulation.simresults import (\n SimulationResults,\n CoherentResults,\n NoisyResults,\n)\nfrom pulser.simulation.simconfig import SimConfig\nfrom pulser._seq_drawer import draw_sequence\nfrom pulser.sequence import _TimeSlot\n\n\nSUPPORTED_NOISE = {\n \"ising\": {\"dephasing\", \"doppler\", \"amplitude\", \"SPAM\"},\n \"XY\": {\"SPAM\"},\n}\n\n\nclass Simulation:\n r\"\"\"Simulation of a pulse sequence using QuTiP.\n\n Args:\n sequence (Sequence): An instance of a Pulser Sequence that we\n want to simulate.\n sampling_rate (float): The fraction of samples that we wish to\n extract from the pulse sequence to simulate. Has to be a\n value between 0.05 and 1.0.\n config (SimConfig): Configuration to be used for this simulation.\n evaluation_times (Union[str, ArrayLike, float]): Choose between:\n\n - \"Full\": The times are set to be the ones used to define the\n Hamiltonian to the solver.\n\n - \"Minimal\": The times are set to only include initial and final\n times.\n\n - An ArrayLike object of times in µs if you wish to only include\n those specific times.\n\n - A float to act as a sampling rate for the resulting state.\n \"\"\"\n\n def __init__(\n self,\n sequence: Sequence,\n sampling_rate: float = 1.0,\n config: Optional[SimConfig] = None,\n evaluation_times: Union[float, str, ArrayLike] = \"Full\",\n ) -> None:\n \"\"\"Instantiates a Simulation object.\"\"\"\n if not isinstance(sequence, Sequence):\n raise TypeError(\n \"The provided sequence has to be a valid \"\n \"pulser.Sequence instance.\"\n )\n if not sequence._schedule:\n raise ValueError(\"The provided sequence has no declared channels.\")\n if all(sequence._schedule[x][-1].tf == 0 for x in sequence._channels):\n raise ValueError(\n \"No instructions given for the channels in the sequence.\"\n )\n self._seq = sequence\n self._interaction = \"XY\" if self._seq._in_xy else \"ising\"\n self._qdict = self._seq.qubit_info\n self._size = len(self._qdict)\n self._tot_duration = self._seq.get_duration()\n if not (0 < sampling_rate <= 1.0):\n raise ValueError(\n \"The sampling rate (`sampling_rate` = \"\n f\"{sampling_rate}) must be greater than 0 and \"\n \"less than or equal to 1.\"\n )\n if int(self._tot_duration * sampling_rate) < 4:\n raise ValueError(\n \"`sampling_rate` is too small, less than 4 data points.\"\n )\n self._sampling_rate = sampling_rate\n self._qid_index = {qid: i for i, qid in enumerate(self._qdict)}\n 
self._collapse_ops: list[qutip.Qobj] = []\n\n self.sampling_times = self._adapt_to_sampling_rate(\n np.arange(self._tot_duration, dtype=np.double) / 1000\n )\n self.evaluation_times = evaluation_times # type: ignore\n\n self._bad_atoms: dict[Union[str, int], bool] = {}\n self._doppler_detune: dict[Union[str, int], float] = {}\n # Sets the config as well as builds the hamiltonian\n self.set_config(config) if config else self.set_config(SimConfig())\n if hasattr(self._seq, \"_measurement\"):\n self._meas_basis = cast(str, self._seq._measurement)\n else:\n if self.basis_name in {\"digital\", \"all\"}:\n self._meas_basis = \"digital\"\n else:\n self._meas_basis = self.basis_name\n self.initial_state = \"all-ground\"\n\n @property\n def config(self) -> SimConfig:\n \"\"\"The current configuration, as a SimConfig instance.\"\"\"\n return self._config\n\n def set_config(self, cfg: SimConfig) -> None:\n \"\"\"Sets current config to cfg and updates simulation parameters.\n\n Args:\n cfg (SimConfig): New configuration.\n \"\"\"\n if not isinstance(cfg, SimConfig):\n raise ValueError(f\"Object {cfg} is not a valid `SimConfig`.\")\n not_supported = set(cfg.noise) - SUPPORTED_NOISE[self._interaction]\n if not_supported:\n raise NotImplementedError(\n f\"Interaction mode '{self._interaction}' does not support \"\n f\"simulation of noise types: {', '.join(not_supported)}.\"\n )\n prev_config = self.config if hasattr(self, \"_config\") else SimConfig()\n self._config = cfg\n if not (\"SPAM\" in self.config.noise and self.config.eta > 0):\n self._bad_atoms = {qid: False for qid in self._qid_index}\n if \"doppler\" not in self.config.noise:\n self._doppler_detune = {qid: 0.0 for qid in self._qid_index}\n # Noise, samples and Hamiltonian update routine\n self._construct_hamiltonian()\n if \"dephasing\" in self.config.noise:\n if self.basis_name == \"digital\" or self.basis_name == \"all\":\n # Go back to previous config\n self.set_config(prev_config)\n raise NotImplementedError(\n \"Cannot include dephasing noise in digital- or all-basis.\"\n )\n # Probability of phase (Z) flip:\n # First order in prob\n prob = self.config.dephasing_prob / 2\n n = self._size\n if prob > 0.1 and n > 1:\n warnings.warn(\n \"The dephasing model is a first-order approximation in the\"\n f\" dephasing probability. p = {2*prob} is too large for \"\n \"realistic results.\",\n stacklevel=2,\n )\n k = np.sqrt(prob * (1 - prob) ** (n - 1))\n self._collapse_ops = [\n np.sqrt((1 - prob) ** n)\n * qutip.tensor([self.op_matrix[\"I\"] for _ in range(n)])\n ]\n self._collapse_ops += [\n k\n * (\n self.build_operator([(\"sigma_rr\", [qid])])\n - self.build_operator([(\"sigma_gg\", [qid])])\n )\n for qid in self._qid_index\n ]\n\n def add_config(self, config: SimConfig) -> None:\n \"\"\"Updates the current configuration with parameters of another one.\n\n Mostly useful when dealing with multiple noise types in different\n configurations and wanting to merge these configurations together.\n Adds simulation parameters to noises that weren't available in the\n former SimConfig. 
Noises specified in both SimConfigs will keep\n former noise parameters.\n\n Args:\n config (SimConfig): SimConfig to retrieve parameters from.\n \"\"\"\n if not isinstance(config, SimConfig):\n raise ValueError(f\"Object {config} is not a valid `SimConfig`\")\n\n not_supported = set(config.noise) - SUPPORTED_NOISE[self._interaction]\n if not_supported:\n raise NotImplementedError(\n f\"Interaction mode '{self._interaction}' does not support \"\n f\"simulation of noise types: {', '.join(not_supported)}.\"\n )\n\n old_noise_set = set(self.config.noise)\n new_noise_set = old_noise_set.union(config.noise)\n diff_noise_set = new_noise_set - old_noise_set\n param_dict: dict[str, Any] = asdict(self._config)\n del param_dict[\"spam_dict\"]\n del param_dict[\"doppler_sigma\"]\n param_dict[\"noise\"] = tuple(new_noise_set)\n if \"SPAM\" in diff_noise_set:\n param_dict[\"eta\"] = config.eta\n param_dict[\"epsilon\"] = config.epsilon\n param_dict[\"epsilon_prime\"] = config.epsilon_prime\n if \"doppler\" in diff_noise_set:\n param_dict[\"temperature\"] = config.temperature\n if \"amplitude\" in diff_noise_set:\n param_dict[\"laser_waist\"] = config.laser_waist\n if \"dephasing\" in diff_noise_set:\n param_dict[\"dephasing_prob\"] = config.dephasing_prob\n param_dict[\"temperature\"] *= 1.0e6\n self.set_config(SimConfig(**param_dict))\n\n def show_config(self, solver_options: bool = False) -> None:\n \"\"\"Shows current configuration.\"\"\"\n print(self._config.__str__(solver_options))\n\n def reset_config(self) -> None:\n \"\"\"Resets configuration to default.\"\"\"\n self.set_config(SimConfig())\n\n @property\n def initial_state(self) -> qutip.Qobj:\n \"\"\"The initial state of the simulation.\n\n Args:\n state (Union[str, ArrayLike, qutip.Qobj]): The initial state.\n Choose between:\n\n - \"all-ground\" for all atoms in ground state\n - An ArrayLike with a shape compatible with the system\n - A Qobj object\n \"\"\"\n return self._initial_state\n\n @initial_state.setter\n def initial_state(self, state: Union[str, np.ndarray, qutip.Qobj]) -> None:\n \"\"\"Sets the initial state of the simulation.\"\"\"\n self._initial_state: qutip.Qobj\n if isinstance(state, str) and state == \"all-ground\":\n self._initial_state = qutip.tensor(\n [\n self.basis[\"d\" if self._interaction == \"XY\" else \"g\"]\n for _ in range(self._size)\n ]\n )\n else:\n state = cast(Union[np.ndarray, qutip.Qobj], state)\n shape = state.shape[0]\n legal_shape = self.dim ** self._size\n legal_dims = [[self.dim] * self._size, [1] * self._size]\n if shape != legal_shape:\n raise ValueError(\n \"Incompatible shape of initial state.\"\n + f\"Expected {legal_shape}, got {shape}.\"\n )\n self._initial_state = qutip.Qobj(state, dims=legal_dims)\n\n @property\n def evaluation_times(self) -> np.ndarray:\n \"\"\"The times at which the results of this simulation are returned.\n\n Args:\n value (Union[str, ArrayLike, float]): Choose between:\n\n - \"Full\": The times are set to be the ones used to define the\n Hamiltonian to the solver.\n\n - \"Minimal\": The times are set to only include initial and\n final times.\n\n - An ArrayLike object of times in µs if you wish to only\n include those specific times.\n\n - A float to act as a sampling rate for the resulting state.\n \"\"\"\n return np.array(self._eval_times_array)\n\n @evaluation_times.setter\n def evaluation_times(self, value: Union[str, ArrayLike, float]) -> None:\n \"\"\"Sets times at which the results of this simulation are returned.\"\"\"\n if isinstance(value, str):\n if value == 
\"Full\":\n self._eval_times_array = np.append(\n self.sampling_times, self._tot_duration / 1000\n )\n elif value == \"Minimal\":\n self._eval_times_array = np.array(\n [self.sampling_times[0], self._tot_duration / 1000]\n )\n else:\n raise ValueError(\n \"Wrong evaluation time label. It should \"\n \"be `Full`, `Minimal`, an array of times or\"\n + \" a float between 0 and 1.\"\n )\n elif isinstance(value, float):\n if value > 1 or value <= 0:\n raise ValueError(\n \"evaluation_times float must be between 0 \" \"and 1.\"\n )\n extended_times = np.append(\n self.sampling_times, self._tot_duration / 1000\n )\n indices = np.linspace(\n 0,\n len(extended_times) - 1,\n int(value * len(extended_times)),\n dtype=int,\n )\n self._eval_times_array = extended_times[indices]\n elif isinstance(value, (list, tuple, np.ndarray)):\n t_max = np.max(value)\n t_min = np.min(value)\n if t_max > self._tot_duration / 1000:\n raise ValueError(\n \"Provided evaluation-time list extends \"\n \"further than sequence duration.\"\n )\n if t_min < 0:\n raise ValueError(\n \"Provided evaluation-time list contains \"\n \"negative values.\"\n )\n # Ensure the list of times is sorted\n eval_times = np.array(np.sort(value))\n if t_min > 0:\n eval_times = np.insert(eval_times, 0, 0.0)\n if t_max < self._tot_duration / 1000:\n eval_times = np.append(eval_times, self._tot_duration / 1000)\n self._eval_times_array = eval_times\n # always include initial and final times\n else:\n raise ValueError(\n \"Wrong evaluation time label. It should \"\n \"be `Full`, `Minimal`, an array of times or a \"\n + \"float between 0 and 1.\"\n )\n self._eval_times_instruction = value\n\n def draw(\n self,\n draw_phase_area: bool = False,\n draw_interp_pts: bool = False,\n draw_phase_shifts: bool = False,\n fig_name: str = None,\n kwargs_savefig: dict = {},\n ) -> None:\n \"\"\"Draws the input sequence and the one used by the solver.\n\n Keyword Args:\n draw_phase_area (bool): Whether phase and area values need\n to be shown as text on the plot, defaults to False.\n draw_interp_pts (bool): When the sequence has pulses with waveforms\n of type InterpolatedWaveform, draws the points of interpolation\n on top of the respective waveforms (defaults to False).\n draw_phase_shifts (bool): Whether phase shift and reference\n information should be added to the plot, defaults to False.\n fig_name(str, default=None): The name on which to save the figure.\n If None the figure will not be saved.\n kwargs_savefig(dict, default={}): Keywords arguments for\n ``matplotlib.pyplot.savefig``. 
Not applicable if `fig_name`\n is ``None``.\n\n See Also:\n Sequence.draw(): Draws the sequence in its current state.\n \"\"\"\n draw_sequence(\n self._seq,\n self._sampling_rate,\n draw_phase_area=draw_phase_area,\n draw_interp_pts=draw_interp_pts,\n draw_phase_shifts=draw_phase_shifts,\n )\n if fig_name is not None:\n plt.savefig(fig_name, **kwargs_savefig)\n plt.show()\n\n def _extract_samples(self) -> None:\n \"\"\"Populates samples dictionary with every pulse in the sequence.\"\"\"\n self.samples: dict[str, dict[str, dict]]\n if self._interaction == \"ising\":\n self.samples = {\n addr: {basis: {} for basis in [\"ground-rydberg\", \"digital\"]}\n for addr in [\"Global\", \"Local\"]\n }\n else:\n self.samples = {addr: {\"XY\": {}} for addr in [\"Global\", \"Local\"]}\n\n if not hasattr(self, \"operators\"):\n self.operators = deepcopy(self.samples)\n\n def prepare_dict() -> dict[str, np.ndarray]:\n # Duration includes retargeting, delays, etc.\n return {\n \"amp\": np.zeros(self._tot_duration),\n \"det\": np.zeros(self._tot_duration),\n \"phase\": np.zeros(self._tot_duration),\n }\n\n def write_samples(\n slot: _TimeSlot,\n samples_dict: Mapping[str, np.ndarray],\n is_global_pulse: bool,\n *qid: Union[int, str],\n ) -> None:\n \"\"\"Builds hamiltonian coefficients.\n\n Taking into account, if necessary, noise effects, which are local\n and depend on the qubit's id qid.\n \"\"\"\n _pulse = cast(Pulse, slot.type)\n noise_det = 0.0\n noise_amp = 1.0\n if \"doppler\" in self.config.noise:\n noise_det += self._doppler_detune[qid[0]]\n # Gaussian beam loss in amplitude for global pulses only\n # Noise is drawn at random for each pulse\n if \"amplitude\" in self.config.noise and is_global_pulse:\n position = self._qdict[qid[0]]\n r = np.linalg.norm(position)\n w0 = self.config.laser_waist\n noise_amp = np.random.normal(1.0, 1.0e-3) * np.exp(\n -((r / w0) ** 2)\n )\n samples_dict[\"amp\"][slot.ti : slot.tf] += (\n _pulse.amplitude.samples * noise_amp\n )\n samples_dict[\"det\"][slot.ti : slot.tf] += (\n _pulse.detuning.samples + noise_det\n )\n samples_dict[\"phase\"][slot.ti : slot.tf] += _pulse.phase\n\n for channel in self._seq.declared_channels:\n addr = self._seq.declared_channels[channel].addressing\n basis = self._seq.declared_channels[channel].basis\n\n # Case of coherent global simulations\n if addr == \"Global\" and (\n set(self.config.noise).issubset({\"dephasing\"})\n ):\n slm_on = bool(self._seq._slm_mask_targets)\n for slot in self._seq._schedule[channel]:\n if isinstance(slot.type, Pulse):\n # If SLM is on during slot, populate local samples\n if slm_on and self._seq._slm_mask_time[1] > slot.ti:\n samples_dict = self.samples[\"Local\"][basis]\n for qubit in slot.targets:\n if qubit not in samples_dict:\n samples_dict[qubit] = prepare_dict()\n write_samples(\n slot, samples_dict[qubit], True, qubit\n )\n self.samples[\"Local\"][basis] = samples_dict\n # Otherwise, populate corresponding global\n else:\n slm_on = False\n samples_dict = self.samples[\"Global\"][basis]\n if not samples_dict:\n samples_dict = prepare_dict()\n write_samples(slot, samples_dict, True)\n self.samples[\"Global\"][basis] = samples_dict\n\n # Any noise : global becomes local for each qubit in the reg\n # Since coefficients are modified locally by all noises\n else:\n is_global = addr == \"Global\"\n samples_dict = self.samples[\"Local\"][basis]\n for slot in self._seq._schedule[channel]:\n if isinstance(slot.type, Pulse):\n for qubit in slot.targets:\n if qubit not in samples_dict:\n samples_dict[qubit] = 
prepare_dict()\n # We don't write samples for badly prep qubits\n if not self._bad_atoms[qubit]:\n write_samples(\n slot, samples_dict[qubit], is_global, qubit\n )\n self.samples[\"Local\"][basis] = samples_dict\n\n # Apply SLM mask if it was defined\n if self._seq._slm_mask_targets and self._seq._slm_mask_time:\n tf = self._seq._slm_mask_time[1]\n for qubit in self._seq._slm_mask_targets:\n for x in (\"amp\", \"det\", \"phase\"):\n self.samples[\"Local\"][basis][qubit][x][0:tf] = 0\n\n def build_operator(self, operations: Union[list, tuple]) -> qutip.Qobj:\n \"\"\"Creates an operator with non-trivial actions on some qubits.\n\n Takes as argument a list of tuples ``[(operator_1, qubits_1),\n (operator_2, qubits_2)...]``. Returns the operator given by the tensor\n product of {``operator_i`` applied on ``qubits_i``} and Id on the rest.\n ``(operator, 'global')`` returns the sum for all ``j`` of operator\n applied at ``qubit_j`` and identity elsewhere.\n\n Example for 4 qubits: ``[(Z, [1, 2]), (Y, [3])]`` returns `ZZYI`\n and ``[(X, 'global')]`` returns `XIII + IXII + IIXI + IIIX`\n\n Args:\n operations (list): List of tuples `(operator, qubits)`.\n `operator` can be a ``qutip.Quobj`` or a string key for\n ``self.op_matrix``. `qubits` is the list on which operator\n will be applied. The qubits can be passed as their\n index or their label in the register.\n\n Returns:\n qutip.Qobj: The final operator.\n \"\"\"\n op_list = [self.op_matrix[\"I\"] for j in range(self._size)]\n\n if not isinstance(operations, list):\n operations = [operations]\n\n for operator, qubits in operations:\n if qubits == \"global\":\n return sum(\n self.build_operator([(operator, [q_id])])\n for q_id in self._qdict\n )\n else:\n qubits_set = set(qubits)\n if len(qubits_set) < len(qubits):\n raise ValueError(\"Duplicate atom ids in argument list.\")\n if not qubits_set.issubset(self._qdict.keys()):\n raise ValueError(\n \"Invalid qubit names: \"\n f\"{qubits_set - self._qdict.keys()}\"\n )\n if isinstance(operator, str):\n try:\n operator = self.op_matrix[operator]\n except KeyError:\n raise ValueError(f\"{operator} is not a valid operator\")\n for qubit in qubits:\n k = self._qid_index[qubit]\n op_list[k] = operator\n return qutip.tensor(op_list)\n\n def _adapt_to_sampling_rate(self, full_array: np.ndarray) -> np.ndarray:\n \"\"\"Adapt list to correspond to sampling rate.\"\"\"\n indices = np.linspace(\n 0,\n self._tot_duration - 1,\n int(self._sampling_rate * self._tot_duration),\n dtype=int,\n )\n return cast(np.ndarray, full_array[indices])\n\n def _update_noise(self) -> None:\n \"\"\"Updates noise random parameters.\n\n Used at the start of each run. 
If SPAM isn't in chosen noises, all\n atoms are set to be correctly prepared.\n \"\"\"\n if \"SPAM\" in self.config.noise and self.config.eta > 0:\n dist = (\n np.random.uniform(size=len(self._qid_index))\n < self.config.spam_dict[\"eta\"]\n )\n self._bad_atoms = dict(zip(self._qid_index, dist))\n if \"doppler\" in self.config.noise:\n detune = np.random.normal(\n 0, self.config.doppler_sigma, size=len(self._qid_index)\n )\n self._doppler_detune = dict(zip(self._qid_index, detune))\n\n def _build_basis_and_op_matrices(self) -> None:\n \"\"\"Determine dimension, basis and projector operators.\"\"\"\n if self._interaction == \"XY\":\n self.basis_name = \"XY\"\n self.dim = 2\n basis = [\"u\", \"d\"]\n projectors = [\"uu\", \"du\", \"ud\", \"dd\"]\n else:\n # No samples => Empty dict entry => False\n if (\n not self.samples[\"Global\"][\"digital\"]\n and not self.samples[\"Local\"][\"digital\"]\n ):\n self.basis_name = \"ground-rydberg\"\n self.dim = 2\n basis = [\"r\", \"g\"]\n projectors = [\"gr\", \"rr\", \"gg\"]\n elif (\n not self.samples[\"Global\"][\"ground-rydberg\"]\n and not self.samples[\"Local\"][\"ground-rydberg\"]\n ):\n self.basis_name = \"digital\"\n self.dim = 2\n basis = [\"g\", \"h\"]\n projectors = [\"hg\", \"hh\", \"gg\"]\n else:\n self.basis_name = \"all\" # All three states\n self.dim = 3\n basis = [\"r\", \"g\", \"h\"]\n projectors = [\"gr\", \"hg\", \"rr\", \"gg\", \"hh\"]\n\n self.basis = {b: qutip.basis(self.dim, i) for i, b in enumerate(basis)}\n self.op_matrix = {\"I\": qutip.qeye(self.dim)}\n\n for proj in projectors:\n self.op_matrix[\"sigma_\" + proj] = (\n self.basis[proj[0]] * self.basis[proj[1]].dag()\n )\n\n def _construct_hamiltonian(self, update_and_extract: bool = True) -> None:\n \"\"\"Constructs the hamiltonian from the Sequence.\n\n Also builds qutip.Qobjs related to the Sequence if not built already,\n and refreshes potential noise parameters by drawing new at random.\n\n Args:\n update_and_extract(bool=True): Whether to update the noise\n parameters and extract the samples from the sequence.\n \"\"\"\n if update_and_extract:\n self._update_noise()\n self._extract_samples()\n if not hasattr(self, \"basis_name\"):\n self._build_basis_and_op_matrices()\n\n def make_vdw_term(q1: QubitId, q2: QubitId) -> qutip.Qobj:\n \"\"\"Construct the Van der Waals interaction Term.\n\n For each pair of qubits, calculate the distance between them,\n then assign the local operator \"sigma_rr\" at each pair.\n The units are given so that the coefficient includes a\n 1/hbar factor.\n \"\"\"\n dist = np.linalg.norm(self._qdict[q1] - self._qdict[q2])\n U = 0.5 * self._seq._device.interaction_coeff / dist ** 6\n return U * self.build_operator([(\"sigma_rr\", [q1, q2])])\n\n def make_xy_term(q1: QubitId, q2: QubitId) -> qutip.Qobj:\n \"\"\"Construct the XY interaction Term.\n\n For each pair of qubits, calculate the distance between them,\n then assign the local operator \"sigma_du * sigma_ud\" at each pair.\n The units are given so that the coefficient\n includes a 1/hbar factor.\n \"\"\"\n dist = np.linalg.norm(self._qdict[q1] - self._qdict[q2])\n coords_dim = len(self._qdict[q1])\n mag_norm = np.linalg.norm(self._seq.magnetic_field[:coords_dim])\n if mag_norm < 1e-8:\n cosine = 0.0\n else:\n cosine = (\n np.dot(\n (self._qdict[q1] - self._qdict[q2]),\n self._seq.magnetic_field[:coords_dim],\n )\n / (dist * mag_norm)\n )\n U = (\n 0.5\n * self._seq._device.interaction_coeff_xy\n * (1 - 3 * cosine ** 2)\n / dist ** 3\n )\n return U * self.build_operator(\n [(\"sigma_du\", 
[q1]), (\"sigma_ud\", [q2])]\n )\n\n def make_interaction_term(masked: bool = False) -> qutip.Qobj:\n if masked:\n # Calculate the total number of good, unmasked qubits\n effective_size = self._size - sum(self._bad_atoms.values())\n for q in self._seq._slm_mask_targets:\n if not self._bad_atoms[q]:\n effective_size -= 1\n if effective_size < 2:\n return 0 * self.build_operator([(\"I\", \"global\")])\n\n # make interaction term\n dipole_interaction = cast(qutip.Qobj, 0)\n for q1, q2 in itertools.combinations(self._qdict.keys(), r=2):\n if (\n self._bad_atoms[q1]\n or self._bad_atoms[q2]\n or (\n masked\n and (\n q1 in self._seq._slm_mask_targets\n or q2 in self._seq._slm_mask_targets\n )\n )\n ):\n continue\n\n if self._interaction == \"XY\":\n dipole_interaction += make_xy_term(q1, q2)\n else:\n dipole_interaction += make_vdw_term(q1, q2)\n return dipole_interaction\n\n def build_coeffs_ops(basis: str, addr: str) -> list[list]:\n \"\"\"Build coefficients and operators for the hamiltonian QobjEvo.\"\"\"\n samples = self.samples[addr][basis]\n operators = self.operators[addr][basis]\n # Choose operator names according to addressing:\n if basis == \"ground-rydberg\":\n op_ids = [\"sigma_gr\", \"sigma_rr\"]\n elif basis == \"digital\":\n op_ids = [\"sigma_hg\", \"sigma_gg\"]\n elif basis == \"XY\":\n op_ids = [\"sigma_du\", \"sigma_dd\"]\n\n terms = []\n if addr == \"Global\":\n coeffs = [\n 0.5 * samples[\"amp\"] * np.exp(-1j * samples[\"phase\"]),\n -0.5 * samples[\"det\"],\n ]\n for op_id, coeff in zip(op_ids, coeffs):\n if np.any(coeff != 0):\n # Build once global operators as they are needed\n if op_id not in operators:\n operators[op_id] = self.build_operator(\n [(op_id, \"global\")]\n )\n terms.append(\n [\n operators[op_id],\n self._adapt_to_sampling_rate(coeff),\n ]\n )\n elif addr == \"Local\":\n for q_id, samples_q in samples.items():\n if q_id not in operators:\n operators[q_id] = {}\n coeffs = [\n 0.5\n * samples_q[\"amp\"]\n * np.exp(-1j * samples_q[\"phase\"]),\n -0.5 * samples_q[\"det\"],\n ]\n for coeff, op_id in zip(coeffs, op_ids):\n if np.any(coeff != 0):\n if op_id not in operators[q_id]:\n operators[q_id][op_id] = self.build_operator(\n [(op_id, [q_id])]\n )\n terms.append(\n [\n operators[q_id][op_id],\n self._adapt_to_sampling_rate(coeff),\n ]\n )\n self.operators[addr][basis] = operators\n return terms\n\n qobj_list = []\n # Time independent term:\n effective_size = self._size - sum(self._bad_atoms.values())\n if self.basis_name != \"digital\" and effective_size > 1:\n # Build time-dependent or time-independent interaction term based\n # on whether an SLM mask was defined or not\n if self._seq._slm_mask_time:\n # Build an array of binary coefficients for the interaction\n # term of unmasked qubits\n coeff = np.ones(self._tot_duration)\n coeff[0 : self._seq._slm_mask_time[1]] = 0\n # Build the interaction term for unmasked qubits\n qobj_list = [\n [\n make_interaction_term(),\n self._adapt_to_sampling_rate(coeff),\n ]\n ]\n # Build the interaction term for masked qubits\n qobj_list += [\n [\n make_interaction_term(masked=True),\n self._adapt_to_sampling_rate(\n np.logical_not(coeff).astype(int)\n ),\n ]\n ]\n else:\n qobj_list = [make_interaction_term()]\n\n # Time dependent terms:\n for addr in self.samples:\n for basis in self.samples[addr]:\n if self.samples[addr][basis]:\n qobj_list += cast(list, build_coeffs_ops(basis, addr))\n\n if not qobj_list: # If qobj_list ends up empty\n qobj_list = [0 * self.build_operator([(\"I\", \"global\")])]\n\n ham = 
qutip.QobjEvo(qobj_list, tlist=self.sampling_times)\n ham = ham + ham.dag()\n ham.compress()\n self._hamiltonian = ham\n\n def get_hamiltonian(self, time: float) -> qutip.Qobj:\n \"\"\"Get the Hamiltonian created from the sequence at a fixed time.\n\n Args:\n time (float): The specific time at which we want to extract the\n Hamiltonian (in ns).\n\n Returns:\n qutip.Qobj: A new Qobj for the Hamiltonian with coefficients\n extracted from the effective sequence (determined by\n `self.sampling_rate`) at the specified time.\n \"\"\"\n if time > self._tot_duration:\n raise ValueError(\n f\"Provided time (`time` = {time}) must be \"\n \"less than or equal to the sequence duration \"\n f\"({self._tot_duration}).\"\n )\n if time < 0:\n raise ValueError(\n f\"Provided time (`time` = {time}) must be \"\n \"greater than or equal to 0.\"\n )\n return self._hamiltonian(time / 1000) # Creates new Qutip.Qobj\n\n # Run Simulation Evolution using Qutip\n def run(\n self,\n progress_bar: Optional[bool] = False,\n **options: qutip.solver.Options,\n ) -> SimulationResults:\n \"\"\"Simulates the sequence using QuTiP's solvers.\n\n Will return NoisyResults if the noise in the SimConfig requires it.\n Otherwise will return CoherentResults.\n\n Keyword Args:\n progress_bar (bool or None): If True, the progress bar of QuTiP's\n solver will be shown. If None or False, no text appears.\n options (qutip.solver.Options): If specified, will override\n SimConfig solver_options. If no `max_step` value is provided,\n an automatic one is calculated from the `Sequence`'s schedule\n (half of the shortest duration among pulses and delays).\n \"\"\"\n if \"max_step\" in options.keys():\n solv_ops = qutip.Options(**options)\n else:\n auto_max_step = 0.5 * (self._seq._min_pulse_duration() / 1000)\n solv_ops = qutip.Options(max_step=auto_max_step, **options)\n\n meas_errors: Optional[Mapping[str, float]] = None\n if \"SPAM\" in self.config.noise:\n meas_errors = {\n k: self.config.spam_dict[k]\n for k in (\"epsilon\", \"epsilon_prime\")\n }\n if self.config.eta > 0 and self.initial_state != qutip.tensor(\n [self.basis[\"g\"] for _ in range(self._size)]\n ):\n raise NotImplementedError(\n \"Can't combine state preparation errors with an initial \"\n \"state different from the ground.\"\n )\n\n def _run_solver() -> CoherentResults:\n \"\"\"Returns CoherentResults: Object containing evolution results.\"\"\"\n # Decide if progress bar will be fed to QuTiP solver\n if progress_bar is True:\n p_bar = True\n elif (progress_bar is False) or (progress_bar is None):\n p_bar = None\n else:\n raise ValueError(\"`progress_bar` must be a bool.\")\n\n if \"dephasing\" in self.config.noise:\n # temporary workaround due to a qutip bug when using mesolve\n liouvillian = qutip.liouvillian(\n self._hamiltonian, self._collapse_ops\n )\n result = qutip.mesolve(\n liouvillian,\n self.initial_state,\n self._eval_times_array,\n progress_bar=p_bar,\n options=solv_ops,\n )\n else:\n result = qutip.sesolve(\n self._hamiltonian,\n self.initial_state,\n self._eval_times_array,\n progress_bar=p_bar,\n options=solv_ops,\n )\n return CoherentResults(\n result.states,\n self._size,\n self.basis_name,\n self._eval_times_array,\n self._meas_basis,\n meas_errors,\n )\n\n # Check if noises ask for averaging over multiple runs:\n if set(self.config.noise).issubset({\"dephasing\", \"SPAM\"}):\n # If there is \"SPAM\", the preparation errors must be zero\n if \"SPAM\" not in self.config.noise or self.config.eta == 0:\n return _run_solver()\n\n else:\n # Stores the 
different initial configurations and frequency\n initial_configs = Counter(\n \"\".join(\n (\n np.random.uniform(size=len(self._qid_index))\n < self.config.eta\n )\n .astype(int)\n .astype(str) # Turns bool->int->str\n )\n for _ in range(self.config.runs)\n ).most_common()\n loop_runs = len(initial_configs)\n update_ham = False\n else:\n loop_runs = self.config.runs\n update_ham = True\n\n # Will return NoisyResults\n time_indices = range(len(self._eval_times_array))\n total_count = np.array([Counter() for _ in time_indices])\n # We run the system multiple times\n for i in range(loop_runs):\n if not update_ham:\n initial_state, reps = initial_configs[i]\n # We load the initial state manually\n self._bad_atoms = dict(\n zip(\n self._qid_index,\n np.array(list(initial_state)).astype(bool),\n )\n )\n else:\n reps = 1\n # At each run, new random noise: new Hamiltonian\n self._construct_hamiltonian(update_and_extract=update_ham)\n # Get CoherentResults instance from sequence with added noise:\n cleanres_noisyseq = _run_solver()\n # Extract statistics at eval time:\n total_count += np.array(\n [\n cleanres_noisyseq.sample_state(\n t, n_samples=self.config.samples_per_run * reps\n )\n for t in self._eval_times_array\n ]\n )\n n_measures = self.config.runs * self.config.samples_per_run\n total_run_prob = [\n Counter({k: v / n_measures for k, v in total_count[t].items()})\n for t in time_indices\n ]\n return NoisyResults(\n total_run_prob,\n self._size,\n self.basis_name,\n self._eval_times_array,\n n_measures,\n )\n" ]
[ [ "numpy.dot", "numpy.logical_not", "numpy.sqrt", "numpy.min", "numpy.arange", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.sort", "numpy.max", "numpy.append", "numpy.random.normal", "numpy.any", "numpy.insert", "numpy.exp", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ] ]
artemyk/pandas
[ "a3cca397fd07a7ddd607d892aee9e307413c9856" ]
[ "pandas/core/ops.py" ]
[ "\"\"\"\nArithmetic operations for PandasObjects\n\nThis is not a public API.\n\"\"\"\n# necessary to enforce truediv in Python 2.X\nfrom __future__ import division\nimport operator\nimport numpy as np\nimport pandas as pd\nfrom pandas import compat, lib, tslib\nimport pandas.index as _index\nfrom pandas.util.decorators import Appender\nimport pandas.core.common as com\nimport pandas.computation.expressions as expressions\nfrom pandas.lib import isscalar\nfrom pandas.tslib import iNaT\nfrom pandas.core.common import(bind_method, is_list_like, notnull, isnull,\n _values_from_object, _maybe_match_name,\n needs_i8_conversion, is_datetimelike_v_numeric,\n is_integer_dtype, is_categorical_dtype, is_object_dtype,\n is_timedelta64_dtype, is_datetime64_dtype, is_bool_dtype)\n\n# -----------------------------------------------------------------------------\n# Functions that add arithmetic methods to objects, given arithmetic factory\n# methods\n\n\ndef _create_methods(arith_method, radd_func, comp_method, bool_method,\n use_numexpr, special=False, default_axis='columns'):\n # creates actual methods based upon arithmetic, comp and bool method\n # constructors.\n\n # NOTE: Only frame cares about default_axis, specifically: special methods\n # have default axis None, whereas flex methods have default axis 'columns'\n # if we're not using numexpr, then don't pass a str_rep\n if use_numexpr:\n op = lambda x: x\n else:\n op = lambda x: None\n if special:\n def names(x):\n if x[-1] == \"_\":\n return \"__%s_\" % x\n else:\n return \"__%s__\" % x\n else:\n names = lambda x: x\n radd_func = radd_func or operator.add\n # Inframe, all special methods have default_axis=None, flex methods have\n # default_axis set to the default (columns)\n new_methods = dict(\n add=arith_method(operator.add, names('add'), op('+'),\n default_axis=default_axis),\n radd=arith_method(radd_func, names('radd'), op('+'),\n default_axis=default_axis),\n sub=arith_method(operator.sub, names('sub'), op('-'),\n default_axis=default_axis),\n mul=arith_method(operator.mul, names('mul'), op('*'),\n default_axis=default_axis),\n truediv=arith_method(operator.truediv, names('truediv'), op('/'),\n truediv=True, fill_zeros=np.inf,\n default_axis=default_axis),\n floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),\n default_axis=default_axis, fill_zeros=np.inf),\n # Causes a floating point exception in the tests when numexpr\n # enabled, so for now no speedup\n mod=arith_method(operator.mod, names('mod'), None,\n default_axis=default_axis, fill_zeros=np.nan),\n pow=arith_method(operator.pow, names('pow'), op('**'),\n default_axis=default_axis),\n # not entirely sure why this is necessary, but previously was included\n # so it's here to maintain compatibility\n rmul=arith_method(operator.mul, names('rmul'), op('*'),\n default_axis=default_axis, reversed=True),\n rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),\n default_axis=default_axis, reversed=True),\n rtruediv=arith_method(lambda x, y: operator.truediv(y, x),\n names('rtruediv'), op('/'), truediv=True,\n fill_zeros=np.inf, default_axis=default_axis,\n reversed=True),\n rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),\n names('rfloordiv'), op('//'),\n default_axis=default_axis, fill_zeros=np.inf,\n reversed=True),\n rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'),\n default_axis=default_axis, reversed=True),\n rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),\n default_axis=default_axis, fill_zeros=np.nan,\n 
reversed=True),\n )\n new_methods['div'] = new_methods['truediv']\n new_methods['rdiv'] = new_methods['rtruediv']\n\n # Comp methods never had a default axis set\n if comp_method:\n new_methods.update(dict(\n eq=comp_method(operator.eq, names('eq'), op('==')),\n ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),\n lt=comp_method(operator.lt, names('lt'), op('<')),\n gt=comp_method(operator.gt, names('gt'), op('>')),\n le=comp_method(operator.le, names('le'), op('<=')),\n ge=comp_method(operator.ge, names('ge'), op('>=')),\n ))\n if bool_method:\n new_methods.update(dict(\n and_=bool_method(operator.and_, names('and_'), op('&')),\n or_=bool_method(operator.or_, names('or_'), op('|')),\n # For some reason ``^`` wasn't used in original.\n xor=bool_method(operator.xor, names('xor'), op('^')),\n rand_=bool_method(lambda x, y: operator.and_(y, x),\n names('rand_'), op('&')),\n ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')),\n rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^'))\n ))\n\n new_methods = dict((names(k), v) for k, v in new_methods.items())\n return new_methods\n\n\ndef add_methods(cls, new_methods, force, select, exclude):\n if select and exclude:\n raise TypeError(\"May only pass either select or exclude\")\n methods = new_methods\n if select:\n select = set(select)\n methods = {}\n for key, method in new_methods.items():\n if key in select:\n methods[key] = method\n if exclude:\n for k in exclude:\n new_methods.pop(k, None)\n\n for name, method in new_methods.items():\n if force or name not in cls.__dict__:\n bind_method(cls, name, method)\n\n\n#----------------------------------------------------------------------\n# Arithmetic\ndef add_special_arithmetic_methods(cls, arith_method=None, radd_func=None,\n comp_method=None, bool_method=None,\n use_numexpr=True, force=False, select=None,\n exclude=None):\n \"\"\"\n Adds the full suite of special arithmetic methods (``__add__``,\n ``__sub__``, etc.) 
to the class.\n\n Parameters\n ----------\n arith_method : function (optional)\n factory for special arithmetic methods, with op string:\n f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)\n radd_func : function (optional)\n Possible replacement for ``operator.add`` for compatibility\n comp_method : function, optional,\n factory for rich comparison - signature: f(op, name, str_rep)\n use_numexpr : bool, default True\n whether to accelerate with numexpr, defaults to True\n force : bool, default False\n if False, checks whether function is defined **on ``cls.__dict__``**\n before defining if True, always defines functions on class base\n select : iterable of strings (optional)\n if passed, only sets functions with names in select\n exclude : iterable of strings (optional)\n if passed, will not set functions with names in exclude\n \"\"\"\n radd_func = radd_func or operator.add\n\n # in frame, special methods have default_axis = None, comp methods use\n # 'columns'\n\n new_methods = _create_methods(arith_method, radd_func, comp_method,\n bool_method, use_numexpr, default_axis=None,\n special=True)\n\n # inplace operators (I feel like these should get passed an `inplace=True`\n # or just be removed\n\n def _wrap_inplace_method(method):\n \"\"\"\n return an inplace wrapper for this method\n \"\"\"\n\n def f(self, other):\n result = method(self, other)\n\n # this makes sure that we are aligned like the input\n # we are updating inplace so we want to ignore is_copy\n self._update_inplace(result.reindex_like(self,copy=False)._data,\n verify_is_copy=False)\n\n return self\n return f\n\n new_methods.update(dict(\n __iadd__=_wrap_inplace_method(new_methods[\"__add__\"]),\n __isub__=_wrap_inplace_method(new_methods[\"__sub__\"]),\n __imul__=_wrap_inplace_method(new_methods[\"__mul__\"]),\n __itruediv__=_wrap_inplace_method(new_methods[\"__truediv__\"]),\n __ipow__=_wrap_inplace_method(new_methods[\"__pow__\"]),\n ))\n if not compat.PY3:\n new_methods[\"__idiv__\"] = new_methods[\"__div__\"]\n\n add_methods(cls, new_methods=new_methods, force=force, select=select,\n exclude=exclude)\n\n\ndef add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None,\n flex_comp_method=None, flex_bool_method=None,\n use_numexpr=True, force=False, select=None,\n exclude=None):\n \"\"\"\n Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)\n to the class.\n\n Parameters\n ----------\n flex_arith_method : function\n factory for special arithmetic methods, with op string:\n f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)\n radd_func : function (optional)\n Possible replacement for ``lambda x, y: operator.add(y, x)`` for\n compatibility\n flex_comp_method : function, optional,\n factory for rich comparison - signature: f(op, name, str_rep)\n use_numexpr : bool, default True\n whether to accelerate with numexpr, defaults to True\n force : bool, default False\n if False, checks whether function is defined **on ``cls.__dict__``**\n before defining if True, always defines functions on class base\n select : iterable of strings (optional)\n if passed, only sets functions with names in select\n exclude : iterable of strings (optional)\n if passed, will not set functions with names in exclude\n \"\"\"\n radd_func = radd_func or (lambda x, y: operator.add(y, x))\n # in frame, default axis is 'columns', doesn't matter for series and panel\n new_methods = _create_methods(\n flex_arith_method, radd_func, flex_comp_method, flex_bool_method,\n use_numexpr, 
default_axis='columns', special=False)\n new_methods.update(dict(\n multiply=new_methods['mul'],\n subtract=new_methods['sub'],\n divide=new_methods['div']\n ))\n # opt out of bool flex methods for now\n for k in ('ror_', 'rxor', 'rand_'):\n if k in new_methods:\n new_methods.pop(k)\n\n add_methods(cls, new_methods=new_methods, force=force, select=select,\n exclude=exclude)\n\n\nclass _TimeOp(object):\n\n \"\"\"\n Wrapper around Series datetime/time/timedelta arithmetic operations.\n Generally, you should use classmethod ``maybe_convert_for_time_op`` as an\n entry point.\n \"\"\"\n fill_value = iNaT\n wrap_results = staticmethod(lambda x: x)\n dtype = None\n\n def __init__(self, left, right, name):\n self.name = name\n\n # need to make sure that we are aligning the data\n if isinstance(left, pd.Series) and isinstance(right, pd.Series):\n left, right = left.align(right,copy=False)\n\n self.left = left\n self.right = right\n lvalues = self._convert_to_array(left, name=name)\n rvalues = self._convert_to_array(right, name=name, other=lvalues)\n\n self.is_timedelta_lhs = is_timedelta64_dtype(left)\n self.is_datetime_lhs = is_datetime64_dtype(left)\n self.is_integer_lhs = left.dtype.kind in ['i', 'u']\n self.is_datetime_rhs = is_datetime64_dtype(rvalues)\n self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)\n self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')\n\n self._validate()\n\n self._convert_for_datetime(lvalues, rvalues)\n\n def _validate(self):\n # timedelta and integer mul/div\n\n if (self.is_timedelta_lhs and self.is_integer_rhs) or\\\n (self.is_integer_lhs and self.is_timedelta_rhs):\n\n if self.name not in ('__truediv__', '__div__', '__mul__'):\n raise TypeError(\"can only operate on a timedelta and an \"\n \"integer for division, but the operator [%s]\"\n \"was passed\" % self.name)\n\n # 2 datetimes\n elif self.is_datetime_lhs and self.is_datetime_rhs:\n if self.name != '__sub__':\n raise TypeError(\"can only operate on a datetimes for\"\n \" subtraction, but the operator [%s] was\"\n \" passed\" % self.name)\n\n # 2 timedeltas\n elif self.is_timedelta_lhs and self.is_timedelta_rhs:\n\n if self.name not in ('__div__', '__truediv__', '__add__',\n '__sub__'):\n raise TypeError(\"can only operate on a timedeltas for \"\n \"addition, subtraction, and division, but the\"\n \" operator [%s] was passed\" % self.name)\n\n # datetime and timedelta\n elif self.is_datetime_lhs and self.is_timedelta_rhs:\n\n if self.name not in ('__add__', '__sub__'):\n raise TypeError(\"can only operate on a datetime with a rhs of\"\n \" a timedelta for addition and subtraction, \"\n \" but the operator [%s] was passed\" %\n self.name)\n\n elif self.is_timedelta_lhs and self.is_datetime_rhs:\n\n if self.name != '__add__':\n raise TypeError(\"can only operate on a timedelta and\"\n \" a datetime for addition, but the operator\"\n \" [%s] was passed\" % self.name)\n else:\n raise TypeError('cannot operate on a series with out a rhs '\n 'of a series/ndarray of type datetime64[ns] '\n 'or a timedelta')\n\n def _convert_to_array(self, values, name=None, other=None):\n \"\"\"converts values to ndarray\"\"\"\n from pandas.tseries.timedeltas import to_timedelta\n\n coerce = True\n if not is_list_like(values):\n values = np.array([values])\n inferred_type = lib.infer_dtype(values)\n\n if inferred_type in ('datetime64', 'datetime', 'date', 'time'):\n # if we have a other of timedelta, but use pd.NaT here we\n # we are in the wrong path\n if (other is not None and other.dtype == 'timedelta64[ns]' and\n 
all(isnull(v) for v in values)):\n values = np.empty(values.shape, dtype=other.dtype)\n values[:] = iNaT\n\n # a datelike\n elif isinstance(values, pd.DatetimeIndex):\n values = values.to_series()\n elif not (isinstance(values, (np.ndarray, pd.Series)) and\n is_datetime64_dtype(values)):\n values = tslib.array_to_datetime(values)\n elif inferred_type in ('timedelta', 'timedelta64'):\n # have a timedelta, convert to to ns here\n values = to_timedelta(values, coerce=coerce)\n elif inferred_type == 'integer':\n # py3 compat where dtype is 'm' but is an integer\n if values.dtype.kind == 'm':\n values = values.astype('timedelta64[ns]')\n elif isinstance(values, pd.PeriodIndex):\n values = values.to_timestamp().to_series()\n elif name not in ('__truediv__', '__div__', '__mul__'):\n raise TypeError(\"incompatible type for a datetime/timedelta \"\n \"operation [{0}]\".format(name))\n elif isinstance(values[0], pd.DateOffset):\n # handle DateOffsets\n os = np.array([getattr(v, 'delta', None) for v in values])\n mask = isnull(os)\n if mask.any():\n raise TypeError(\"cannot use a non-absolute DateOffset in \"\n \"datetime/timedelta operations [{0}]\".format(\n ', '.join([com.pprint_thing(v)\n for v in values[mask]])))\n values = to_timedelta(os, coerce=coerce)\n elif inferred_type == 'floating':\n\n # all nan, so ok, use the other dtype (e.g. timedelta or datetime)\n if isnull(values).all():\n values = np.empty(values.shape, dtype=other.dtype)\n values[:] = iNaT\n else:\n raise TypeError(\n 'incompatible type [{0}] for a datetime/timedelta '\n 'operation'.format(np.array(values).dtype))\n else:\n raise TypeError(\"incompatible type [{0}] for a datetime/timedelta\"\n \" operation\".format(np.array(values).dtype))\n\n return values\n\n def _convert_for_datetime(self, lvalues, rvalues):\n mask = None\n # datetimes require views\n if self.is_datetime_lhs or self.is_datetime_rhs:\n # datetime subtraction means timedelta\n if self.is_datetime_lhs and self.is_datetime_rhs:\n self.dtype = 'timedelta64[ns]'\n else:\n self.dtype = 'datetime64[ns]'\n mask = isnull(lvalues) | isnull(rvalues)\n lvalues = lvalues.view(np.int64)\n rvalues = rvalues.view(np.int64)\n\n # otherwise it's a timedelta\n else:\n self.dtype = 'timedelta64[ns]'\n mask = isnull(lvalues) | isnull(rvalues)\n lvalues = lvalues.astype(np.int64)\n rvalues = rvalues.astype(np.int64)\n\n # time delta division -> unit less\n # integer gets converted to timedelta in np < 1.6\n if (self.is_timedelta_lhs and self.is_timedelta_rhs) and\\\n not self.is_integer_rhs and\\\n not self.is_integer_lhs and\\\n self.name in ('__div__', '__truediv__'):\n self.dtype = 'float64'\n self.fill_value = np.nan\n lvalues = lvalues.astype(np.float64)\n rvalues = rvalues.astype(np.float64)\n\n # if we need to mask the results\n if mask is not None:\n if mask.any():\n def f(x):\n x = np.array(x, dtype=self.dtype)\n np.putmask(x, mask, self.fill_value)\n return x\n self.wrap_results = f\n self.lvalues = lvalues\n self.rvalues = rvalues\n\n @classmethod\n def maybe_convert_for_time_op(cls, left, right, name):\n \"\"\"\n if ``left`` and ``right`` are appropriate for datetime arithmetic with\n operation ``name``, processes them and returns a ``_TimeOp`` object\n that stores all the required values. 
Otherwise, it will generate\n either a ``NotImplementedError`` or ``None``, indicating that the\n operation is unsupported for datetimes (e.g., an unsupported r_op) or\n that the data is not the right type for time ops.\n \"\"\"\n # decide if we can do it\n is_timedelta_lhs = is_timedelta64_dtype(left)\n is_datetime_lhs = is_datetime64_dtype(left)\n if not (is_datetime_lhs or is_timedelta_lhs):\n return None\n\n # rops are allowed. No need for special checks, just strip off\n # r part.\n if name.startswith('__r'):\n name = \"__\" + name[3:]\n return cls(left, right, name)\n\n\ndef _arith_method_SERIES(op, name, str_rep, fill_zeros=None,\n default_axis=None, **eval_kwargs):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True, **eval_kwargs)\n except TypeError:\n if isinstance(y, (np.ndarray, pd.Series, pd.Index)):\n dtype = np.find_common_type([x.dtype, y.dtype], [])\n result = np.empty(x.size, dtype=dtype)\n mask = notnull(x) & notnull(y)\n result[mask] = op(x[mask], _values_from_object(y[mask]))\n elif isinstance(x, np.ndarray):\n result = np.empty(len(x), dtype=x.dtype)\n mask = notnull(x)\n result[mask] = op(x[mask], y)\n else:\n raise TypeError(\"{typ} cannot perform the operation {op}\".format(typ=type(x).__name__,op=str_rep))\n\n result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)\n\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n def wrapper(left, right, name=name):\n\n if isinstance(right, pd.DataFrame):\n return NotImplemented\n\n time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name)\n\n if time_converted is None:\n lvalues, rvalues = left, right\n dtype = None\n wrap_results = lambda x: x\n elif time_converted == NotImplemented:\n return NotImplemented\n else:\n left, right = time_converted.left, time_converted.right\n lvalues, rvalues = time_converted.lvalues, time_converted.rvalues\n dtype = time_converted.dtype\n wrap_results = time_converted.wrap_results\n\n if isinstance(rvalues, pd.Series):\n rindex = getattr(rvalues,'index',rvalues)\n name = _maybe_match_name(left, rvalues)\n lvalues = getattr(lvalues, 'values', lvalues)\n rvalues = getattr(rvalues, 'values', rvalues)\n if left.index.equals(rindex):\n index = left.index\n else:\n index, lidx, ridx = left.index.join(rindex, how='outer',\n return_indexers=True)\n\n if lidx is not None:\n lvalues = com.take_1d(lvalues, lidx)\n\n if ridx is not None:\n rvalues = com.take_1d(rvalues, ridx)\n\n arr = na_op(lvalues, rvalues)\n\n return left._constructor(wrap_results(arr), index=index,\n name=name, dtype=dtype)\n else:\n # scalars\n if hasattr(lvalues, 'values'):\n lvalues = lvalues.values\n return left._constructor(wrap_results(na_op(lvalues, rvalues)),\n index=left.index, name=left.name,\n dtype=dtype)\n return wrapper\n\n\ndef _comp_method_SERIES(op, name, str_rep, masker=False):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n def na_op(x, y):\n\n # dispatch to the categorical if we have a categorical\n # in either operand\n if is_categorical_dtype(x):\n return op(x,y)\n elif is_categorical_dtype(y) and not isscalar(y):\n return op(y,x)\n\n if is_object_dtype(x.dtype):\n if isinstance(y, list):\n y = lib.list_to_object_array(y)\n\n if isinstance(y, (np.ndarray, pd.Series)):\n if not is_object_dtype(y.dtype):\n result = lib.vec_compare(x, y.astype(np.object_), op)\n 
else:\n result = lib.vec_compare(x, y, op)\n else:\n result = lib.scalar_compare(x, y, op)\n else:\n\n # we want to compare like types\n # we only want to convert to integer like if\n # we are not NotImplemented, otherwise\n # we would allow datetime64 (but viewed as i8) against\n # integer comparisons\n if is_datetimelike_v_numeric(x, y):\n raise TypeError(\"invalid type comparison\")\n\n # numpy does not like comparisons vs None\n if isscalar(y) and isnull(y):\n y = np.nan\n\n # we have a datetime/timedelta and may need to convert\n mask = None\n if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):\n\n if isscalar(y):\n y = _index.convert_scalar(x,_values_from_object(y))\n else:\n y = y.view('i8')\n\n if name == '__ne__':\n mask = notnull(x)\n else:\n mask = isnull(x)\n\n x = x.view('i8')\n\n try:\n result = getattr(x, name)(y)\n if result is NotImplemented:\n raise TypeError(\"invalid type comparison\")\n except AttributeError:\n result = op(x, y)\n\n if mask is not None and mask.any():\n result[mask] = False\n\n return result\n\n def wrapper(self, other, axis=None):\n # Validate the axis parameter\n if axis is not None:\n self._get_axis_number(axis)\n\n if isinstance(other, pd.Series):\n name = _maybe_match_name(self, other)\n if len(self) != len(other):\n raise ValueError('Series lengths must match to compare')\n return self._constructor(na_op(self.values, other.values),\n index=self.index, name=name)\n elif isinstance(other, pd.DataFrame): # pragma: no cover\n return NotImplemented\n elif isinstance(other, (np.ndarray, pd.Index)):\n if len(self) != len(other):\n raise ValueError('Lengths must match to compare')\n return self._constructor(na_op(self.values, np.asarray(other)),\n index=self.index).__finalize__(self)\n elif isinstance(other, pd.Categorical):\n if not is_categorical_dtype(self):\n msg = \"Cannot compare a Categorical for op {op} with Series of dtype {typ}.\\n\"\\\n \"If you want to compare values, use 'series <op> np.asarray(other)'.\"\n raise TypeError(msg.format(op=op,typ=self.dtype))\n\n\n if is_categorical_dtype(self):\n # cats are a special case as get_values() would return an ndarray, which would then\n # not take categories ordering into account\n # we can go directly to op, as the na_op would just test again and dispatch to it.\n res = op(self.values, other)\n else:\n values = self.get_values()\n if is_list_like(other):\n other = np.asarray(other)\n\n res = na_op(values, other)\n if isscalar(res):\n raise TypeError('Could not compare %s type with Series'\n % type(other))\n\n # always return a full value series here\n res = _values_from_object(res)\n\n res = pd.Series(res, index=self.index, name=self.name,\n dtype='bool')\n return res\n return wrapper\n\n\ndef _bool_method_SERIES(op, name, str_rep):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n def na_op(x, y):\n try:\n result = op(x, y)\n except TypeError:\n if isinstance(y, list):\n y = lib.list_to_object_array(y)\n\n if isinstance(y, (np.ndarray, pd.Series)):\n if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):\n result = op(x, y) # when would this be hit?\n else:\n x = com._ensure_object(x)\n y = com._ensure_object(y)\n result = lib.vec_binop(x, y, op)\n else:\n try:\n\n # let null fall thru\n if not isnull(y):\n y = bool(y)\n result = lib.scalar_binop(x, y, op)\n except:\n raise TypeError(\"cannot compare a dtyped [{0}] array with \"\n \"a scalar of type [{1}]\".format(\n x.dtype, type(y).__name__))\n\n return result\n\n def 
wrapper(self, other):\n is_self_int_dtype = is_integer_dtype(self.dtype)\n\n fill_int = lambda x: x.fillna(0)\n fill_bool = lambda x: x.fillna(False).astype(bool)\n\n if isinstance(other, pd.Series):\n name = _maybe_match_name(self, other)\n other = other.reindex_like(self)\n is_other_int_dtype = is_integer_dtype(other.dtype)\n other = fill_int(other) if is_other_int_dtype else fill_bool(other)\n\n filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool\n return filler(self._constructor(na_op(self.values, other.values),\n index=self.index,\n name=name))\n\n elif isinstance(other, pd.DataFrame):\n return NotImplemented\n\n else:\n # scalars, list, tuple, np.array\n filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool\n return filler(self._constructor(na_op(self.values, other),\n index=self.index)).__finalize__(self)\n\n return wrapper\n\n\ndef _radd_compat(left, right):\n radd = lambda x, y: y + x\n # GH #353, NumPy 1.5.1 workaround\n try:\n output = radd(left, right)\n except TypeError:\n raise\n\n return output\n\n_op_descriptions = {'add': {'op': '+', 'desc': 'Addition', 'reversed': False, 'reverse': 'radd'},\n 'sub': {'op': '-', 'desc': 'Subtraction', 'reversed': False, 'reverse': 'rsub'},\n 'mul': {'op': '*', 'desc': 'Multiplication', 'reversed': False, 'reverse': 'rmul'},\n 'mod': {'op': '%', 'desc': 'Modulo', 'reversed': False, 'reverse': 'rmod'},\n 'pow': {'op': '**', 'desc': 'Exponential power', 'reversed': False, 'reverse': 'rpow'},\n 'truediv': {'op': '/', 'desc': 'Floating division', 'reversed': False, 'reverse': 'rtruediv'},\n 'floordiv': {'op': '//', 'desc': 'Integer division', 'reversed': False, 'reverse': 'rfloordiv'}}\n\n_op_names = list(_op_descriptions.keys())\nfor k in _op_names:\n reverse_op = _op_descriptions[k]['reverse']\n _op_descriptions[reverse_op] = _op_descriptions[k].copy()\n _op_descriptions[reverse_op]['reversed'] = True\n _op_descriptions[reverse_op]['reverse'] = k\n\ndef _flex_method_SERIES(op, name, str_rep, default_axis=None,\n fill_zeros=None, **eval_kwargs):\n op_name = name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' series'\n else:\n equiv = 'series ' + op_desc['op'] + ' other'\n\n doc = \"\"\"\n %s of series and other, element-wise (binary operator `%s`).\n\n Equivalent to ``%s``, but with support to substitute a fill_value for\n missing data in one of the inputs.\n\n Parameters\n ----------\n other: Series or scalar value\n fill_value : None or float value, default None (NaN)\n Fill missing (NaN) values with this value. 
If both Series are\n missing, the result will be missing\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\n Returns\n -------\n result : Series\n\n See also\n --------\n Series.%s\n \"\"\" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])\n\n @Appender(doc)\n def flex_wrapper(self, other, level=None, fill_value=None, axis=0):\n # validate axis\n self._get_axis_number(axis)\n if isinstance(other, pd.Series):\n return self._binop(other, op, level=level, fill_value=fill_value)\n elif isinstance(other, (np.ndarray, pd.Series, list, tuple)):\n if len(other) != len(self):\n raise ValueError('Lengths must be equal')\n return self._binop(self._constructor(other, self.index), op,\n level=level, fill_value=fill_value)\n else:\n return self._constructor(op(self.values, other),\n self.index).__finalize__(self)\n\n flex_wrapper.__name__ = name\n return flex_wrapper\n\nseries_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,\n radd_func=_radd_compat,\n flex_comp_method=_comp_method_SERIES)\n\nseries_special_funcs = dict(arith_method=_arith_method_SERIES,\n radd_func=_radd_compat,\n comp_method=_comp_method_SERIES,\n bool_method=_bool_method_SERIES)\n\n\n_arith_doc_FRAME = \"\"\"\nBinary operator %s with support to substitute a fill_value for missing data in\none of the inputs\n\nParameters\n----------\nother : Series, DataFrame, or constant\naxis : {0, 1, 'index', 'columns'}\n For Series input, axis to match Series index on\nfill_value : None or float value, default None\n Fill missing (NaN) values with this value. If both DataFrame locations are\n missing, the result will be missing\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\nNotes\n-----\nMismatched indices will be unioned together\n\nReturns\n-------\nresult : DataFrame\n\"\"\"\n\n\ndef _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',\n fill_zeros=None, **eval_kwargs):\n def na_op(x, y):\n try:\n result = expressions.evaluate(\n op, str_rep, x, y, raise_on_error=True, **eval_kwargs)\n except TypeError:\n xrav = x.ravel()\n if isinstance(y, (np.ndarray, pd.Series)):\n dtype = np.find_common_type([x.dtype, y.dtype], [])\n result = np.empty(x.size, dtype=dtype)\n yrav = y.ravel()\n mask = notnull(xrav) & notnull(yrav)\n xrav = xrav[mask]\n yrav = yrav[mask]\n if np.prod(xrav.shape) and np.prod(yrav.shape):\n result[mask] = op(xrav, yrav)\n elif hasattr(x,'size'):\n result = np.empty(x.size, dtype=x.dtype)\n mask = notnull(xrav)\n xrav = xrav[mask]\n if np.prod(xrav.shape):\n result[mask] = op(xrav, y)\n else:\n raise TypeError(\"cannot perform operation {op} between objects \"\n \"of type {x} and {y}\".format(op=name,x=type(x),y=type(y)))\n\n result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)\n result = result.reshape(x.shape)\n\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n\n return result\n\n if name in _op_descriptions:\n op_name = name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' dataframe'\n else:\n equiv = 'dataframe ' + op_desc['op'] + ' other'\n\n doc = \"\"\"\n %s of dataframe and other, element-wise (binary operator `%s`).\n\n Equivalent to ``%s``, but with support to substitute a fill_value for\n missing data in one of the inputs.\n\n Parameters\n ----------\n other : Series, DataFrame, or constant\n axis : {0, 1, 'index', 'columns'}\n For Series input, axis to match Series index on\n 
fill_value : None or float value, default None\n Fill missing (NaN) values with this value. If both DataFrame locations are\n missing, the result will be missing\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\n Notes\n -----\n Mismatched indices will be unioned together\n\n Returns\n -------\n result : DataFrame\n\n See also\n --------\n DataFrame.%s\n \"\"\" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])\n else:\n doc = _arith_doc_FRAME % name\n\n @Appender(doc)\n def f(self, other, axis=default_axis, level=None, fill_value=None):\n if isinstance(other, pd.DataFrame): # Another DataFrame\n return self._combine_frame(other, na_op, fill_value, level)\n elif isinstance(other, pd.Series):\n return self._combine_series(other, na_op, fill_value, axis, level)\n elif isinstance(other, (list, tuple)):\n if axis is not None and self._get_axis_name(axis) == 'index':\n # TODO: Get all of these to use _constructor_sliced\n # casted = self._constructor_sliced(other, index=self.index)\n casted = pd.Series(other, index=self.index)\n else:\n # casted = self._constructor_sliced(other, index=self.columns)\n casted = pd.Series(other, index=self.columns)\n return self._combine_series(casted, na_op, fill_value, axis, level)\n elif isinstance(other, np.ndarray) and other.ndim: # skips np scalar\n if other.ndim == 1:\n if axis is not None and self._get_axis_name(axis) == 'index':\n # casted = self._constructor_sliced(other,\n # index=self.index)\n casted = pd.Series(other, index=self.index)\n else:\n # casted = self._constructor_sliced(other,\n # index=self.columns)\n casted = pd.Series(other, index=self.columns)\n return self._combine_series(casted, na_op, fill_value,\n axis, level)\n elif other.ndim == 2:\n # casted = self._constructor(other, index=self.index,\n # columns=self.columns)\n casted = pd.DataFrame(other, index=self.index,\n columns=self.columns)\n return self._combine_frame(casted, na_op, fill_value, level)\n else:\n raise ValueError(\"Incompatible argument shape: %s\" %\n (other.shape, ))\n else:\n return self._combine_const(other, na_op)\n\n f.__name__ = name\n\n return f\n\n\n# Masker unused for now\ndef _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',\n masker=False):\n\n def na_op(x, y):\n try:\n result = op(x, y)\n except TypeError:\n xrav = x.ravel()\n result = np.empty(x.size, dtype=x.dtype)\n if isinstance(y, (np.ndarray, pd.Series)):\n yrav = y.ravel()\n mask = notnull(xrav) & notnull(yrav)\n result[mask] = op(np.array(list(xrav[mask])),\n np.array(list(yrav[mask])))\n else:\n mask = notnull(xrav)\n result[mask] = op(np.array(list(xrav[mask])), y)\n\n if op == operator.ne: # pragma: no cover\n np.putmask(result, ~mask, True)\n else:\n np.putmask(result, ~mask, False)\n result = result.reshape(x.shape)\n\n return result\n\n @Appender('Wrapper for flexible comparison methods %s' % name)\n def f(self, other, axis=default_axis, level=None):\n if isinstance(other, pd.DataFrame): # Another DataFrame\n return self._flex_compare_frame(other, na_op, str_rep, level)\n\n elif isinstance(other, pd.Series):\n return self._combine_series(other, na_op, None, axis, level)\n\n elif isinstance(other, (list, tuple)):\n if axis is not None and self._get_axis_name(axis) == 'index':\n casted = pd.Series(other, index=self.index)\n else:\n casted = pd.Series(other, index=self.columns)\n\n return self._combine_series(casted, na_op, None, axis, level)\n\n elif isinstance(other, np.ndarray):\n if other.ndim == 1:\n if axis 
is not None and self._get_axis_name(axis) == 'index':\n casted = pd.Series(other, index=self.index)\n else:\n casted = pd.Series(other, index=self.columns)\n\n return self._combine_series(casted, na_op, None, axis, level)\n\n elif other.ndim == 2:\n casted = pd.DataFrame(other, index=self.index,\n columns=self.columns)\n\n return self._flex_compare_frame(casted, na_op, str_rep, level)\n\n else:\n raise ValueError(\"Incompatible argument shape: %s\" %\n (other.shape, ))\n\n else:\n return self._combine_const(other, na_op)\n\n f.__name__ = name\n\n return f\n\n\ndef _comp_method_FRAME(func, name, str_rep, masker=False):\n @Appender('Wrapper for comparison method %s' % name)\n def f(self, other):\n if isinstance(other, pd.DataFrame): # Another DataFrame\n return self._compare_frame(other, func, str_rep)\n elif isinstance(other, pd.Series):\n return self._combine_series_infer(other, func)\n else:\n\n # straight boolean comparisions we want to allow all columns\n # (regardless of dtype to pass thru) See #4537 for discussion.\n res = self._combine_const(other, func, raise_on_error=False)\n return res.fillna(True).astype(bool)\n\n f.__name__ = name\n\n return f\n\n\nframe_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,\n radd_func=_radd_compat,\n flex_comp_method=_flex_comp_method_FRAME)\n\n\nframe_special_funcs = dict(arith_method=_arith_method_FRAME,\n radd_func=_radd_compat,\n comp_method=_comp_method_FRAME,\n bool_method=_arith_method_FRAME)\n\n\ndef _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,\n default_axis=None, **eval_kwargs):\n # copied from Series na_op above, but without unnecessary branch for\n # non-scalar\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True, **eval_kwargs)\n except TypeError:\n\n # TODO: might need to find_common_type here?\n result = np.empty(len(x), dtype=x.dtype)\n mask = notnull(x)\n result[mask] = op(x[mask], y)\n result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)\n\n result = com._fill_zeros(result, x, y, name, fill_zeros)\n return result\n\n # work only for scalars\n def f(self, other):\n if not isscalar(other):\n raise ValueError('Simple arithmetic with %s can only be '\n 'done with scalar values' %\n self._constructor.__name__)\n\n return self._combine(other, op)\n f.__name__ = name\n return f\n\n\ndef _comp_method_PANEL(op, name, str_rep=None, masker=False):\n\n def na_op(x, y):\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n raise_on_error=True)\n except TypeError:\n xrav = x.ravel()\n result = np.empty(x.size, dtype=bool)\n if isinstance(y, np.ndarray):\n yrav = y.ravel()\n mask = notnull(xrav) & notnull(yrav)\n result[mask] = op(np.array(list(xrav[mask])),\n np.array(list(yrav[mask])))\n else:\n mask = notnull(xrav)\n result[mask] = op(np.array(list(xrav[mask])), y)\n\n if op == operator.ne: # pragma: no cover\n np.putmask(result, ~mask, True)\n else:\n np.putmask(result, ~mask, False)\n result = result.reshape(x.shape)\n\n return result\n\n @Appender('Wrapper for comparison method %s' % name)\n def f(self, other):\n if isinstance(other, self._constructor):\n return self._compare_constructor(other, na_op)\n elif isinstance(other, (self._constructor_sliced, pd.DataFrame,\n pd.Series)):\n raise Exception(\"input needs alignment for this object [%s]\" %\n self._constructor)\n else:\n return self._combine_const(other, na_op)\n\n f.__name__ = name\n\n return f\n\n\npanel_special_funcs = dict(arith_method=_arith_method_PANEL,\n comp_method=_comp_method_PANEL,\n 
bool_method=_arith_method_PANEL)\n" ]
[ [ "pandas.core.common.is_list_like", "pandas.Series", "pandas.core.common.is_integer_dtype", "pandas.core.common.is_categorical_dtype", "pandas.lib.scalar_compare", "numpy.asarray", "pandas.tslib.array_to_datetime", "pandas.core.common.is_bool_dtype", "pandas.core.common.bind_method", "pandas.DataFrame", "pandas.core.common.is_timedelta64_dtype", "pandas.core.common._ensure_object", "pandas.core.common.notnull", "pandas.core.common._maybe_upcast_putmask", "pandas.lib.vec_binop", "pandas.lib.scalar_binop", "pandas.core.common.needs_i8_conversion", "pandas.lib.vec_compare", "pandas.lib.isscalar", "pandas.core.common.take_1d", "pandas.core.common._maybe_match_name", "pandas.util.decorators.Appender", "pandas.core.common.is_datetimelike_v_numeric", "pandas.core.common.pprint_thing", "pandas.core.common.is_object_dtype", "numpy.putmask", "pandas.core.common._fill_zeros", "pandas.lib.list_to_object_array", "numpy.find_common_type", "numpy.array", "pandas.computation.expressions.evaluate", "numpy.empty", "pandas.lib.infer_dtype", "pandas.core.common._values_from_object", "pandas.core.common.is_datetime64_dtype", "pandas.core.common.isnull", "numpy.prod", "pandas.tseries.timedeltas.to_timedelta" ] ]
aldakata/ClassConditionalC2D
[ "dd73e1d4d5f0f82438340211e3c479dbd16b8ffc" ]
[ "main_cifar.py" ]
[ "from __future__ import print_function\n\nimport argparse\nimport os, sys\nimport tables\nimport random\n\nimport numpy as np\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torchvision import models\n\nfrom dataloaders import dataloader_cifar as dataloader\nfrom models import bit_models\nfrom models.PreResNet import *\nfrom models.resnet import SupCEResNet\nfrom train_cifar import run_train_loop\nfrom train_cifar_uncertainty import run_train_loop_mcdo\nfrom train_cifar_uncertainty_MCBN import run_train_loop_mcbn\n\nfrom constants import GMM, CCGMM, OR_CCGMM, AND_CCGMM, DIVISION_OPTIONS\n\nfrom processing_utils import load_net_optimizer_from_ckpt_to_device, get_epoch_from_checkpoint\nfrom predict_utils import pred_test, pred_train\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')\n parser.add_argument('--batch_size', default=64, type=int, help='train batchsize')\n parser.add_argument('--lr', '--learning_rate', default=0.02, type=float, help='initial learning rate')\n parser.add_argument('--noise_mode', default='sym')\n parser.add_argument('--alpha', default=4., type=float, help='parameter for Beta')\n parser.add_argument('--alpha-loss', default=0.5, type=float, help='parameter for Beta in loss')\n parser.add_argument('--lambda_u', default=25, type=float, help='weight for unsupervised loss')\n parser.add_argument('--p_threshold', default=0.5, type=float, help='clean probability threshold')\n parser.add_argument('--T', default=0.5, type=float, help='sharpening temperature')\n parser.add_argument('--num_epochs', default=360, type=int)\n parser.add_argument('--r', default=0.5, type=float, help='noise ratio')\n parser.add_argument('--id', default='')\n parser.add_argument('--seed', default=123)\n parser.add_argument('--gpuid', default=0, type=int)\n parser.add_argument('--data_path', default='/home/acatalan/Private/datasets/cifar-10-batches-py', type=str, help='path to dataset')\n parser.add_argument('--net', default='resnet18', type=str, help='net')\n parser.add_argument('--method', default='reg', type=str, help='method')\n parser.add_argument('--dataset', default='cifar10', type=str)\n parser.add_argument('--experiment-name', required=True, type=str)\n parser.add_argument('--aug', dest='aug', action='store_true', help='use stronger aug')\n parser.add_argument('--use-std', dest='use_std', action='store_true', help='use stronger aug')\n parser.add_argument('--drop', dest='drop', action='store_true', help='use drop')\n parser.add_argument('--not-rampup', dest='not_rampup', action='store_true', help='not rumpup')\n parser.add_argument('--supcon', dest='supcon', action='store_true', help='use supcon')\n parser.add_argument('--use-aa', dest='use_aa', action='store_true', help='use supcon')\n parser.add_argument('--resume', default=None, type=str, help='None if fresh start, base checkpoint path of the NN otherwise, we will asume that NN1 path ends with _1 and NN2 path ends with _2.')\n parser.add_argument('--dropout', default=False, type=bool, help='To add dropout layer before classifier in the ResNet18.')\n parser.add_argument('--mcdo', default=False, type=bool, help='To do multiple forward passes with the dropout layer enabled at codivide time.')\n parser.add_argument('--mcbn', default=False, type=bool, help='To do multiple forward passes with the BatchNorm layer enabled at codivide time.')\n parser.add_argument('--division', default=GMM, type=str, help='gmm, ccgmm, or_ccgmm, and_ccgmm')\n parser.add_argument('--lambda_x', 
default=0, type=float, help='weight for class variance in Lx')\n parser.add_argument('--lambda_unlabeled', default=0, type=float, help='weight for class variance in Lu')\n parser.add_argument('--predict', default=False, type=bool, help='True if predict False if not.')\n\n args = parser.parse_args()\n\n if torch.cuda.is_available():\n torch.cuda.set_device(args.gpuid)\n torch.cuda.manual_seed_all(args.seed)\n args.device = 'cuda:0'\n else:\n args.device = 'cpu'\n\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n return args\n\ndef linear_rampup(current, warm_up, lambda_u, rampup_length=16):\n current = np.clip((current - warm_up) / rampup_length, 0.0, 1.0)\n return lambda_u * float(current)\n\nclass SemiLoss(object):\n def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, warm_up, lambda_u):\n probs_u = torch.softmax(outputs_u, dim=1)\n\n Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))\n Lu = torch.mean((probs_u - targets_u) ** 2)\n\n return Lx, Lu, linear_rampup(epoch, warm_up, lambda_u)\n\nclass SemiLoss_uncertainty(object):\n def __call__(self, outputs_x, targets_x, uncertainty_weights_x, outputs_u, targets_u, uncertainty_weights_u, epoch, warm_up, lambda_u):\n probs_u = torch.softmax(outputs_u, dim=1)\n\n Lx = -torch.mean(uncertainty_weights_x * torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))\n Lu = torch.mean(uncertainty_weights_u * torch.mean((probs_u - targets_u) ** 2, dim=1))\n\n return Lx, Lu, linear_rampup(epoch, warm_up, lambda_u)\n\nclass NegEntropy(object):\n def __call__(self, outputs):\n probs = torch.softmax(outputs, dim=1)\n return torch.mean(torch.sum(probs.log() * probs, dim=1))\n\n\ndef create_model_reg(net='resnet18', dataset='cifar100', num_classes=100, device='cuda:0', drop=0, usedropout=False):\n if net == 'resnet18':\n model = ResNet18(num_classes=num_classes, drop=drop, usedropout=usedropout)\n model = model.to(device)\n return model\n else:\n model = SupCEResNet(net, num_classes=num_classes)\n model = model.to(device)\n return model\n\n\ndef create_model_selfsup(net='resnet18', dataset='cifar100', num_classes=100, device='cuda:0', drop=0, usedropout=False):\n chekpoint = torch.load('pretrained/ckpt_{}_{}.pth'.format(dataset, net))\n sd = {}\n for ke in chekpoint['model']:\n nk = ke.replace('module.', '')\n sd[nk] = chekpoint['model'][ke]\n model = SupCEResNet(net, num_classes=num_classes)\n model.load_state_dict(sd, strict=False)\n model = model.to(device)\n return model\n\n\ndef create_model_bit(net='resnet18', dataset='cifar100', num_classes=100, device='cuda:0', drop=0, mcdo=False):\n if net == 'resnet50':\n model = bit_models.KNOWN_MODELS['BiT-S-R50x1'](head_size=num_classes, zero_head=True)\n model.load_from(np.load(\"pretrained/BiT-S-R50x1.npz\"))\n model = model.to(device)\n elif net == 'resnet18':\n model = models.resnet18(pretrained=True)\n model.fc = nn.Linear(512 * 1, num_classes)\n model = model.to(device)\n else:\n raise ValueError()\n return model\n\n\ndef main():\n args = parse_args()\n log_dir = f'./checkpoint/{args.experiment_name}'\n os.makedirs(f'{log_dir}/models', exist_ok=True)\n log_name = f'{log_dir}/{args.dataset}_{args.r}_{args.lambda_u}_{args.noise_mode}'\n stats_log = open(log_name + '_stats.txt', 'a')\n test_log = open(log_name + '_acc.txt', 'a')\n gmm_log = open(log_name + '_gmm_acc.txt', 'a')\n cv_log = open(log_name + '_class_variance.txt', 'a')\n loss_log2 = open(log_name + '_loss2.txt', 'a')\n\n # assert division type\n assert args.division in DIVISION_OPTIONS, 
f'{args.division} division method not implemented. Choose from {DIVISION_OPTIONS}'\n\n\n # define warmup\n if args.dataset == 'cifar10':\n if args.method == 'reg':\n warm_up = 20 if args.aug else 10\n else:\n warm_up = 5\n num_classes = 10\n elif args.dataset == 'cifar100':\n if args.method == 'reg':\n warm_up = 60 if args.aug else 30\n else:\n warm_up = 5\n num_classes = 100\n else:\n raise ValueError('Wrong dataset')\n\n loader = dataloader.cifar_dataloader(args.dataset, r=args.r, noise_mode=args.noise_mode, batch_size=args.batch_size,\n num_workers=5, root_dir=args.data_path, log=stats_log,\n noise_file='%s/%.2f_%s.json' % (args.data_path, args.r, args.noise_mode),\n stronger_aug=args.aug)\n\n print('| Building net')\n if args.method == 'bit':\n create_model = create_model_bit\n elif args.method == 'reg':\n create_model = create_model_reg\n elif args.method == 'selfsup':\n create_model = create_model_selfsup\n else:\n raise ValueError()\n\n net1 = create_model(net=args.net, dataset=args.dataset, num_classes=num_classes, device=args.device, drop=args.drop)\n net2 = create_model(net=args.net, dataset=args.dataset, num_classes=num_classes, device=args.device, drop=args.drop)\n cudnn.benchmark = False # True\n\n uncertainty_criterion = SemiLoss_uncertainty()\n criterion = SemiLoss()\n\n if args.resume is None:\n optimizer1 = optim.SGD(net1.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n optimizer2 = optim.SGD(net2.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n resume_epoch = 0\n else:\n net1, optimizer1 = load_net_optimizer_from_ckpt_to_device(net1, args, f'{args.resume}_1.pt', args.device)\n net2, optimizer2 = load_net_optimizer_from_ckpt_to_device(net2, args, f'{args.resume}_2.pt', args.device)\n resume_epoch = get_epoch_from_checkpoint(args.resume)\n\n sched1 = torch.optim.lr_scheduler.StepLR(optimizer1, 150, gamma=0.1)\n sched2 = torch.optim.lr_scheduler.StepLR(optimizer2, 150, gamma=0.1)\n\n CE = nn.CrossEntropyLoss(reduction='none')\n CEloss = nn.CrossEntropyLoss()\n if args.noise_mode == 'asym':\n conf_penalty = NegEntropy()\n else:\n conf_penalty = None\n all_loss = [[], []] # save the history of losses from two networks\n\n\n if args.predict:\n pred_trainloader = loader.run('eval_train')\n pred_testloader = loader.run('test')\n\n pred_test(pred_testloader, net1, net2, '{log_dir}/predicted_test.json')\n pred_train(pred_trainloader, net1, net2, '{log_dir}/predicted_train')\n print('Labels predicted!')\n sys.exit()\n\n\n print(f'MCDO? : {args.mcdo}\\tMCBN? 
: {args.mcbn}')\n if args.mcdo:\n run_train_loop_mcdo(net1, optimizer1, sched1, net2, optimizer2, sched2, criterion, CEloss, CE, loader, args.p_threshold,\n warm_up, args.num_epochs, all_loss, args.batch_size, num_classes, args.device, args.lambda_u, args.lambda_x, args.T,\n args.alpha, args.noise_mode, args.dataset, args.r, conf_penalty, stats_log, cv_log, loss_log2, test_log, gmm_log, f'{log_dir}/models', resume_epoch, args.division)\n elif args.mcbn:\n run_train_loop_mcbn(net1, optimizer1, sched1, net2, optimizer2, sched2, criterion, uncertainty_criterion, CEloss, CE, loader, args.p_threshold,\n warm_up, args.num_epochs, all_loss, args.batch_size, num_classes, args.device, args.lambda_u, args.lambda_x, args.lambda_unlabeled, args.T,\n args.alpha, args.noise_mode, args.dataset, args.r, conf_penalty, stats_log, cv_log, loss_log2, test_log, gmm_log, f'{log_dir}/models', resume_epoch, args.division)\n else:\n print('Vanilla')\n run_train_loop(net1, optimizer1, sched1, net2, optimizer2, sched2, criterion, CEloss, CE, loader, args.p_threshold,\n warm_up, args.num_epochs, all_loss, args.batch_size, num_classes, args.device, args.lambda_u, args.T,\n args.alpha, args.noise_mode, args.dataset, args.r, conf_penalty, stats_log, loss_log2, test_log, f'{log_dir}/models', resume_epoch, args.division)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.load", "numpy.clip" ] ]
cbc-group/stitching
[ "f1baca2f34394e072c2f5c5787882b108d1b7c27" ]
[ "stitching/reader.py" ]
[ "import glob\r\nimport logging\r\nimport os\r\nimport re\r\n\r\nimport pandas as pd\r\nfrom re import X\r\n\r\n__all__ = [\"filename_to_tile\", \"read_script\", \"read_settings\"]\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef read_script(script_path):\r\n # summary section\r\n df = pd.read_csv(script_path, nrows=1)\r\n tile_shape = tuple(df[f\"# of Subvolume Z Stacks {ax}\"][0] for ax in (\"Z\", \"Y\", \"X\"))\r\n logger.info(f\"tile shape: {tile_shape}\")\r\n\r\n # actual position\r\n df = pd.read_csv(script_path, skiprows=2)\r\n # sort by tile spatial index, fastest dimension first\r\n df = df.sort_values(by=[f\"Stack {ax}\" for ax in (\"X\", \"Y\", \"Z\")])\r\n\r\n return (\r\n tile_shape,\r\n {\r\n tuple(index): tuple(pos)\r\n for index, pos in zip(\r\n df[[f\"Stack {ax}\" for ax in (\"Z\", \"Y\", \"X\")]].values,\r\n df[[f\"Absolute {ax} (um)\" for ax in (\"Z\", \"Y\", \"X\")]].values,\r\n )\r\n },\r\n )\r\n\r\n\r\ndef filename_to_tile(data_dir, script_path):\r\n \"\"\"\r\n Map filename to tile index.\r\n\r\n Args:\r\n data_dir (str): path to the raw data folder\r\n script_path (str): path to the script that generated scanning steps\r\n\r\n Returns:\r\n (tuple of int): (Z, Y, X)\r\n \"\"\"\r\n file_list = glob.glob(os.path.join(data_dir, \"*.tif\"))\r\n file_list.sort()\r\n \r\n # determine if we need to skip multiple rows\r\n with open(script_path, 'r') as fd:\r\n offset = -1\r\n for lineno, line in enumerate(fd):\r\n if line.startswith('----Stack scan order----'):\r\n offset = lineno\r\n break\r\n else:\r\n # summary section contains 2 lines\r\n offset = 2\r\n logger.info(f'offset lines: {offset}')\r\n\r\n # actual position\r\n df = pd.read_csv(script_path, skiprows=offset)\r\n # ... only keep index\r\n df = df[[f\"Stack {ax}\" for ax in (\"Z\", \"Y\", \"X\")]]\r\n \r\n # NOTE compensate\r\n nx = df['Stack X'].max()\r\n df['Stack X'] = nx - df['Stack X']\r\n\r\n return {fname: tuple(row) for fname, row in zip(file_list, df.values)}\r\n\r\n\r\ndef read_settings(data_dir):\r\n \"\"\"\r\n Read SPIM generated setting file.\r\n\r\n Returns:\r\n (tuple of int): (Z, Y, X)\r\n \"\"\"\r\n file_list = glob.glob(os.path.join(data_dir, \"*_Settings.txt\"))\r\n if len(file_list) > 1:\r\n logger.warning(\"found multiple setting file, ignored\")\r\n setting_path = file_list[0]\r\n\r\n # parse image size\r\n image_shape, binning = None, None\r\n with open(setting_path, \"r\") as fd:\r\n for line in fd:\r\n matches = re.match(r\"# of Pixels :\\s+X=(\\d+) Y=(\\d+)\", line)\r\n if matches is not None:\r\n # NOTE we know z will only have 1 layer\r\n image_shape = int(matches.group(2)), int(matches.group(1))\r\n\r\n matches = re.match(r\"Binning :\\s+X=(\\d+) Y=(\\d+)\", line)\r\n if matches is not None:\r\n # NOTE we know z will only have 1 layer\r\n binning = int(matches.group(2)), int(matches.group(1))\r\n\r\n # recalibrate to actual size\r\n image_shape = tuple(s // b for s, b in zip(image_shape, binning))\r\n\r\n # force to 3D\r\n # NOTE parse upper section for z info\r\n return (1,) + image_shape\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import coloredlogs\r\n\r\n from utils import find_dataset_dir\r\n\r\n coloredlogs.install(\r\n level=\"DEBUG\", fmt=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%H:%M:%S\"\r\n )\r\n\r\n path = find_dataset_dir(\"trial_7\")\r\n path = os.path.join(path, \"volume.csv\")\r\n print(path)\r\n print(read_script(path))\r\n" ]
[ [ "pandas.read_csv" ] ]
bgyori/pyobo
[ "f199f62f65fc7faff307b56f979a369202c8ad33" ]
[ "src/pyobo/sources/hgncgenefamily.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Converter for HGNC Gene Families.\"\"\"\n\nfrom collections import defaultdict\nfrom typing import Iterable, List, Mapping\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom ..path_utils import ensure_path\nfrom ..struct import Obo, Reference, Synonym, SynonymTypeDef, Term, from_species\n\nPREFIX = 'hgnc.genefamily'\nFAMILIES_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/csv/genefamily_db_tables/family.csv'\nHIERARCHY_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/csv/genefamily_db_tables/hierarchy.csv'\n\nsymbol_type = SynonymTypeDef(id='symbol', name='symbol')\n\n\ndef get_obo() -> Obo:\n \"\"\"Get HGNC Gene Families as OBO.\"\"\"\n return Obo(\n ontology=PREFIX,\n name='HGNC Gene Families',\n iter_terms=get_terms,\n synonym_typedefs=[symbol_type],\n typedefs=[from_species],\n auto_generated_by=f'bio2obo:{PREFIX}',\n )\n\n\ndef get_hierarchy() -> Mapping[str, List[str]]:\n \"\"\"Get the HGNC Gene Families hierarchy as a dictionary.\"\"\"\n path = ensure_path(PREFIX, HIERARCHY_URL)\n df = pd.read_csv(path, dtype={'parent_fam_id': str, 'child_fam_id': str})\n d = defaultdict(list)\n for parent_id, child_id in df.values:\n d[child_id].append(parent_id)\n return dict(d)\n\n\nCOLUMNS = ['id', 'abbreviation', 'name', 'pubmed_ids', 'desc_comment', 'desc_go']\n\n\ndef get_terms() -> Iterable[Term]:\n \"\"\"Get the HGNC Gene Family terms.\"\"\"\n terms = list(_get_terms_helper())\n hierarchy = get_hierarchy()\n\n id_to_term = {term.reference.identifier: term for term in terms}\n for child_id, parent_ids in hierarchy.items():\n child = id_to_term[child_id]\n for parent_id in parent_ids:\n parent: Term = id_to_term[parent_id]\n child.parents.append(Reference(\n prefix=PREFIX,\n identifier=parent_id,\n name=parent.name,\n ))\n return terms\n\n\ndef _get_terms_helper() -> Iterable[Term]:\n path = ensure_path(PREFIX, FAMILIES_URL)\n df = pd.read_csv(path, dtype={'id': str})\n\n it = tqdm(df[COLUMNS].values, desc=f'Mapping {PREFIX}')\n for hgncgenefamily_id, symbol, name, pubmed_ids, definition, desc_go in it:\n if pubmed_ids and pd.notna(pubmed_ids):\n provenance = [Reference(prefix='pubmed', identifier=s.strip()) for s in pubmed_ids.split(',')]\n else:\n provenance = []\n\n if not definition or pd.isna(definition):\n definition = ''\n\n xrefs = []\n if desc_go and pd.notna(desc_go):\n go_id = desc_go[len('http://purl.uniprot.org/go/'):]\n xrefs.append(Reference(prefix='go', identifier=go_id))\n\n synonyms = []\n if symbol and pd.notna(symbol):\n synonyms.append(Synonym(name=symbol, type=symbol_type))\n\n term = Term(\n reference=Reference(prefix=PREFIX, identifier=hgncgenefamily_id, name=name),\n definition=definition,\n provenance=provenance,\n xrefs=xrefs,\n synonyms=synonyms,\n )\n term.set_species(identifier='9606', name='Homo sapiens')\n yield term\n\n\nif __name__ == '__main__':\n get_obo().write_default()\n" ]
[ [ "pandas.notna", "pandas.isna", "pandas.read_csv" ] ]
sanketvmehta/continual-learning
[ "9945f84c13a9f63831a11568f5b4a2e1cd6cc96b" ]
[ "_compare_taskID.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport os\nimport numpy as np\nfrom param_stamp import get_param_stamp_from_args\nimport visual_plt\nimport main\nfrom param_values import set_default_values\n\n\ndescription = 'Compare two ways of using task-ID info (with different CL strategies) on permuted / split MNIST.'\nparser = argparse.ArgumentParser('./_compare_taskID.py', description=description)\nparser.add_argument('--seed', type=int, default=1, help='[first] random seed (for each random-module used)')\nparser.add_argument('--n-seeds', type=int, default=1, help='how often to repeat?')\nparser.add_argument('--no-gpus', action='store_false', dest='cuda', help=\"don't use GPUs\")\nparser.add_argument('--data-dir', type=str, default='./datasets', dest='d_dir', help=\"default: %(default)s\")\nparser.add_argument('--plot-dir', type=str, default='./plots', dest='p_dir', help=\"default: %(default)s\")\nparser.add_argument('--results-dir', type=str, default='./results', dest='r_dir', help=\"default: %(default)s\")\n\n# expirimental task parameters.\ntask_params = parser.add_argument_group('Task Parameters')\ntask_params.add_argument('--experiment', type=str, default='permMNIST', choices=['permMNIST', 'splitMNIST'])\ntask_params.add_argument('--tasks', type=int, help='number of tasks')\n\n# specify loss functions to be used\nloss_params = parser.add_argument_group('Loss Parameters')\nloss_params.add_argument('--bce', action='store_true', help=\"use binary (instead of multi-class) classication loss\")\n\n# model architecture parameters\nmodel_params = parser.add_argument_group('Parameters Main Model')\nmodel_params.add_argument('--fc-layers', type=int, default=3, dest='fc_lay', help=\"# of fully-connected layers\")\nmodel_params.add_argument('--fc-units', type=int, metavar=\"N\", help=\"# of units in first fc-layers\")\nmodel_params.add_argument('--fc-drop', type=float, default=0., help=\"dropout probability for fc-units\")\nmodel_params.add_argument('--fc-bn', type=str, default=\"no\", help=\"use batch-norm in the fc-layers (no|yes)\")\nmodel_params.add_argument('--fc-nl', type=str, default=\"relu\", choices=[\"relu\", \"leakyrelu\"])\n\n# training hyperparameters / initialization\ntrain_params = parser.add_argument_group('Training Parameters')\ntrain_params.add_argument('--iters', type=int, help=\"# batches to optimize solver\")\ntrain_params.add_argument('--lr', type=float, help=\"learning rate\")\ntrain_params.add_argument('--batch', type=int, default=128, help=\"batch-size\")\ntrain_params.add_argument('--optimizer', type=str, choices=['adam', 'adam_reset', 'sgd'], default='adam')\n\n# \"memory replay\" parameters\nreplay_params = parser.add_argument_group('Replay Parameters')\nreplay_params.add_argument('--temp', type=float, default=2., dest='temp', help=\"temperature for distillation\")\n# -generative model parameters (if separate model)\ngenmodel_params = parser.add_argument_group('Generative Model Parameters')\ngenmodel_params.add_argument('--g-z-dim', type=int, default=100, help='size of latent representation (default: 100)')\ngenmodel_params.add_argument('--g-fc-lay', type=int, help='[fc_layers] in generator (default: same as classifier)')\ngenmodel_params.add_argument('--g-fc-uni', type=int, help='[fc_units] in generator (default: same as classifier)')\n# - hyper-parameters for generative model (if separate model)\ngen_params = parser.add_argument_group('Generator Hyper Parameters')\ngen_params.add_argument('--g-iters', type=int, help=\"# batches to train generator (default: as 
classifier)\")\ngen_params.add_argument('--lr-gen', type=float, help=\"learning rate generator (default: lr)\")\n\n# \"memory allocation\" parameters\ncl_params = parser.add_argument_group('Memory Allocation Parameters')\ncl_params.add_argument('--lambda', type=float, dest=\"ewc_lambda\", help=\"--> EWC: regularisation strength\")\ncl_params.add_argument('--o-lambda', type=float, help=\"--> online EWC: regularisation strength\")\ncl_params.add_argument('--fisher-n', type=int, help=\"--> EWC: sample size estimating Fisher Information\")\ncl_params.add_argument('--gamma', type=float, help=\"--> EWC: forgetting coefficient (for 'online EWC')\")\ncl_params.add_argument('--emp-fi', action='store_true', help=\"--> EWC: estimate FI with provided labels\")\ncl_params.add_argument('--c', type=float, dest=\"si_c\", help=\"--> SI: regularisation strength\")\ncl_params.add_argument('--epsilon', type=float, default=0.1, dest=\"epsilon\", help=\"--> SI: dampening parameter\")\ncl_params.add_argument('--gating-prop', type=float, metavar=\"PROP\", help=\"--> XdG: prop neurons per layer to gate\")\n\n# exemplar parameters\nexemplar_params = parser.add_argument_group('Exemplar Parameters')\nexemplar_params.add_argument('--use-exemplars', action='store_true', help=\"use stored exemplars for classification?\")\nexemplar_params.add_argument('--budget', type=int, default=2000, dest=\"budget\",help=\"how many exemplars can be stored?\")\nexemplar_params.add_argument('--herding',action='store_true',help=\"use herding to select exemplars (instead of random)\")\nexemplar_params.add_argument('--norm-exemplars', action='store_true', help=\"normalize features/averages of exemplars\")\n\n# evaluation parameters\neval_params = parser.add_argument_group('Evaluation Parameters')\neval_params.add_argument('--pdf', action='store_true', help=\"generate pdfs for individual experiments\")\neval_params.add_argument('--visdom', action='store_true', help=\"use visdom for on-the-fly plots\")\neval_params.add_argument('--prec-n', type=int, default=1024, help=\"# samples for evaluating solver's precision\")\neval_params.add_argument('--sample-n', type=int, default=64, help=\"# images to show\")\n\n\n\ndef get_prec(args, ext=\"\"):\n # -get param-stamp\n param_stamp = get_param_stamp_from_args(args)\n # -check whether already run; if not do so\n if not os.path.isfile('{}/prec{}-{}.txt'.format(args.r_dir, ext, param_stamp)):\n print(\" ...running: ... 
\")\n main.run(args)\n # -get average precision\n fileName = '{}/prec{}-{}.txt'.format(args.r_dir, ext, param_stamp)\n file = open(fileName)\n ave = float(file.readline())\n file.close()\n # -print average precision on screen\n print(\"--> average precision: {}\".format(ave))\n # -return average precision\n return ave\n\n\ndef collect_all(method_dict, seed_list, args, ext=\"\", name=None):\n # -print name of method on screen\n if name is not None:\n print(\"\\n------{}------\".format(name))\n # -run method for all random seeds\n for seed in seed_list:\n args.seed = seed\n method_dict[seed] = get_prec(args, ext=ext)\n # -return updated dictionary with results\n return method_dict\n\n\n\nif __name__ == '__main__':\n\n ## Load input-arguments\n args = parser.parse_args()\n # -set the scenario (as this script is about comparing different ways of using the task-ID label)\n args.scenario = \"task\"\n # -set default-values for certain arguments based on chosen scenario & experiment\n args = set_default_values(args)\n # -set other default arguments\n args.lr_gen = args.lr if args.lr_gen is None else args.lr_gen\n args.g_iters = args.iters if args.g_iters is None else args.g_iters\n args.g_fc_lay = args.fc_lay if args.g_fc_lay is None else args.g_fc_lay\n args.g_fc_uni = args.fc_units if args.g_fc_uni is None else args.g_fc_uni\n # -create results-directory if needed\n if not os.path.isdir(args.r_dir):\n os.mkdir(args.r_dir)\n # -create plots-directory if needed\n if not os.path.isdir(args.p_dir):\n os.mkdir(args.p_dir)\n\n ## Add non-optional input argument that will be the same for all runs\n args.feedback = False\n args.add_exemplars = False\n args.bce_distill= False\n args.icarl = False\n args.log_per_task = True\n\n ## Add input arguments that will be different for different runs\n args.distill = False\n args.ewc = False\n args.online = False\n args.si = False\n args.xdg = False\n args.singlehead = False\n # args.seed could of course also vary!\n\n #-------------------------------------------------------------------------------------------------#\n\n #--------------------------#\n #----- RUN ALL MODELS -----#\n #--------------------------#\n\n seed_list = list(range(args.seed, args.seed+args.n_seeds))\n\n\n #########---> Task-ID only in output layer (i.e., multi-headed softmax layer)\n\n ## None\n args.replay = \"none\"\n NONE = {}\n NONE = collect_all(NONE, seed_list, args, name=\"None\")\n\n ## EWC\n args.ewc = True\n EWC = {}\n EWC = collect_all(EWC, seed_list, args, name=\"EWC\")\n\n ## online EWC\n args.online = True\n args.ewc_lambda = args.o_lambda\n OEWC = {}\n OEWC = collect_all(OEWC, seed_list, args, name=\"Online EWC\")\n args.ewc = False\n args.online = False\n\n ## SI\n args.si = True\n SI = {}\n SI = collect_all(SI, seed_list, args, name=\"SI\")\n args.si = False\n\n ## LwF\n args.replay = \"current\"\n args.distill = True\n LWF = {}\n LWF = collect_all(LWF, seed_list, args, name=\"LwF\")\n\n ## DGR\n args.replay = \"generative\"\n args.distill = False\n RP = {}\n RP = collect_all(RP, seed_list, args, name=\"DGR\")\n\n ## DGR+distill\n args.replay = \"generative\"\n args.distill = True\n RKD = {}\n RKD = collect_all(RKD, seed_list, args, name=\"DGR+distill\")\n args.replay = \"none\"\n args.distill = False\n\n\n\n #########---> Task-ID only in hidden layers (i.e., XdG with single-headed softmax layer)\n args.singlehead = True\n args.xdg = True\n\n ## None\n args.replay = \"none\"\n SNONE = {}\n SNONE = collect_all(SNONE, seed_list, args, name=\"None\")\n\n ## EWC\n args.ewc = 
True\n SEWC = {}\n SEWC = collect_all(SEWC, seed_list, args, name=\"EWC\")\n\n ## online EWC\n args.online = True\n args.ewc_lambda = args.o_lambda\n SOEWC = {}\n SOEWC = collect_all(SOEWC, seed_list, args, name=\"Online EWC\")\n args.ewc = False\n args.online = False\n\n ## SI\n args.si = True\n SSI = {}\n SSI = collect_all(SSI, seed_list, args, name=\"SI\")\n args.si = False\n\n ## LwF\n args.replay = \"current\"\n args.distill = True\n SLWF = {}\n SLWF = collect_all(SLWF, seed_list, args, name=\"LwF\")\n\n ## DGR\n args.replay = \"generative\"\n args.distill = False\n SRP = {}\n SRP = collect_all(SRP, seed_list, args, name=\"DGR\")\n\n ## DGR+distill\n args.replay = \"generative\"\n args.distill = True\n SRKD = {}\n SRKD = collect_all(SRKD, seed_list, args, name=\"DGR+distill\")\n args.replay = \"none\"\n args.distill = False\n\n\n #-------------------------------------------------------------------------------------------------#\n\n #---------------------------#\n #----- COLLECT RESULTS -----#\n #---------------------------#\n\n ave_prec = {}\n\n ## For each seed, create list with average precisions\n for seed in seed_list:\n ave_prec[seed] = [NONE[seed], EWC[seed], OEWC[seed], SI[seed], LWF[seed], RP[seed], RKD[seed],\n SNONE[seed], SEWC[seed], SOEWC[seed], SSI[seed], SLWF[seed], SRP[seed], SRKD[seed]]\n\n\n\n #-------------------------------------------------------------------------------------------------#\n\n #--------------------#\n #----- PLOTTING -----#\n #--------------------#\n\n # name for plot\n plot_name = \"summaryTaskID-{}{}-{}\".format(args.experiment, args.tasks, args.scenario)\n scheme = \"{}-incremental learning\".format(args.scenario)\n title = \"{} - {}\".format(args.experiment, scheme)\n title_list = [\"Task-ID in output-layer\", \"Task-ID in hidden-layers\"]\n\n # select names / colors / ids\n names = [\"None\", \"EWC\", \"o-EWC\", \"SI\", \"LwF\", \"DGR\", \"DGR+distil\"]\n colors = [\"grey\", \"deepskyblue\", \"blue\", \"yellowgreen\", \"goldenrod\", \"indianred\", \"red\"]\n base_ids = [0,1,2,3,4,5,6]\n ids = [\n base_ids,\n [i+len(base_ids) for i in base_ids]\n ]\n\n # open pdf\n pp = visual_plt.open_pdf(\"{}/{}.pdf\".format(args.p_dir, plot_name))\n figure_list = []\n\n # multiple bar-plot\n mean_list = []\n sems_list = []\n cis_list = []\n for id_list in ids:\n mean_list.append([np.mean([ave_prec[seed][id] for seed in seed_list]) for id in id_list])\n if args.n_seeds>1:\n sems_list.append([np.sqrt(np.var([ave_prec[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in id_list])\n cis_list.append([1.96*np.sqrt(np.var([ave_prec[seed][id] for seed in seed_list])/(len(seed_list)-1)) for id in id_list])\n figure = visual_plt.plot_bars(mean_list, names=names, colors=colors, ylabel=\"average precision (after all tasks)\",\n title_list=title_list, top_title=title, yerr=cis_list if args.n_seeds>1 else None,\n ylim=(0,1))\n figure_list.append(figure)\n\n # print results to screen\n print(\"\\n\\n\"+\"#\"*70+\"\\n SUMMARY RESULTS: {}\\n\".format(title)+\"-\"*70)\n print(\" \"*13+\"Task-ID in output layer Task-ID in hidden layers\\n\"+\"-\"*70)\n for i,name in enumerate(names):\n if len(seed_list) > 1:\n print(\"{:15s} {:.2f} ({:.2f}) {:.2f} ({:.2f})\".format(\n name, 100*mean_list[0][i], 100*sems_list[0][i], 100*mean_list[1][i], 100*sems_list[1][i],\n ))\n else:\n print(\"{:16s} {:.2f} {:.2f}\".format(\n name, 100*mean_list[0][i], 100*mean_list[1][i]\n ))\n print(\"#\"*70)\n\n # add all figures to pdf\n for figure in figure_list:\n pp.savefig(figure)\n\n # 
close the pdf\n pp.close()\n\n # Print name of generated plot on screen\n print(\"\\nGenerated plot: {}/{}.pdf\\n\".format(args.p_dir, plot_name))" ]
[ [ "numpy.var", "numpy.mean" ] ]
ivanliu1989/pandas
[ "5bbe99e7cd26651e9ecb4ab59a4cdcd335535874" ]
[ "pandas/tools/plotting.py" ]
[ "# being a bit too dynamic\n# pylint: disable=E1101\nimport datetime\nimport warnings\nimport re\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom distutils.version import LooseVersion\n\nimport numpy as np\n\nfrom pandas.util.decorators import cache_readonly, deprecate_kwarg\nimport pandas.core.common as com\nfrom pandas.core.generic import _shared_docs, _shared_doc_kwargs\nfrom pandas.core.index import Index, MultiIndex\nfrom pandas.core.series import Series, remove_na\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tseries.period import PeriodIndex, Period\nimport pandas.tseries.frequencies as frequencies\nfrom pandas.tseries.offsets import DateOffset\nfrom pandas.compat import range, lrange, lmap, map, zip, string_types\nimport pandas.compat as compat\nfrom pandas.util.decorators import Appender\n\ntry: # mpl optional\n import pandas.tseries.converter as conv\n conv.register() # needs to override so set_xlim works with str/number\nexcept ImportError:\n pass\n\n# Extracted from https://gist.github.com/huyng/816622\n# this is the rcParams set when setting display.with_mpl_style\n# to True.\nmpl_stylesheet = {\n 'axes.axisbelow': True,\n 'axes.color_cycle': ['#348ABD',\n '#7A68A6',\n '#A60628',\n '#467821',\n '#CF4457',\n '#188487',\n '#E24A33'],\n 'axes.edgecolor': '#bcbcbc',\n 'axes.facecolor': '#eeeeee',\n 'axes.grid': True,\n 'axes.labelcolor': '#555555',\n 'axes.labelsize': 'large',\n 'axes.linewidth': 1.0,\n 'axes.titlesize': 'x-large',\n 'figure.edgecolor': 'white',\n 'figure.facecolor': 'white',\n 'figure.figsize': (6.0, 4.0),\n 'figure.subplot.hspace': 0.5,\n 'font.family': 'monospace',\n 'font.monospace': ['Andale Mono',\n 'Nimbus Mono L',\n 'Courier New',\n 'Courier',\n 'Fixed',\n 'Terminal',\n 'monospace'],\n 'font.size': 10,\n 'interactive': True,\n 'keymap.all_axes': ['a'],\n 'keymap.back': ['left', 'c', 'backspace'],\n 'keymap.forward': ['right', 'v'],\n 'keymap.fullscreen': ['f'],\n 'keymap.grid': ['g'],\n 'keymap.home': ['h', 'r', 'home'],\n 'keymap.pan': ['p'],\n 'keymap.save': ['s'],\n 'keymap.xscale': ['L', 'k'],\n 'keymap.yscale': ['l'],\n 'keymap.zoom': ['o'],\n 'legend.fancybox': True,\n 'lines.antialiased': True,\n 'lines.linewidth': 1.0,\n 'patch.antialiased': True,\n 'patch.edgecolor': '#EEEEEE',\n 'patch.facecolor': '#348ABD',\n 'patch.linewidth': 0.5,\n 'toolbar': 'toolbar2',\n 'xtick.color': '#555555',\n 'xtick.direction': 'in',\n 'xtick.major.pad': 6.0,\n 'xtick.major.size': 0.0,\n 'xtick.minor.pad': 6.0,\n 'xtick.minor.size': 0.0,\n 'ytick.color': '#555555',\n 'ytick.direction': 'in',\n 'ytick.major.pad': 6.0,\n 'ytick.major.size': 0.0,\n 'ytick.minor.pad': 6.0,\n 'ytick.minor.size': 0.0\n}\n\ndef _get_standard_kind(kind):\n return {'density': 'kde'}.get(kind, kind)\n\ndef _get_standard_colors(num_colors=None, colormap=None, color_type='default',\n color=None):\n import matplotlib.pyplot as plt\n\n if color is None and colormap is not None:\n if isinstance(colormap, compat.string_types):\n import matplotlib.cm as cm\n cmap = colormap\n colormap = cm.get_cmap(colormap)\n if colormap is None:\n raise ValueError(\"Colormap {0} is not recognized\".format(cmap))\n colors = lmap(colormap, np.linspace(0, 1, num=num_colors))\n elif color is not None:\n if colormap is not None:\n warnings.warn(\"'color' and 'colormap' cannot be used \"\n \"simultaneously. 
Using 'color'\")\n colors = color\n else:\n if color_type == 'default':\n colors = plt.rcParams.get('axes.color_cycle', list('bgrcmyk'))\n if isinstance(colors, compat.string_types):\n colors = list(colors)\n elif color_type == 'random':\n import random\n def random_color(column):\n random.seed(column)\n return [random.random() for _ in range(3)]\n\n colors = lmap(random_color, lrange(num_colors))\n else:\n raise NotImplementedError\n\n if len(colors) != num_colors:\n multiple = num_colors//len(colors) - 1\n mod = num_colors % len(colors)\n\n colors += multiple * colors\n colors += colors[:mod]\n\n return colors\n\nclass _Options(dict):\n \"\"\"\n Stores pandas plotting options.\n Allows for parameter aliasing so you can just use parameter names that are\n the same as the plot function parameters, but is stored in a canonical\n format that makes it easy to breakdown into groups later\n \"\"\"\n\n # alias so the names are same as plotting method parameter names\n _ALIASES = {'x_compat': 'xaxis.compat'}\n _DEFAULT_KEYS = ['xaxis.compat']\n\n def __init__(self):\n self['xaxis.compat'] = False\n\n def __getitem__(self, key):\n key = self._get_canonical_key(key)\n if key not in self:\n raise ValueError('%s is not a valid pandas plotting option' % key)\n return super(_Options, self).__getitem__(key)\n\n def __setitem__(self, key, value):\n key = self._get_canonical_key(key)\n return super(_Options, self).__setitem__(key, value)\n\n def __delitem__(self, key):\n key = self._get_canonical_key(key)\n if key in self._DEFAULT_KEYS:\n raise ValueError('Cannot remove default parameter %s' % key)\n return super(_Options, self).__delitem__(key)\n\n def __contains__(self, key):\n key = self._get_canonical_key(key)\n return super(_Options, self).__contains__(key)\n\n def reset(self):\n \"\"\"\n Reset the option store to its initial state\n\n Returns\n -------\n None\n \"\"\"\n self.__init__()\n\n def _get_canonical_key(self, key):\n return self._ALIASES.get(key, key)\n\n @contextmanager\n def use(self, key, value):\n \"\"\"\n Temporarily set a parameter value using the with statement.\n Aliasing allowed.\n \"\"\"\n old_value = self[key]\n try:\n self[key] = value\n yield self\n finally:\n self[key] = old_value\n\n\nplot_params = _Options()\n\n\ndef scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,\n diagonal='hist', marker='.', density_kwds=None,\n hist_kwds=None, range_padding=0.05, **kwds):\n \"\"\"\n Draw a matrix of scatter plots.\n\n Parameters\n ----------\n frame : DataFrame\n alpha : float, optional\n amount of transparency applied\n figsize : (float,float), optional\n a tuple (width, height) in inches\n ax : Matplotlib axis object, optional\n grid : bool, optional\n setting this to True will show the grid\n diagonal : {'hist', 'kde'}\n pick between 'kde' and 'hist' for\n either Kernel Density Estimation or Histogram\n plot in the diagonal\n marker : str, optional\n Matplotlib marker type, default '.'\n hist_kwds : other plotting keyword arguments\n To be passed to hist function\n density_kwds : other plotting keyword arguments\n To be passed to kernel density estimate plot\n range_padding : float, optional\n relative extension of axis range in x and y\n with respect to (x_max - x_min) or (y_max - y_min),\n default 0.05\n kwds : other plotting keyword arguments\n To be passed to scatter function\n\n Examples\n --------\n >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])\n >>> scatter_matrix(df, alpha=0.2)\n \"\"\"\n import matplotlib.pyplot as plt\n from 
matplotlib.artist import setp\n\n df = frame._get_numeric_data()\n n = df.columns.size\n naxes = n * n\n fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,\n squeeze=False)\n\n # no gaps between subplots\n fig.subplots_adjust(wspace=0, hspace=0)\n\n mask = com.notnull(df)\n\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwds.setdefault('c', plt.rcParams['patch.facecolor'])\n\n boundaries_list = []\n for a in df.columns:\n values = df[a].values[mask[a].values]\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2.\n boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))\n\n for i, a in zip(lrange(n), df.columns):\n for j, b in zip(lrange(n), df.columns):\n ax = axes[i, j]\n\n if i == j:\n values = df[a].values[mask[a].values]\n\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == 'hist':\n ax.hist(values, **hist_kwds)\n\n elif diagonal in ('kde', 'density'):\n from scipy.stats import gaussian_kde\n y = values\n gkde = gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n\n ax.set_xlim(boundaries_list[i])\n\n else:\n common = (mask[a] & mask[b]).values\n\n ax.scatter(df[b][common], df[a][common],\n marker=marker, alpha=alpha, **kwds)\n\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n _label_axis(ax, kind='x', label=b, position='bottom', rotate=True)\n\n _label_axis(ax, kind='y', label=a, position='left')\n\n if j!= 0:\n ax.yaxis.set_visible(False)\n if i != n-1:\n ax.xaxis.set_visible(False)\n\n for ax in axes.flat:\n setp(ax.get_xticklabels(), fontsize=8)\n setp(ax.get_yticklabels(), fontsize=8)\n\n return axes\n\ndef _label_axis(ax, kind='x', label='', position='top',\n ticks=True, rotate=False):\n\n from matplotlib.artist import setp\n if kind == 'x':\n ax.set_xlabel(label, visible=True)\n ax.xaxis.set_visible(True)\n ax.xaxis.set_ticks_position(position)\n ax.xaxis.set_label_position(position)\n if rotate:\n setp(ax.get_xticklabels(), rotation=90)\n elif kind == 'y':\n ax.yaxis.set_visible(True)\n ax.set_ylabel(label, visible=True)\n # ax.set_ylabel(a)\n ax.yaxis.set_ticks_position(position)\n ax.yaxis.set_label_position(position)\n return\n\n\n\n\n\ndef _gca():\n import matplotlib.pyplot as plt\n return plt.gca()\n\n\ndef _gcf():\n import matplotlib.pyplot as plt\n return plt.gcf()\n\ndef _get_marker_compat(marker):\n import matplotlib.lines as mlines\n import matplotlib as mpl\n if mpl.__version__ < '1.1.0' and marker == '.':\n return 'o'\n if marker not in mlines.lineMarkers:\n return 'o'\n return marker\n\ndef radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):\n \"\"\"RadViz - a multivariate data visualization algorithm\n\n Parameters:\n -----------\n frame: DataFrame\n class_column: str\n Column name containing class names\n ax: Matplotlib axis object, optional\n color: list or tuple, optional\n Colors to use for the different classes\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. 
If string, load colormap with that name\n from matplotlib.\n kwds: keywords\n Options to pass to matplotlib scatter plotting method\n\n Returns:\n --------\n ax: Matplotlib axis object\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n\n def normalize(series):\n a = min(series)\n b = max(series)\n return (series - a) / (b - a)\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n df = frame.drop(class_column, axis=1).apply(normalize)\n\n if ax is None:\n ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])\n\n to_plot = {}\n colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,\n color_type='random', color=color)\n\n for kls in classes:\n to_plot[kls] = [[], []]\n\n n = len(frame.columns) - 1\n s = np.array([(np.cos(t), np.sin(t))\n for t in [2.0 * np.pi * (i / float(n))\n for i in range(n)]])\n\n for i in range(n):\n row = df.iloc[i].values\n row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)\n y = (s * row_).sum(axis=0) / row.sum()\n kls = class_col.iat[i]\n to_plot[kls][0].append(y[0])\n to_plot[kls][1].append(y[1])\n\n for i, kls in enumerate(classes):\n ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],\n label=com.pprint_thing(kls), **kwds)\n ax.legend()\n\n ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))\n\n for xy, name in zip(s, df.columns):\n\n ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))\n\n if xy[0] < 0.0 and xy[1] < 0.0:\n ax.text(xy[0] - 0.025, xy[1] - 0.025, name,\n ha='right', va='top', size='small')\n elif xy[0] < 0.0 and xy[1] >= 0.0:\n ax.text(xy[0] - 0.025, xy[1] + 0.025, name,\n ha='right', va='bottom', size='small')\n elif xy[0] >= 0.0 and xy[1] < 0.0:\n ax.text(xy[0] + 0.025, xy[1] - 0.025, name,\n ha='left', va='top', size='small')\n elif xy[0] >= 0.0 and xy[1] >= 0.0:\n ax.text(xy[0] + 0.025, xy[1] + 0.025, name,\n ha='left', va='bottom', size='small')\n\n ax.axis('equal')\n return ax\n\n@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')\ndef andrews_curves(frame, class_column, ax=None, samples=200, color=None,\n colormap=None, **kwds):\n \"\"\"\n Parameters:\n -----------\n frame : DataFrame\n Data to be plotted, preferably normalized to (0.0, 1.0)\n class_column : Name of the column containing class names\n ax : matplotlib axes object, default None\n samples : Number of points to plot in each curve\n color: list or tuple, optional\n Colors to use for the different classes\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. 
If string, load colormap with that name\n from matplotlib.\n kwds: keywords\n Options to pass to matplotlib plotting method\n\n Returns:\n --------\n ax: Matplotlib axis object\n\n \"\"\"\n from math import sqrt, pi, sin, cos\n import matplotlib.pyplot as plt\n\n def function(amplitudes):\n def f(x):\n x1 = amplitudes[0]\n result = x1 / sqrt(2.0)\n harmonic = 1.0\n for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):\n result += (x_even * sin(harmonic * x) +\n x_odd * cos(harmonic * x))\n harmonic += 1.0\n if len(amplitudes) % 2 != 0:\n result += amplitudes[-1] * sin(harmonic * x)\n return result\n return f\n\n n = len(frame)\n class_col = frame[class_column]\n classes = frame[class_column].drop_duplicates()\n df = frame.drop(class_column, axis=1)\n x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]\n used_legends = set([])\n\n color_values = _get_standard_colors(num_colors=len(classes),\n colormap=colormap, color_type='random',\n color=color)\n colors = dict(zip(classes, color_values))\n if ax is None:\n ax = plt.gca(xlim=(-pi, pi))\n for i in range(n):\n row = df.iloc[i].values\n f = function(row)\n y = [f(t) for t in x]\n kls = class_col.iat[i]\n label = com.pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(x, y, color=colors[kls], **kwds)\n\n ax.legend(loc='upper right')\n ax.grid()\n return ax\n\n\ndef bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):\n \"\"\"Bootstrap plot.\n\n Parameters:\n -----------\n series: Time series\n fig: matplotlib figure object, optional\n size: number of data points to consider during each sampling\n samples: number of times the bootstrap procedure is performed\n kwds: optional keyword arguments for plotting commands, must be accepted\n by both hist and plot\n\n Returns:\n --------\n fig: matplotlib figure\n \"\"\"\n import random\n import matplotlib.pyplot as plt\n\n # random.sample(ndarray, int) fails on python 3.3, sigh\n data = list(series.values)\n samplings = [random.sample(data, size) for _ in range(samples)]\n\n means = np.array([np.mean(sampling) for sampling in samplings])\n medians = np.array([np.median(sampling) for sampling in samplings])\n midranges = np.array([(min(sampling) + max(sampling)) * 0.5\n for sampling in samplings])\n if fig is None:\n fig = plt.figure()\n x = lrange(samples)\n axes = []\n ax1 = fig.add_subplot(2, 3, 1)\n ax1.set_xlabel(\"Sample\")\n axes.append(ax1)\n ax1.plot(x, means, **kwds)\n ax2 = fig.add_subplot(2, 3, 2)\n ax2.set_xlabel(\"Sample\")\n axes.append(ax2)\n ax2.plot(x, medians, **kwds)\n ax3 = fig.add_subplot(2, 3, 3)\n ax3.set_xlabel(\"Sample\")\n axes.append(ax3)\n ax3.plot(x, midranges, **kwds)\n ax4 = fig.add_subplot(2, 3, 4)\n ax4.set_xlabel(\"Mean\")\n axes.append(ax4)\n ax4.hist(means, **kwds)\n ax5 = fig.add_subplot(2, 3, 5)\n ax5.set_xlabel(\"Median\")\n axes.append(ax5)\n ax5.hist(medians, **kwds)\n ax6 = fig.add_subplot(2, 3, 6)\n ax6.set_xlabel(\"Midrange\")\n axes.append(ax6)\n ax6.hist(midranges, **kwds)\n for axis in axes:\n plt.setp(axis.get_xticklabels(), fontsize=8)\n plt.setp(axis.get_yticklabels(), fontsize=8)\n return fig\n\n@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')\n@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')\ndef parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,\n use_columns=False, xticks=None, colormap=None,\n **kwds):\n \"\"\"Parallel coordinates plotting.\n\n Parameters\n ----------\n 
frame: DataFrame\n class_column: str\n Column name containing class names\n cols: list, optional\n A list of column names to use\n ax: matplotlib.axis, optional\n matplotlib axis object\n color: list or tuple, optional\n Colors to use for the different classes\n use_columns: bool, optional\n If true, columns will be used as xticks\n xticks: list or tuple, optional\n A list of values to use for xticks\n colormap: str or matplotlib colormap, default None\n Colormap to use for line colors.\n kwds: keywords\n Options to pass to matplotlib plotting method\n\n Returns\n -------\n ax: matplotlib axis object\n\n Examples\n --------\n >>> from pandas import read_csv\n >>> from pandas.tools.plotting import parallel_coordinates\n >>> from matplotlib import pyplot as plt\n >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')\n >>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))\n >>> plt.show()\n \"\"\"\n import matplotlib.pyplot as plt\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n\n if cols is None:\n df = frame.drop(class_column, axis=1)\n else:\n df = frame[cols]\n\n used_legends = set([])\n\n ncols = len(df.columns)\n\n # determine values to use for xticks\n if use_columns is True:\n if not np.all(np.isreal(list(df.columns))):\n raise ValueError('Columns must be numeric to be used as xticks')\n x = df.columns\n elif xticks is not None:\n if not np.all(np.isreal(xticks)):\n raise ValueError('xticks specified must be numeric')\n elif len(xticks) != ncols:\n raise ValueError('Length of xticks must match number of columns')\n x = xticks\n else:\n x = lrange(ncols)\n\n if ax is None:\n ax = plt.gca()\n\n color_values = _get_standard_colors(num_colors=len(classes),\n colormap=colormap, color_type='random',\n color=color)\n\n colors = dict(zip(classes, color_values))\n\n for i in range(n):\n y = df.iloc[i].values\n kls = class_col.iat[i]\n label = com.pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(x, y, color=colors[kls], **kwds)\n\n for i in x:\n ax.axvline(i, linewidth=1, color='black')\n\n ax.set_xticks(x)\n ax.set_xticklabels(df.columns)\n ax.set_xlim(x[0], x[-1])\n ax.legend(loc='upper right')\n ax.grid()\n return ax\n\n\ndef lag_plot(series, lag=1, ax=None, **kwds):\n \"\"\"Lag plot for time series.\n\n Parameters:\n -----------\n series: Time series\n lag: lag of the scatter plot, default 1\n ax: Matplotlib axis object, optional\n kwds: Matplotlib scatter method keyword arguments, optional\n\n Returns:\n --------\n ax: Matplotlib axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwds.setdefault('c', plt.rcParams['patch.facecolor'])\n\n data = series.values\n y1 = data[:-lag]\n y2 = data[lag:]\n if ax is None:\n ax = plt.gca()\n ax.set_xlabel(\"y(t)\")\n ax.set_ylabel(\"y(t + %s)\" % lag)\n ax.scatter(y1, y2, **kwds)\n return ax\n\n\ndef autocorrelation_plot(series, ax=None, **kwds):\n \"\"\"Autocorrelation plot for time series.\n\n Parameters:\n -----------\n series: Time series\n ax: Matplotlib axis object, optional\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Returns:\n -----------\n ax: Matplotlib axis object\n \"\"\"\n import matplotlib.pyplot as plt\n n = len(series)\n data = np.asarray(series)\n if ax is None:\n ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))\n mean 
= np.mean(data)\n c0 = np.sum((data - mean) ** 2) / float(n)\n\n def r(h):\n return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0\n x = np.arange(n) + 1\n y = lmap(r, x)\n z95 = 1.959963984540054\n z99 = 2.5758293035489004\n ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')\n ax.axhline(y=z95 / np.sqrt(n), color='grey')\n ax.axhline(y=0.0, color='black')\n ax.axhline(y=-z95 / np.sqrt(n), color='grey')\n ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')\n ax.set_xlabel(\"Lag\")\n ax.set_ylabel(\"Autocorrelation\")\n ax.plot(x, y, **kwds)\n if 'label' in kwds:\n ax.legend()\n ax.grid()\n return ax\n\n\nclass MPLPlot(object):\n \"\"\"\n Base class for assembling a pandas plot using matplotlib\n\n Parameters\n ----------\n data :\n\n \"\"\"\n _layout_type = 'vertical'\n _default_rot = 0\n orientation = None\n\n _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',\n 'mark_right', 'stacked']\n _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,\n 'mark_right': True, 'stacked': False}\n\n def __init__(self, data, kind=None, by=None, subplots=False, sharex=True,\n sharey=False, use_index=True,\n figsize=None, grid=None, legend=True, rot=None,\n ax=None, fig=None, title=None, xlim=None, ylim=None,\n xticks=None, yticks=None,\n sort_columns=False, fontsize=None,\n secondary_y=False, colormap=None,\n table=False, layout=None, **kwds):\n\n self.data = data\n self.by = by\n\n self.kind = kind\n\n self.sort_columns = sort_columns\n\n self.subplots = subplots\n self.sharex = sharex\n self.sharey = sharey\n self.figsize = figsize\n self.layout = layout\n\n self.xticks = xticks\n self.yticks = yticks\n self.xlim = xlim\n self.ylim = ylim\n self.title = title\n self.use_index = use_index\n\n self.fontsize = fontsize\n\n if rot is not None:\n self.rot = rot\n else:\n if isinstance(self._default_rot, dict):\n self.rot = self._default_rot[self.kind]\n else:\n self.rot = self._default_rot\n\n if grid is None:\n grid = False if secondary_y else True\n\n self.grid = grid\n self.legend = legend\n self.legend_handles = []\n self.legend_labels = []\n\n for attr in self._pop_attributes:\n value = kwds.pop(attr, self._attr_defaults.get(attr, None))\n setattr(self, attr, value)\n\n self.ax = ax\n self.fig = fig\n self.axes = None\n\n # parse errorbar input if given\n xerr = kwds.pop('xerr', None)\n yerr = kwds.pop('yerr', None)\n self.errors = {}\n for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):\n self.errors[kw] = self._parse_errorbars(kw, err)\n\n if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):\n secondary_y = [secondary_y]\n self.secondary_y = secondary_y\n\n # ugly TypeError if user passes matplotlib's `cmap` name.\n # Probably better to accept either.\n if 'cmap' in kwds and colormap:\n raise TypeError(\"Only specify one of `cmap` and `colormap`.\")\n elif 'cmap' in kwds:\n self.colormap = kwds.pop('cmap')\n else:\n self.colormap = colormap\n\n self.table = table\n\n self.kwds = kwds\n\n self._validate_color_args()\n\n def _validate_color_args(self):\n from pandas import DataFrame\n if 'color' not in self.kwds and 'colors' in self.kwds:\n warnings.warn((\"'colors' is being deprecated. 
Please use 'color'\"\n \"instead of 'colors'\"))\n colors = self.kwds.pop('colors')\n self.kwds['color'] = colors\n\n if ('color' in self.kwds and\n (isinstance(self.data, Series) or\n isinstance(self.data, DataFrame) and len(self.data.columns) == 1)):\n # support series.plot(color='green')\n self.kwds['color'] = [self.kwds['color']]\n\n if ('color' in self.kwds or 'colors' in self.kwds) and \\\n self.colormap is not None:\n warnings.warn(\"'color' and 'colormap' cannot be used \"\n \"simultaneously. Using 'color'\")\n\n if 'color' in self.kwds and self.style is not None:\n # need only a single match\n if re.match('^[a-z]+?', self.style) is not None:\n raise ValueError(\"Cannot pass 'style' string with a color \"\n \"symbol and 'color' keyword argument. Please\"\n \" use one or the other or pass 'style' \"\n \"without a color symbol\")\n\n def _iter_data(self, data=None, keep_index=False, fillna=None):\n if data is None:\n data = self.data\n if fillna is not None:\n data = data.fillna(fillna)\n\n from pandas.core.frame import DataFrame\n if isinstance(data, (Series, np.ndarray, Index)):\n if keep_index is True:\n yield self.label, data\n else:\n yield self.label, np.asarray(data)\n elif isinstance(data, DataFrame):\n if self.sort_columns:\n columns = com._try_sort(data.columns)\n else:\n columns = data.columns\n\n for col in columns:\n # # is this right?\n # empty = df[col].count() == 0\n # values = df[col].values if not empty else np.zeros(len(df))\n\n if keep_index is True:\n yield col, data[col]\n else:\n yield col, data[col].values\n\n @property\n def nseries(self):\n if self.data.ndim == 1:\n return 1\n else:\n return self.data.shape[1]\n\n def draw(self):\n self.plt.draw_if_interactive()\n\n def generate(self):\n self._args_adjust()\n self._compute_plot_data()\n self._setup_subplots()\n self._make_plot()\n self._add_table()\n self._make_legend()\n self._post_plot_logic()\n self._adorn_subplots()\n\n def _args_adjust(self):\n pass\n\n def _maybe_right_yaxis(self, ax):\n if hasattr(ax, 'right_ax'):\n return ax.right_ax\n else:\n orig_ax, new_ax = ax, ax.twinx()\n new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle\n\n orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax\n new_ax.right_ax = new_ax\n\n if len(orig_ax.get_lines()) == 0: # no data on left y\n orig_ax.get_yaxis().set_visible(False)\n return new_ax\n\n def _setup_subplots(self):\n if self.subplots:\n fig, axes = _subplots(naxes=self.nseries,\n sharex=self.sharex, sharey=self.sharey,\n figsize=self.figsize, ax=self.ax,\n layout=self.layout,\n layout_type=self._layout_type)\n else:\n if self.ax is None:\n fig = self.plt.figure(figsize=self.figsize)\n axes = fig.add_subplot(111)\n else:\n fig = self.ax.get_figure()\n if self.figsize is not None:\n fig.set_size_inches(self.figsize)\n axes = self.ax\n\n axes = _flatten(axes)\n\n if self.logx or self.loglog:\n [a.set_xscale('log') for a in axes]\n if self.logy or self.loglog:\n [a.set_yscale('log') for a in axes]\n\n self.fig = fig\n self.axes = axes\n\n @property\n def result(self):\n \"\"\"\n Return result axes\n \"\"\"\n if self.subplots:\n if self.layout is not None and not com.is_list_like(self.ax):\n return self.axes.reshape(*self.layout)\n else:\n return self.axes\n else:\n return self.axes[0]\n\n def _compute_plot_data(self):\n numeric_data = self.data.convert_objects()._get_numeric_data()\n\n try:\n is_empty = numeric_data.empty\n except AttributeError:\n is_empty = not len(numeric_data)\n\n # no empty frames or series allowed\n if is_empty:\n raise 
TypeError('Empty {0!r}: no numeric data to '\n 'plot'.format(numeric_data.__class__.__name__))\n\n self.data = numeric_data\n\n def _make_plot(self):\n raise NotImplementedError\n\n def _add_table(self):\n if self.table is False:\n return\n elif self.table is True:\n from pandas.core.frame import DataFrame\n if isinstance(self.data, Series):\n data = DataFrame(self.data, columns=[self.data.name])\n elif isinstance(self.data, DataFrame):\n data = self.data\n data = data.transpose()\n else:\n data = self.table\n ax = self._get_ax(0)\n table(ax, data)\n\n def _post_plot_logic(self):\n pass\n\n def _adorn_subplots(self):\n to_adorn = self.axes\n\n # todo: sharex, sharey handling?\n\n for ax in to_adorn:\n if self.yticks is not None:\n ax.set_yticks(self.yticks)\n\n if self.xticks is not None:\n ax.set_xticks(self.xticks)\n\n if self.ylim is not None:\n ax.set_ylim(self.ylim)\n\n if self.xlim is not None:\n ax.set_xlim(self.xlim)\n\n ax.grid(self.grid)\n\n if self.title:\n if self.subplots:\n self.fig.suptitle(self.title)\n else:\n self.axes[0].set_title(self.title)\n\n labels = [com.pprint_thing(key) for key in self.data.index]\n labels = dict(zip(range(len(self.data.index)), labels))\n\n for ax in self.axes:\n if self.orientation == 'vertical' or self.orientation is None:\n if self._need_to_set_index:\n xticklabels = [labels.get(x, '') for x in ax.get_xticks()]\n ax.set_xticklabels(xticklabels)\n self._apply_axis_properties(ax.xaxis, rot=self.rot,\n fontsize=self.fontsize)\n elif self.orientation == 'horizontal':\n if self._need_to_set_index:\n yticklabels = [labels.get(y, '') for y in ax.get_yticks()]\n ax.set_yticklabels(yticklabels)\n self._apply_axis_properties(ax.yaxis, rot=self.rot,\n fontsize=self.fontsize)\n\n def _apply_axis_properties(self, axis, rot=None, fontsize=None):\n labels = axis.get_majorticklabels() + axis.get_minorticklabels()\n for label in labels:\n if rot is not None:\n label.set_rotation(rot)\n if fontsize is not None:\n label.set_fontsize(fontsize)\n\n @property\n def legend_title(self):\n if hasattr(self.data, 'columns'):\n if not isinstance(self.data.columns, MultiIndex):\n name = self.data.columns.name\n if name is not None:\n name = com.pprint_thing(name)\n return name\n else:\n stringified = map(com.pprint_thing,\n self.data.columns.names)\n return ','.join(stringified)\n else:\n return None\n\n def _add_legend_handle(self, handle, label, index=None):\n if not label is None:\n if self.mark_right and index is not None:\n if self.on_right(index):\n label = label + ' (right)'\n self.legend_handles.append(handle)\n self.legend_labels.append(label)\n\n def _make_legend(self):\n ax, leg = self._get_ax_legend(self.axes[0])\n\n handles = []\n labels = []\n title = ''\n\n if not self.subplots:\n if not leg is None:\n title = leg.get_title().get_text()\n handles = leg.legendHandles\n labels = [x.get_text() for x in leg.get_texts()]\n\n if self.legend:\n if self.legend == 'reverse':\n self.legend_handles = reversed(self.legend_handles)\n self.legend_labels = reversed(self.legend_labels)\n\n handles += self.legend_handles\n labels += self.legend_labels\n if not self.legend_title is None:\n title = self.legend_title\n\n if len(handles) > 0:\n ax.legend(handles, labels, loc='best', title=title)\n\n elif self.subplots and self.legend:\n for ax in self.axes:\n ax.legend(loc='best')\n\n def _get_ax_legend(self, ax):\n leg = ax.get_legend()\n other_ax = (getattr(ax, 'right_ax', None) or\n getattr(ax, 'left_ax', None))\n other_leg = None\n if other_ax is not None:\n other_leg = 
other_ax.get_legend()\n if leg is None and other_leg is not None:\n leg = other_leg\n ax = other_ax\n return ax, leg\n\n @cache_readonly\n def plt(self):\n import matplotlib.pyplot as plt\n return plt\n\n _need_to_set_index = False\n\n def _get_xticks(self, convert_period=False):\n index = self.data.index\n is_datetype = index.inferred_type in ('datetime', 'date',\n 'datetime64', 'time')\n\n if self.use_index:\n if convert_period and isinstance(index, PeriodIndex):\n self.data = self.data.reindex(index=index.order())\n x = self.data.index.to_timestamp()._mpl_repr()\n elif index.is_numeric():\n \"\"\"\n Matplotlib supports numeric values or datetime objects as\n xaxis values. Taking LBYL approach here, by the time\n matplotlib raises exception when using non numeric/datetime\n values for xaxis, several actions are already taken by plt.\n \"\"\"\n x = index._mpl_repr()\n elif is_datetype:\n self.data = self.data.sort_index()\n x = self.data.index._mpl_repr()\n else:\n self._need_to_set_index = True\n x = lrange(len(index))\n else:\n x = lrange(len(index))\n\n return x\n\n def _is_datetype(self):\n index = self.data.index\n return (isinstance(index, (PeriodIndex, DatetimeIndex)) or\n index.inferred_type in ('datetime', 'date', 'datetime64',\n 'time'))\n\n def _get_plot_function(self):\n '''\n Returns the matplotlib plotting function (plot or errorbar) based on\n the presence of errorbar keywords.\n '''\n errorbar = any(e is not None for e in self.errors.values())\n def plotf(ax, x, y, style=None, **kwds):\n mask = com.isnull(y)\n if mask.any():\n y = np.ma.array(y)\n y = np.ma.masked_where(mask, y)\n\n if errorbar:\n return self.plt.Axes.errorbar(ax, x, y, **kwds)\n else:\n # prevent style kwarg from going to errorbar, where it is unsupported\n if style is not None:\n args = (ax, x, y, style)\n else:\n args = (ax, x, y)\n return self.plt.Axes.plot(*args, **kwds)\n return plotf\n\n def _get_index_name(self):\n if isinstance(self.data.index, MultiIndex):\n name = self.data.index.names\n if any(x is not None for x in name):\n name = ','.join([com.pprint_thing(x) for x in name])\n else:\n name = None\n else:\n name = self.data.index.name\n if name is not None:\n name = com.pprint_thing(name)\n\n return name\n\n def _get_ax(self, i):\n # get the twinx ax if appropriate\n if self.subplots:\n ax = self.axes[i]\n\n if self.on_right(i):\n ax = self._maybe_right_yaxis(ax)\n self.axes[i] = ax\n else:\n ax = self.axes[0]\n\n if self.on_right(i):\n ax = self._maybe_right_yaxis(ax)\n\n sec_true = isinstance(self.secondary_y, bool) and self.secondary_y\n all_sec = (com.is_list_like(self.secondary_y) and\n len(self.secondary_y) == self.nseries)\n if sec_true or all_sec:\n self.axes[0] = ax\n\n ax.get_yaxis().set_visible(True)\n return ax\n\n def on_right(self, i):\n from pandas.core.frame import DataFrame\n if isinstance(self.secondary_y, bool):\n return self.secondary_y\n\n if (isinstance(self.data, DataFrame) and\n isinstance(self.secondary_y, (tuple, list, np.ndarray, Index))):\n return self.data.columns[i] in self.secondary_y\n\n def _get_style(self, i, col_name):\n style = ''\n if self.subplots:\n style = 'k'\n\n if self.style is not None:\n if isinstance(self.style, list):\n try:\n style = self.style[i]\n except IndexError:\n pass\n elif isinstance(self.style, dict):\n style = self.style.get(col_name, style)\n else:\n style = self.style\n\n return style or None\n\n def _get_colors(self, num_colors=None, color_kwds='color'):\n from pandas.core.frame import DataFrame\n if num_colors is None:\n num_colors 
= self.nseries\n\n return _get_standard_colors(num_colors=num_colors,\n colormap=self.colormap,\n color=self.kwds.get(color_kwds))\n\n def _maybe_add_color(self, colors, kwds, style, i):\n has_color = 'color' in kwds or self.colormap is not None\n if has_color and (style is None or re.match('[a-z]+', style) is None):\n kwds['color'] = colors[i % len(colors)]\n\n def _parse_errorbars(self, label, err):\n '''\n Look for error keyword arguments and return the actual errorbar data\n or return the error DataFrame/dict\n\n Error bars can be specified in several ways:\n Series: the user provides a pandas.Series object of the same\n length as the data\n ndarray: provides a np.ndarray of the same length as the data\n DataFrame/dict: error values are paired with keys matching the\n key in the plotted DataFrame\n str: the name of the column within the plotted DataFrame\n '''\n\n if err is None:\n return None\n\n from pandas import DataFrame, Series\n\n def match_labels(data, e):\n e = e.reindex_axis(data.index)\n return e\n\n # key-matched DataFrame\n if isinstance(err, DataFrame):\n\n err = match_labels(self.data, err)\n # key-matched dict\n elif isinstance(err, dict):\n pass\n\n # Series of error values\n elif isinstance(err, Series):\n # broadcast error series across data\n err = match_labels(self.data, err)\n err = np.atleast_2d(err)\n err = np.tile(err, (self.nseries, 1))\n\n # errors are a column in the dataframe\n elif isinstance(err, string_types):\n evalues = self.data[err].values\n self.data = self.data[self.data.columns.drop(err)]\n err = np.atleast_2d(evalues)\n err = np.tile(err, (self.nseries, 1))\n\n elif com.is_list_like(err):\n if com.is_iterator(err):\n err = np.atleast_2d(list(err))\n else:\n # raw error values\n err = np.atleast_2d(err)\n\n err_shape = err.shape\n\n # asymmetrical error bars\n if err.ndim == 3:\n if (err_shape[0] != self.nseries) or \\\n (err_shape[1] != 2) or \\\n (err_shape[2] != len(self.data)):\n msg = \"Asymmetrical error bars should be provided \" + \\\n \"with the shape (%u, 2, %u)\" % \\\n (self.nseries, len(self.data))\n raise ValueError(msg)\n\n # broadcast errors to each data series\n if len(err) == 1:\n err = np.tile(err, (self.nseries, 1))\n\n elif com.is_number(err):\n err = np.tile([err], (self.nseries, len(self.data)))\n\n else:\n msg = \"No valid %s detected\" % label\n raise ValueError(msg)\n\n return err\n\n def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):\n from pandas import DataFrame\n errors = {}\n\n for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):\n if flag:\n err = self.errors[kw]\n # user provided label-matched dataframe of errors\n if isinstance(err, (DataFrame, dict)):\n if label is not None and label in err.keys():\n err = err[label]\n else:\n err = None\n elif index is not None and err is not None:\n err = err[index]\n\n if err is not None:\n errors[kw] = err\n return errors\n\n\nclass ScatterPlot(MPLPlot):\n _layout_type = 'single'\n\n def __init__(self, data, x, y, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n self.kwds.setdefault('c', self.plt.rcParams['patch.facecolor'])\n if x is None or y is None:\n raise ValueError( 'scatter requires and x and y column')\n if com.is_integer(x) and not self.data.columns.holds_integer():\n x = self.data.columns[x]\n if com.is_integer(y) and not self.data.columns.holds_integer():\n y = self.data.columns[y]\n self.x = x\n self.y = y\n\n @property\n def nseries(self):\n return 1\n\n def _make_plot(self):\n x, y, data = self.x, self.y, self.data\n ax = 
self.axes[0]\n\n if self.legend and hasattr(self, 'label'):\n label = self.label\n else:\n label = None\n scatter = ax.scatter(data[x].values, data[y].values, label=label,\n **self.kwds)\n self._add_legend_handle(scatter, label)\n\n errors_x = self._get_errorbars(label=x, index=0, yerr=False)\n errors_y = self._get_errorbars(label=y, index=0, xerr=False)\n if len(errors_x) > 0 or len(errors_y) > 0:\n err_kwds = dict(errors_x, **errors_y)\n err_kwds['ecolor'] = scatter.get_facecolor()[0]\n ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)\n\n def _post_plot_logic(self):\n ax = self.axes[0]\n x, y = self.x, self.y\n ax.set_ylabel(com.pprint_thing(y))\n ax.set_xlabel(com.pprint_thing(x))\n\n\nclass HexBinPlot(MPLPlot):\n _layout_type = 'single'\n\n def __init__(self, data, x, y, C=None, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n\n if x is None or y is None:\n raise ValueError('hexbin requires and x and y column')\n if com.is_integer(x) and not self.data.columns.holds_integer():\n x = self.data.columns[x]\n if com.is_integer(y) and not self.data.columns.holds_integer():\n y = self.data.columns[y]\n\n if com.is_integer(C) and not self.data.columns.holds_integer():\n C = self.data.columns[C]\n\n self.x = x\n self.y = y\n self.C = C\n\n @property\n def nseries(self):\n return 1\n\n def _make_plot(self):\n import matplotlib.pyplot as plt\n\n x, y, data, C = self.x, self.y, self.data, self.C\n ax = self.axes[0]\n # pandas uses colormap, matplotlib uses cmap.\n cmap = self.colormap or 'BuGn'\n cmap = plt.cm.get_cmap(cmap)\n cb = self.kwds.pop('colorbar', True)\n\n if C is None:\n c_values = None\n else:\n c_values = data[C].values\n\n ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,\n **self.kwds)\n if cb:\n img = ax.collections[0]\n self.fig.colorbar(img, ax=ax)\n\n def _post_plot_logic(self):\n ax = self.axes[0]\n x, y = self.x, self.y\n ax.set_ylabel(com.pprint_thing(y))\n ax.set_xlabel(com.pprint_thing(x))\n\n\nclass LinePlot(MPLPlot):\n\n _default_rot = 30\n orientation = 'vertical'\n\n def __init__(self, data, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n if self.stacked:\n self.data = self.data.fillna(value=0)\n self.x_compat = plot_params['x_compat']\n if 'x_compat' in self.kwds:\n self.x_compat = bool(self.kwds.pop('x_compat'))\n\n def _index_freq(self):\n from pandas.core.frame import DataFrame\n if isinstance(self.data, (Series, DataFrame)):\n freq = getattr(self.data.index, 'freq', None)\n if freq is None:\n freq = getattr(self.data.index, 'inferred_freq', None)\n if freq == 'B':\n weekdays = np.unique(self.data.index.dayofweek)\n if (5 in weekdays) or (6 in weekdays):\n freq = None\n return freq\n\n def _is_dynamic_freq(self, freq):\n if isinstance(freq, DateOffset):\n freq = freq.rule_code\n else:\n freq = frequencies.get_base_alias(freq)\n freq = frequencies.get_period_alias(freq)\n return freq is not None and self._no_base(freq)\n\n def _no_base(self, freq):\n # hack this for 0.10.1, creating more technical debt...sigh\n from pandas.core.frame import DataFrame\n if (isinstance(self.data, (Series, DataFrame))\n and isinstance(self.data.index, DatetimeIndex)):\n base = frequencies.get_freq(freq)\n x = self.data.index\n if (base <= frequencies.FreqGroup.FR_DAY):\n return x[:1].is_normalized\n\n return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]\n return True\n\n def _use_dynamic_x(self):\n freq = self._index_freq()\n\n ax = self._get_ax(0)\n ax_freq = getattr(ax, 'freq', None)\n if freq is None: # convert irregular if axes 
has freq info\n freq = ax_freq\n else: # do not use tsplot if irregular was plotted first\n if (ax_freq is None) and (len(ax.get_lines()) > 0):\n return False\n\n return (freq is not None) and self._is_dynamic_freq(freq)\n\n def _is_ts_plot(self):\n # this is slightly deceptive\n return not self.x_compat and self.use_index and self._use_dynamic_x()\n\n def _make_plot(self):\n self._initialize_prior(len(self.data))\n\n if self._is_ts_plot():\n data = self._maybe_convert_index(self.data)\n x = data.index # dummy, not used\n plotf = self._get_ts_plot_function()\n it = self._iter_data(data=data, keep_index=True)\n else:\n x = self._get_xticks(convert_period=True)\n plotf = self._get_plot_function()\n it = self._iter_data()\n\n colors = self._get_colors()\n for i, (label, y) in enumerate(it):\n ax = self._get_ax(i)\n style = self._get_style(i, label)\n kwds = self.kwds.copy()\n self._maybe_add_color(colors, kwds, style, i)\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = com.pprint_thing(label) # .encode('utf-8')\n kwds['label'] = label\n\n newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)\n self._add_legend_handle(newlines[0], label, index=i)\n\n lines = _get_all_lines(ax)\n left, right = _get_xlim(lines)\n ax.set_xlim(left, right)\n\n def _get_stacked_values(self, y, label):\n if self.stacked:\n if (y >= 0).all():\n return self._pos_prior + y\n elif (y <= 0).all():\n return self._neg_prior + y\n else:\n raise ValueError('When stacked is True, each column must be either all positive or negative.'\n '{0} contains both positive and negative values'.format(label))\n else:\n return y\n\n def _get_plot_function(self):\n f = MPLPlot._get_plot_function(self)\n def plotf(ax, x, y, style=None, column_num=None, **kwds):\n # column_num is used to get the target column from protf in line and area plots\n if column_num == 0:\n self._initialize_prior(len(self.data))\n y_values = self._get_stacked_values(y, kwds['label'])\n lines = f(ax, x, y_values, style=style, **kwds)\n self._update_prior(y)\n return lines\n return plotf\n\n def _get_ts_plot_function(self):\n from pandas.tseries.plotting import tsplot\n plotf = self._get_plot_function()\n def _plot(ax, x, data, style=None, **kwds):\n # accept x to be consistent with normal plot func,\n # x is not passed to tsplot as it uses data.index as x coordinate\n lines = tsplot(data, plotf, ax=ax, style=style, **kwds)\n return lines\n return _plot\n\n def _initialize_prior(self, n):\n self._pos_prior = np.zeros(n)\n self._neg_prior = np.zeros(n)\n\n def _update_prior(self, y):\n if self.stacked and not self.subplots:\n # tsplot resample may changedata length\n if len(self._pos_prior) != len(y):\n self._initialize_prior(len(y))\n if (y >= 0).all():\n self._pos_prior += y\n elif (y <= 0).all():\n self._neg_prior += y\n\n def _maybe_convert_index(self, data):\n # tsplot converts automatically, but don't want to convert index\n # over and over for DataFrames\n from pandas.core.frame import DataFrame\n if (isinstance(data.index, DatetimeIndex) and\n isinstance(data, DataFrame)):\n freq = getattr(data.index, 'freq', None)\n\n if freq is None:\n freq = getattr(data.index, 'inferred_freq', None)\n if isinstance(freq, DateOffset):\n freq = freq.rule_code\n freq = frequencies.get_base_alias(freq)\n freq = frequencies.get_period_alias(freq)\n\n if freq is None:\n ax = self._get_ax(0)\n freq = getattr(ax, 'freq', None)\n\n if freq is None:\n raise ValueError('Could not get frequency alias for plotting')\n\n data = 
DataFrame(data.values,\n index=data.index.to_period(freq=freq),\n columns=data.columns)\n return data\n\n def _post_plot_logic(self):\n df = self.data\n\n condition = (not self._use_dynamic_x()\n and df.index.is_all_dates\n and not self.subplots\n or (self.subplots and self.sharex))\n\n index_name = self._get_index_name()\n\n for ax in self.axes:\n if condition:\n format_date_labels(ax, rot=self.rot)\n\n if index_name is not None:\n ax.set_xlabel(index_name)\n\n\nclass AreaPlot(LinePlot):\n\n def __init__(self, data, **kwargs):\n kwargs.setdefault('stacked', True)\n data = data.fillna(value=0)\n LinePlot.__init__(self, data, **kwargs)\n\n if not self.stacked:\n # use smaller alpha to distinguish overlap\n self.kwds.setdefault('alpha', 0.5)\n\n def _get_plot_function(self):\n if self.logy or self.loglog:\n raise ValueError(\"Log-y scales are not supported in area plot\")\n else:\n f = MPLPlot._get_plot_function(self)\n def plotf(ax, x, y, style=None, column_num=0, **kwds):\n if column_num == 0:\n self._initialize_prior(len(self.data))\n y_values = self._get_stacked_values(y, kwds['label'])\n lines = f(ax, x, y_values, style=style, **kwds)\n\n # get data from the line to get coordinates for fill_between\n xdata, y_values = lines[0].get_data(orig=False)\n\n if (y >= 0).all():\n start = self._pos_prior\n elif (y <= 0).all():\n start = self._neg_prior\n else:\n start = np.zeros(len(y))\n\n if not 'color' in kwds:\n kwds['color'] = lines[0].get_color()\n\n self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)\n self._update_prior(y)\n return lines\n\n return plotf\n\n def _add_legend_handle(self, handle, label, index=None):\n from matplotlib.patches import Rectangle\n # Because fill_between isn't supported in legend,\n # specifically add Rectangle handle here\n alpha = self.kwds.get('alpha', None)\n handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)\n LinePlot._add_legend_handle(self, handle, label, index=index)\n\n def _post_plot_logic(self):\n LinePlot._post_plot_logic(self)\n\n if self.ylim is None:\n if (self.data >= 0).all().all():\n for ax in self.axes:\n ax.set_ylim(0, None)\n elif (self.data <= 0).all().all():\n for ax in self.axes:\n ax.set_ylim(None, 0)\n\n\nclass BarPlot(MPLPlot):\n\n _default_rot = {'bar': 90, 'barh': 0}\n\n def __init__(self, data, **kwargs):\n self.bar_width = kwargs.pop('width', 0.5)\n pos = kwargs.pop('position', 0.5)\n kwargs.setdefault('align', 'center')\n self.tick_pos = np.arange(len(data))\n\n self.bottom = kwargs.pop('bottom', None)\n self.left = kwargs.pop('left', None)\n\n self.log = kwargs.pop('log',False)\n MPLPlot.__init__(self, data, **kwargs)\n\n if self.stacked or self.subplots:\n self.tickoffset = self.bar_width * pos\n if kwargs['align'] == 'edge':\n self.lim_offset = self.bar_width / 2\n else:\n self.lim_offset = 0\n else:\n if kwargs['align'] == 'edge':\n w = self.bar_width / self.nseries\n self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5\n self.lim_offset = w * 0.5\n else:\n self.tickoffset = self.bar_width * pos\n self.lim_offset = 0\n\n self.ax_pos = self.tick_pos - self.tickoffset\n\n def _args_adjust(self):\n if com.is_list_like(self.bottom):\n self.bottom = np.array(self.bottom)\n if com.is_list_like(self.left):\n self.left = np.array(self.left)\n\n def _get_plot_function(self):\n if self.kind == 'bar':\n def f(ax, x, y, w, start=None, **kwds):\n if self.bottom is not None:\n start = start + self.bottom\n return ax.bar(x, y, w, bottom=start,log=self.log, **kwds)\n elif self.kind == 'barh':\n def f(ax, x, 
y, w, start=None, log=self.log, **kwds):\n if self.left is not None:\n start = start + self.left\n return ax.barh(x, y, w, left=start, **kwds)\n else:\n raise NotImplementedError\n\n return f\n\n def _make_plot(self):\n import matplotlib as mpl\n # mpl decided to make their version string unicode across all Python\n # versions for mpl >= 1.3 so we have to call str here for python 2\n mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')\n\n colors = self._get_colors()\n ncolors = len(colors)\n\n bar_f = self._get_plot_function()\n pos_prior = neg_prior = np.zeros(len(self.data))\n K = self.nseries\n\n for i, (label, y) in enumerate(self._iter_data(fillna=0)):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n kwds['color'] = colors[i % ncolors]\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = com.pprint_thing(label)\n\n if (('yerr' in kwds) or ('xerr' in kwds)) \\\n and (kwds.get('ecolor') is None):\n kwds['ecolor'] = mpl.rcParams['xtick.color']\n\n start = 0\n if self.log:\n start = 1\n if any(y < 1):\n # GH3254\n start = 0 if mpl_le_1_2_1 else None\n\n if self.subplots:\n w = self.bar_width / 2\n rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,\n start=start, label=label, **kwds)\n ax.set_title(label)\n elif self.stacked:\n mask = y > 0\n start = np.where(mask, pos_prior, neg_prior)\n w = self.bar_width / 2\n rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,\n start=start, label=label, **kwds)\n pos_prior = pos_prior + np.where(mask, y, 0)\n neg_prior = neg_prior + np.where(mask, 0, y)\n else:\n w = self.bar_width / K\n rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,\n start=start, label=label, **kwds)\n self._add_legend_handle(rect, label, index=i)\n\n def _post_plot_logic(self):\n for ax in self.axes:\n if self.use_index:\n str_index = [com.pprint_thing(key) for key in self.data.index]\n else:\n str_index = [com.pprint_thing(key) for key in\n range(self.data.shape[0])]\n name = self._get_index_name()\n\n s_edge = self.ax_pos[0] - 0.25 + self.lim_offset\n e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset\n\n if self.kind == 'bar':\n ax.set_xlim((s_edge, e_edge))\n ax.set_xticks(self.tick_pos)\n ax.set_xticklabels(str_index)\n if not self.log: # GH3254+\n ax.axhline(0, color='k', linestyle='--')\n if name is not None:\n ax.set_xlabel(name)\n elif self.kind == 'barh':\n # horizontal bars\n ax.set_ylim((s_edge, e_edge))\n ax.set_yticks(self.tick_pos)\n ax.set_yticklabels(str_index)\n ax.axvline(0, color='k', linestyle='--')\n if name is not None:\n ax.set_ylabel(name)\n else:\n raise NotImplementedError(self.kind)\n\n @property\n def orientation(self):\n if self.kind == 'bar':\n return 'vertical'\n elif self.kind == 'barh':\n return 'horizontal'\n else:\n raise NotImplementedError(self.kind)\n\n\nclass HistPlot(LinePlot):\n\n def __init__(self, data, bins=10, bottom=0, **kwargs):\n self.bins = bins # use mpl default\n self.bottom = bottom\n # Do not call LinePlot.__init__ which may fill nan\n MPLPlot.__init__(self, data, **kwargs)\n\n def _args_adjust(self):\n if com.is_integer(self.bins):\n # create common bin edge\n values = np.ravel(self.data.values)\n values = values[~com.isnull(values)]\n\n hist, self.bins = np.histogram(values, bins=self.bins,\n range=self.kwds.get('range', None),\n weights=self.kwds.get('weights', None))\n\n if com.is_list_like(self.bottom):\n self.bottom = np.array(self.bottom)\n\n def _get_plot_function(self):\n def plotf(ax, y, style=None, column_num=None, **kwds):\n if column_num 
== 0:\n self._initialize_prior(len(self.bins) - 1)\n y = y[~com.isnull(y)]\n bottom = self._pos_prior + self.bottom\n # ignore style\n n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,\n bottom=bottom, **kwds)\n self._update_prior(n)\n return patches\n return plotf\n\n def _make_plot(self):\n plotf = self._get_plot_function()\n colors = self._get_colors()\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n style = self._get_style(i, label)\n label = com.pprint_thing(label)\n\n kwds = self.kwds.copy()\n kwds['label'] = label\n self._maybe_add_color(colors, kwds, style, i)\n\n if style is not None:\n kwds['style'] = style\n\n artists = plotf(ax, y, column_num=i, **kwds)\n self._add_legend_handle(artists[0], label)\n\n def _post_plot_logic(self):\n if self.orientation == 'horizontal':\n for ax in self.axes:\n ax.set_xlabel('Degree')\n else:\n for ax in self.axes:\n ax.set_ylabel('Degree')\n\n @property\n def orientation(self):\n if self.kwds.get('orientation', None) == 'horizontal':\n return 'horizontal'\n else:\n return 'vertical'\n\n\nclass KdePlot(HistPlot):\n orientation = 'vertical'\n\n def __init__(self, data, bw_method=None, ind=None, **kwargs):\n MPLPlot.__init__(self, data, **kwargs)\n self.bw_method = bw_method\n self.ind = ind\n\n def _args_adjust(self):\n pass\n\n def _get_ind(self, y):\n if self.ind is None:\n sample_range = max(y) - min(y)\n ind = np.linspace(min(y) - 0.5 * sample_range,\n max(y) + 0.5 * sample_range, 1000)\n else:\n ind = self.ind\n return ind\n\n def _get_plot_function(self):\n from scipy.stats import gaussian_kde\n from scipy import __version__ as spv\n f = MPLPlot._get_plot_function(self)\n def plotf(ax, y, style=None, column_num=None, **kwds):\n y = remove_na(y)\n if LooseVersion(spv) >= '0.11.0':\n gkde = gaussian_kde(y, bw_method=self.bw_method)\n else:\n gkde = gaussian_kde(y)\n if self.bw_method is not None:\n msg = ('bw_method was added in Scipy 0.11.0.' +\n ' Scipy version in use is %s.' 
% spv)\n warnings.warn(msg)\n\n ind = self._get_ind(y)\n y = gkde.evaluate(ind)\n lines = f(ax, ind, y, style=style, **kwds)\n return lines\n return plotf\n\n def _post_plot_logic(self):\n for ax in self.axes:\n ax.set_ylabel('Density')\n\n\nclass PiePlot(MPLPlot):\n _layout_type = 'horizontal'\n\n def __init__(self, data, kind=None, **kwargs):\n data = data.fillna(value=0)\n if (data < 0).any().any():\n raise ValueError(\"{0} doesn't allow negative values\".format(kind))\n MPLPlot.__init__(self, data, kind=kind, **kwargs)\n\n def _args_adjust(self):\n self.grid = False\n self.logy = False\n self.logx = False\n self.loglog = False\n\n def _validate_color_args(self):\n pass\n\n def _make_plot(self):\n self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),\n color_kwds='colors'))\n\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n if label is not None:\n label = com.pprint_thing(label)\n ax.set_ylabel(label)\n\n kwds = self.kwds.copy()\n\n idx = [com.pprint_thing(v) for v in self.data.index]\n labels = kwds.pop('labels', idx)\n # labels is used for each wedge's labels\n results = ax.pie(y, labels=labels, **kwds)\n\n if kwds.get('autopct', None) is not None:\n patches, texts, autotexts = results\n else:\n patches, texts = results\n autotexts = []\n\n if self.fontsize is not None:\n for t in texts + autotexts:\n t.set_fontsize(self.fontsize)\n\n # leglabels is used for legend labels\n leglabels = labels if labels is not None else idx\n for p, l in zip(patches, leglabels):\n self._add_legend_handle(p, l)\n\n\nclass BoxPlot(LinePlot):\n _layout_type = 'horizontal'\n\n _valid_return_types = (None, 'axes', 'dict', 'both')\n # namedtuple to hold results\n BP = namedtuple(\"Boxplot\", ['ax', 'lines'])\n\n def __init__(self, data, return_type=None, **kwargs):\n # Do not call LinePlot.__init__ which may fill nan\n if return_type not in self._valid_return_types:\n raise ValueError(\"return_type must be {None, 'axes', 'dict', 'both'}\")\n\n self.return_type = return_type\n MPLPlot.__init__(self, data, **kwargs)\n\n def _args_adjust(self):\n if self.subplots:\n # Disable label ax sharing. Otherwise, all subplots shows last column label\n if self.orientation == 'vertical':\n self.sharex = False\n else:\n self.sharey = False\n\n def _get_plot_function(self):\n def plotf(ax, y, column_num=None, **kwds):\n if y.ndim == 2:\n y = [remove_na(v) for v in y]\n else:\n y = remove_na(y)\n bp = ax.boxplot(y, **kwds)\n\n if self.return_type == 'dict':\n return bp, bp\n elif self.return_type == 'both':\n return self.BP(ax=ax, lines=bp), bp\n else:\n return ax, bp\n return plotf\n\n def _validate_color_args(self):\n if 'color' in self.kwds:\n if self.colormap is not None:\n warnings.warn(\"'color' and 'colormap' cannot be used \"\n \"simultaneously. 
Using 'color'\")\n self.color = self.kwds.pop('color')\n\n if isinstance(self.color, dict):\n valid_keys = ['boxes', 'whiskers', 'medians', 'caps']\n for key, values in compat.iteritems(self.color):\n if key not in valid_keys:\n raise ValueError(\"color dict contains invalid key '{0}' \"\n \"The key must be either {1}\".format(key, valid_keys))\n else:\n self.color = None\n\n # get standard colors for default\n colors = _get_standard_colors(num_colors=3,\n colormap=self.colormap,\n color=None)\n # use 2 colors by default, for box/whisker and median\n # flier colors isn't needed here\n # because it can be specified by ``sym`` kw\n self._boxes_c = colors[0]\n self._whiskers_c = colors[0]\n self._medians_c = colors[2]\n self._caps_c = 'k' # mpl default\n\n def _get_colors(self, num_colors=None, color_kwds='color'):\n pass\n\n def maybe_color_bp(self, bp):\n if isinstance(self.color, dict):\n boxes = self.color.get('boxes', self._boxes_c)\n whiskers = self.color.get('whiskers', self._whiskers_c)\n medians = self.color.get('medians', self._medians_c)\n caps = self.color.get('caps', self._caps_c)\n else:\n # Other types are forwarded to matplotlib\n # If None, use default colors\n boxes = self.color or self._boxes_c\n whiskers = self.color or self._whiskers_c\n medians = self.color or self._medians_c\n caps = self.color or self._caps_c\n\n from matplotlib.artist import setp\n setp(bp['boxes'], color=boxes, alpha=1)\n setp(bp['whiskers'], color=whiskers, alpha=1)\n setp(bp['medians'], color=medians, alpha=1)\n setp(bp['caps'], color=caps, alpha=1)\n\n def _make_plot(self):\n plotf = self._get_plot_function()\n if self.subplots:\n self._return_obj = compat.OrderedDict()\n\n for i, (label, y) in enumerate(self._iter_data()):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n\n ret, bp = plotf(ax, y, column_num=i, **kwds)\n self.maybe_color_bp(bp)\n self._return_obj[label] = ret\n\n label = [com.pprint_thing(label)]\n self._set_ticklabels(ax, label)\n else:\n y = self.data.values.T\n ax = self._get_ax(0)\n kwds = self.kwds.copy()\n\n ret, bp = plotf(ax, y, column_num=0, **kwds)\n self.maybe_color_bp(bp)\n self._return_obj = ret\n\n labels = [l for l, y in self._iter_data()]\n labels = [com.pprint_thing(l) for l in labels]\n if not self.use_index:\n labels = [com.pprint_thing(key) for key in range(len(labels))]\n self._set_ticklabels(ax, labels)\n\n def _set_ticklabels(self, ax, labels):\n if self.orientation == 'vertical':\n ax.set_xticklabels(labels)\n else:\n ax.set_yticklabels(labels)\n\n def _post_plot_logic(self):\n pass\n\n @property\n def orientation(self):\n if self.kwds.get('vert', True):\n return 'vertical'\n else:\n return 'horizontal'\n\n @property\n def result(self):\n if self.return_type is None:\n return super(BoxPlot, self).result\n else:\n return self._return_obj\n\n\n# kinds supported by both dataframe and series\n_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']\n# kinds supported by dataframe\n_dataframe_kinds = ['scatter', 'hexbin']\n# kinds supported only by series or dataframe single column\n_series_kinds = ['pie']\n_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds\n\n_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,\n 'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,\n 'scatter': ScatterPlot, 'hexbin': HexBinPlot,\n 'area': AreaPlot, 'pie': PiePlot}\n\n\ndef plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True,\n sharey=False, use_index=True, figsize=None, grid=None,\n legend=True, rot=None, ax=None, 
style=None, title=None,\n xlim=None, ylim=None, logx=False, logy=False, xticks=None,\n yticks=None, kind='line', sort_columns=False, fontsize=None,\n secondary_y=False, layout=None, **kwds):\n\n \"\"\"\n Make line, bar, or scatter plots of DataFrame series with the index on the x-axis\n using matplotlib / pylab.\n\n Parameters\n ----------\n frame : DataFrame\n x : label or position, default None\n y : label or position, default None\n Allows plotting of one column versus another\n yerr : DataFrame (with matching labels), Series, list-type (tuple, list,\n ndarray), or str of column name containing y error values\n xerr : similar functionality as yerr, but for x error values\n subplots : boolean, default False\n Make separate subplots for each time series\n sharex : boolean, default True\n In case subplots=True, share x axis\n sharey : boolean, default False\n In case subplots=True, share y axis\n use_index : boolean, default True\n Use index as ticks for x axis\n stacked : boolean, default False\n If True, create stacked bar plot. Only valid for DataFrame input\n sort_columns: boolean, default False\n Sort column names to determine plot ordering\n title : string\n Title to use for the plot\n grid : boolean, default None (matlab style default)\n Axis grid lines\n legend : False/True/'reverse'\n Place legend on axis subplots\n\n ax : matplotlib axis object, default None\n style : list or dict\n matplotlib line style per column\n kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box', 'scatter', 'hexbin'}\n line : line plot\n bar : vertical bar plot\n barh : horizontal bar plot\n hist : histogram\n kde/density : Kernel Density Estimation plot\n area : area plot\n box : box plot\n scatter : scatter plot\n hexbin : hexbin plot\n logx : boolean, default False\n Use log scaling on x axis\n logy : boolean, default False\n Use log scaling on y axis\n loglog : boolean, default False\n Use log scaling on both x and y axes\n xticks : sequence\n Values to use for the xticks\n yticks : sequence\n Values to use for the yticks\n xlim : 2-tuple/list\n ylim : 2-tuple/list\n rot : int, default None\n Rotation for ticks\n secondary_y : boolean or sequence, default False\n Whether to plot on the secondary y-axis\n If a list/tuple, which columns to plot on secondary y-axis\n mark_right: boolean, default True\n When using a secondary_y axis, should the legend label the axis of\n the various columns automatically\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. If string, load colormap with that name\n from matplotlib.\n position : float\n Specify relative alignments for bar plot layout.\n From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)\n layout : tuple (optional)\n (rows, columns) for the layout of the plot\n table : boolean, Series or DataFrame, default False\n If True, draw a table using the data in the DataFrame and the data will\n be transposed to meet matplotlib's default layout.\n If a Series or DataFrame is passed, use passed data to draw a table.\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Returns\n -------\n ax_or_axes : matplotlib.AxesSubplot or list of them\n\n Notes\n -----\n\n If `kind`='hexbin', you can control the size of the bins with the\n `gridsize` argument. By default, a histogram of the counts around each\n `(x, y)` point is computed. 
You can specify alternative aggregations\n by passing values to the `C` and `reduce_C_function` arguments.\n `C` specifies the value at each `(x, y)` point and `reduce_C_function`\n is a function of one argument that reduces all the values in a bin to\n a single number (e.g. `mean`, `max`, `sum`, `std`).\n \"\"\"\n\n kind = _get_standard_kind(kind.lower().strip())\n if kind in _all_kinds:\n klass = _plot_klass[kind]\n else:\n raise ValueError('Invalid chart type given %s' % kind)\n\n if kind in _dataframe_kinds:\n plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots,\n rot=rot,legend=legend, ax=ax, style=style,\n fontsize=fontsize, use_index=use_index, sharex=sharex,\n sharey=sharey, xticks=xticks, yticks=yticks,\n xlim=xlim, ylim=ylim, title=title, grid=grid,\n figsize=figsize, logx=logx, logy=logy,\n sort_columns=sort_columns, secondary_y=secondary_y,\n layout=layout, **kwds)\n elif kind in _series_kinds:\n if y is None and subplots is False:\n msg = \"{0} requires either y column or 'subplots=True'\"\n raise ValueError(msg.format(kind))\n elif y is not None:\n if com.is_integer(y) and not frame.columns.holds_integer():\n y = frame.columns[y]\n frame = frame[y] # converted to series actually\n frame.index.name = y\n\n plot_obj = klass(frame, kind=kind, subplots=subplots,\n rot=rot,legend=legend, ax=ax, style=style,\n fontsize=fontsize, use_index=use_index, sharex=sharex,\n sharey=sharey, xticks=xticks, yticks=yticks,\n xlim=xlim, ylim=ylim, title=title, grid=grid,\n figsize=figsize, layout=layout,\n sort_columns=sort_columns, **kwds)\n else:\n if x is not None:\n if com.is_integer(x) and not frame.columns.holds_integer():\n x = frame.columns[x]\n frame = frame.set_index(x)\n\n if y is not None:\n if com.is_integer(y) and not frame.columns.holds_integer():\n y = frame.columns[y]\n label = x if x is not None else frame.index.name\n label = kwds.pop('label', label)\n ser = frame[y]\n ser.index.name = label\n\n for kw in ['xerr', 'yerr']:\n if (kw in kwds) and \\\n (isinstance(kwds[kw], string_types) or com.is_integer(kwds[kw])):\n try:\n kwds[kw] = frame[kwds[kw]]\n except (IndexError, KeyError, TypeError):\n pass\n\n return plot_series(ser, label=label, kind=kind,\n use_index=use_index,\n rot=rot, xticks=xticks, yticks=yticks,\n xlim=xlim, ylim=ylim, ax=ax, style=style,\n grid=grid, logx=logx, logy=logy,\n secondary_y=secondary_y, title=title,\n figsize=figsize, fontsize=fontsize, **kwds)\n\n else:\n plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,\n legend=legend, ax=ax, style=style, fontsize=fontsize,\n use_index=use_index, sharex=sharex, sharey=sharey,\n xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,\n title=title, grid=grid, figsize=figsize, logx=logx,\n logy=logy, sort_columns=sort_columns,\n secondary_y=secondary_y, layout=layout, **kwds)\n\n plot_obj.generate()\n plot_obj.draw()\n return plot_obj.result\n\n\ndef plot_series(series, label=None, kind='line', use_index=True, rot=None,\n xticks=None, yticks=None, xlim=None, ylim=None,\n ax=None, style=None, grid=None, legend=False, logx=False,\n logy=False, secondary_y=False, **kwds):\n \"\"\"\n Plot the input series with the index on the x-axis using matplotlib\n\n Parameters\n ----------\n label : label argument to provide to plot\n kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'box'}\n line : line plot\n bar : vertical bar plot\n barh : horizontal bar plot\n hist : histogram\n kde/density : Kernel Density Estimation plot\n area : area plot\n box : box plot\n use_index : boolean, default 
True\n Plot index as axis tick labels\n rot : int, default None\n Rotation for tick labels\n xticks : sequence\n Values to use for the xticks\n yticks : sequence\n Values to use for the yticks\n xlim : 2-tuple/list\n ylim : 2-tuple/list\n ax : matplotlib axis object\n If not passed, uses gca()\n style : string, default matplotlib default\n matplotlib line style to use\n grid : matplotlib grid\n legend: matplotlib legend\n logx : boolean, default False\n Use log scaling on x axis\n logy : boolean, default False\n Use log scaling on y axis\n loglog : boolean, default False\n Use log scaling on both x and y axes\n secondary_y : boolean or sequence of ints, default False\n If True then y-axis will be on the right\n figsize : a tuple (width, height) in inches\n position : float\n Specify relative alignments for bar plot layout.\n From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)\n table : boolean, Series or DataFrame, default False\n If True, draw a table using the data in the Series and the data will\n be transposed to meet matplotlib's default layout.\n If a Series or DataFrame is passed, use passed data to draw a table.\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Notes\n -----\n See matplotlib documentation online for more on this subject\n \"\"\"\n\n kind = _get_standard_kind(kind.lower().strip())\n if kind in _common_kinds or kind in _series_kinds:\n klass = _plot_klass[kind]\n else:\n raise ValueError('Invalid chart type given %s' % kind)\n\n \"\"\"\n If no axis is specified, we check whether there are existing figures.\n If so, we get the current axis and check whether yaxis ticks are on the\n right. Ticks for the plot of the series will be on the right unless\n there is at least one axis with ticks on the left.\n\n If we do not check for whether there are existing figures, _gca() will\n create a figure with the default figsize, causing the figsize= parameter to\n be ignored.\n \"\"\"\n import matplotlib.pyplot as plt\n if ax is None and len(plt.get_fignums()) > 0:\n ax = _gca()\n ax = getattr(ax, 'left_ax', ax)\n\n # is there harm in this?\n if label is None:\n label = series.name\n\n plot_obj = klass(series, kind=kind, rot=rot, logx=logx, logy=logy,\n ax=ax, use_index=use_index, style=style,\n xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,\n legend=legend, grid=grid, label=label,\n secondary_y=secondary_y, **kwds)\n\n plot_obj.generate()\n plot_obj.draw()\n\n # plot_obj.ax is None if we created the first figure\n return plot_obj.result\n\n\n_shared_docs['boxplot'] = \"\"\"\n Make a box plot from DataFrame column optionally grouped by some columns or\n other inputs\n\n Parameters\n ----------\n data : the pandas object holding the data\n column : column name or list of names, or vector\n Can be any valid input to groupby\n by : string or sequence\n Column in the DataFrame to group by\n ax : Matplotlib axes object, optional\n fontsize : int or string\n rot : label rotation angle\n figsize : A tuple (width, height) in inches\n grid : Setting this to True will show the grid\n layout : tuple (optional)\n (rows, columns) for the layout of the plot\n return_type : {'axes', 'dict', 'both'}, default 'dict'\n The kind of object to return. 
'dict' returns a dictionary\n whose values are the matplotlib Lines of the boxplot;\n 'axes' returns the matplotlib axes the boxplot is drawn on;\n 'both' returns a namedtuple with the axes and dict.\n\n When grouping with ``by``, a dict mapping columns to ``return_type``\n is returned.\n\n kwds : other plotting keyword arguments to be passed to matplotlib boxplot\n function\n\n Returns\n -------\n lines : dict\n ax : matplotlib Axes\n (ax, lines): namedtuple\n\n Notes\n -----\n Use ``return_type='dict'`` when you want to tweak the appearance\n of the lines after plotting. In this case a dict containing the Lines\n making up the boxes, caps, fliers, medians, and whiskers is returned.\n \"\"\"\n\n\n@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)\ndef boxplot(data, column=None, by=None, ax=None, fontsize=None,\n rot=0, grid=True, figsize=None, layout=None, return_type=None,\n **kwds):\n\n # validate return_type:\n valid_types = (None, 'axes', 'dict', 'both')\n if return_type not in BoxPlot._valid_return_types:\n raise ValueError(\"return_type must be {None, 'axes', 'dict', 'both'}\")\n\n from pandas import Series, DataFrame\n if isinstance(data, Series):\n data = DataFrame({'x': data})\n column = 'x'\n\n def _get_colors():\n return _get_standard_colors(color=kwds.get('color'), num_colors=1)\n\n def maybe_color_bp(bp):\n if 'color' not in kwds :\n from matplotlib.artist import setp\n setp(bp['boxes'],color=colors[0],alpha=1)\n setp(bp['whiskers'],color=colors[0],alpha=1)\n setp(bp['medians'],color=colors[2],alpha=1)\n\n def plot_group(keys, values, ax):\n keys = [com.pprint_thing(x) for x in keys]\n values = [remove_na(v) for v in values]\n bp = ax.boxplot(values, **kwds)\n if kwds.get('vert', 1):\n ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)\n else:\n ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize)\n maybe_color_bp(bp)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type == 'dict':\n return bp\n elif return_type == 'both':\n return BoxPlot.BP(ax=ax, lines=bp)\n else:\n return ax\n\n colors = _get_colors()\n if column is None:\n columns = None\n else:\n if isinstance(column, (list, tuple)):\n columns = column\n else:\n columns = [column]\n\n if by is not None:\n result = _grouped_plot_by_column(plot_group, data, columns=columns,\n by=by, grid=grid, figsize=figsize,\n ax=ax, layout=layout, return_type=return_type)\n else:\n if layout is not None:\n raise ValueError(\"The 'layout' keyword is not supported when \"\n \"'by' is None\")\n\n if return_type is None:\n msg = (\"\\nThe default value for 'return_type' will change to \"\n \"'axes' in a future release.\\n To use the future behavior \"\n \"now, set return_type='axes'.\\n To keep the previous \"\n \"behavior and silence this warning, set \"\n \"return_type='dict'.\")\n warnings.warn(msg, FutureWarning)\n return_type = 'dict'\n if ax is None:\n ax = _gca()\n data = data._get_numeric_data()\n if columns is None:\n columns = data.columns\n else:\n data = data[columns]\n\n result = plot_group(columns, data.values.T, ax)\n ax.grid(grid)\n\n return result\n\n\ndef format_date_labels(ax, rot):\n # mini version of autofmt_xdate\n try:\n for label in ax.get_xticklabels():\n label.set_ha('right')\n label.set_rotation(rot)\n fig = ax.get_figure()\n fig.subplots_adjust(bottom=0.2)\n except Exception: # pragma: no cover\n pass\n\n\ndef scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs):\n \"\"\"\n Make a scatter plot from two DataFrame columns\n\n Parameters\n 
----------\n data : DataFrame\n x : Column name for the x-axis values\n y : Column name for the y-axis values\n ax : Matplotlib axis object\n figsize : A tuple (width, height) in inches\n grid : Setting this to True will show the grid\n kwargs : other plotting keyword arguments\n To be passed to scatter function\n\n Returns\n -------\n fig : matplotlib.Figure\n \"\"\"\n import matplotlib.pyplot as plt\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwargs.setdefault('c', plt.rcParams['patch.facecolor'])\n\n def plot_group(group, ax):\n xvals = group[x].values\n yvals = group[y].values\n ax.scatter(xvals, yvals, **kwargs)\n ax.grid(grid)\n\n if by is not None:\n fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)\n else:\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n plot_group(data, ax)\n ax.set_ylabel(com.pprint_thing(y))\n ax.set_xlabel(com.pprint_thing(x))\n\n ax.grid(grid)\n\n return fig\n\n\ndef hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,\n xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,\n sharey=False, figsize=None, layout=None, bins=10, **kwds):\n \"\"\"\n Draw histogram of the DataFrame's series using matplotlib / pylab.\n\n Parameters\n ----------\n data : DataFrame\n column : string or sequence\n If passed, will be used to limit data to a subset of columns\n by : object, optional\n If passed, then used to form histograms for separate groups\n grid : boolean, default True\n Whether to show axis grid lines\n xlabelsize : int, default None\n If specified changes the x-axis label size\n xrot : float, default None\n rotation of x axis labels\n ylabelsize : int, default None\n If specified changes the y-axis label size\n yrot : float, default None\n rotation of y axis labels\n ax : matplotlib axes object, default None\n sharex : bool, if True, the X axis will be shared amongst all subplots.\n sharey : bool, if True, the Y axis will be shared amongst all subplots.\n figsize : tuple\n The size of the figure to create in inches by default\n layout: (optional) a tuple (rows, columns) for the layout of the histograms\n bins: integer, default 10\n Number of histogram bins to be used\n kwds : other plotting keyword arguments\n To be passed to hist function\n \"\"\"\n\n if by is not None:\n axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,\n sharex=sharex, sharey=sharey, layout=layout, bins=bins,\n xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,\n **kwds)\n return axes\n\n if column is not None:\n if not isinstance(column, (list, np.ndarray, Index)):\n column = [column]\n data = data[column]\n data = data._get_numeric_data()\n naxes = len(data.columns)\n\n fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,\n sharex=sharex, sharey=sharey, figsize=figsize,\n layout=layout)\n _axes = _flatten(axes)\n\n for i, col in enumerate(com._try_sort(data.columns)):\n ax = _axes[i]\n ax.hist(data[col].dropna().values, bins=bins, **kwds)\n ax.set_title(col)\n ax.grid(grid)\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n fig.subplots_adjust(wspace=0.3, hspace=0.3)\n\n return axes\n\n\ndef hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,\n xrot=None, ylabelsize=None, yrot=None, figsize=None, bins=10, **kwds):\n \"\"\"\n Draw histogram of the input series using matplotlib\n\n Parameters\n ----------\n by : object, optional\n If passed, then used to form 
histograms for separate groups\n ax : matplotlib axis object\n If not passed, uses gca()\n grid : boolean, default True\n Whether to show axis grid lines\n xlabelsize : int, default None\n If specified changes the x-axis label size\n xrot : float, default None\n rotation of x axis labels\n ylabelsize : int, default None\n If specified changes the y-axis label size\n yrot : float, default None\n rotation of y axis labels\n figsize : tuple, default None\n figure size in inches by default\n bins: integer, default 10\n Number of histogram bins to be used\n kwds : keywords\n To be passed to the actual plotting function\n\n Notes\n -----\n See matplotlib documentation online for more on this\n\n \"\"\"\n import matplotlib.pyplot as plt\n\n if by is None:\n if kwds.get('layout', None) is not None:\n raise ValueError(\"The 'layout' keyword is not supported when \"\n \"'by' is None\")\n # hack until the plotting interface is a bit more unified\n fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else\n plt.figure(figsize=figsize))\n if (figsize is not None and tuple(figsize) !=\n tuple(fig.get_size_inches())):\n fig.set_size_inches(*figsize, forward=True)\n if ax is None:\n ax = fig.gca()\n elif ax.get_figure() != fig:\n raise AssertionError('passed axis not bound to passed figure')\n values = self.dropna().values\n\n ax.hist(values, bins=bins, **kwds)\n ax.grid(grid)\n axes = np.array([ax])\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n\n else:\n if 'figure' in kwds:\n raise ValueError(\"Cannot pass 'figure' when using the \"\n \"'by' argument, since a new 'Figure' instance \"\n \"will be created\")\n axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins,\n xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,\n **kwds)\n\n if axes.ndim == 1 and len(axes) == 1:\n return axes[0]\n return axes\n\n\ndef grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,\n layout=None, sharex=False, sharey=False, rot=90, grid=True,\n xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,\n **kwargs):\n \"\"\"\n Grouped histogram\n\n Parameters\n ----------\n data: Series/DataFrame\n column: object, optional\n by: object, optional\n ax: axes, optional\n bins: int, default 50\n figsize: tuple, optional\n layout: optional\n sharex: boolean, default False\n sharey: boolean, default False\n rot: int, default 90\n grid: bool, default True\n kwargs: dict, keyword arguments passed to matplotlib.Axes.hist\n\n Returns\n -------\n axes: collection of Matplotlib Axes\n \"\"\"\n def plot_group(group, ax):\n ax.hist(group.dropna().values, bins=bins, **kwargs)\n\n xrot = xrot or rot\n\n fig, axes = _grouped_plot(plot_group, data, column=column,\n by=by, sharex=sharex, sharey=sharey, ax=ax,\n figsize=figsize, layout=layout, rot=rot)\n\n _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,\n ylabelsize=ylabelsize, yrot=yrot)\n\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,\n hspace=0.5, wspace=0.3)\n return axes\n\n\ndef boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,\n rot=0, grid=True, ax=None, figsize=None,\n layout=None, **kwds):\n \"\"\"\n Make box plots from DataFrameGroupBy data.\n\n Parameters\n ----------\n grouped : Grouped DataFrame\n subplots :\n * ``False`` - no subplots will be used\n * ``True`` - create a subplot for each group\n column : column name or list of names, or vector\n Can be any valid input to groupby\n fontsize : int or string\n rot : label 
rotation angle\n grid : Setting this to True will show the grid\n figsize : A tuple (width, height) in inches\n layout : tuple (optional)\n (rows, columns) for the layout of the plot\n kwds : other plotting keyword arguments to be passed to matplotlib boxplot\n function\n\n Returns\n -------\n dict of key/value = group key/DataFrame.boxplot return value\n or DataFrame.boxplot return value in case subplots=figures=False\n\n Examples\n --------\n >>> import pandas\n >>> import numpy as np\n >>> import itertools\n >>>\n >>> tuples = [t for t in itertools.product(range(1000), range(4))]\n >>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])\n >>> data = np.random.randn(len(index),4)\n >>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)\n >>>\n >>> grouped = df.groupby(level='lvl1')\n >>> boxplot_frame_groupby(grouped)\n >>>\n >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)\n >>> boxplot_frame_groupby(grouped, subplots=False)\n \"\"\"\n if subplots is True:\n naxes = len(grouped)\n fig, axes = _subplots(naxes=naxes, squeeze=False,\n ax=ax, sharex=False, sharey=True, figsize=figsize,\n layout=layout)\n axes = _flatten(axes)\n\n ret = compat.OrderedDict()\n for (key, group), ax in zip(grouped, axes):\n d = group.boxplot(ax=ax, column=column, fontsize=fontsize,\n rot=rot, grid=grid, **kwds)\n ax.set_title(com.pprint_thing(key))\n ret[key] = d\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)\n else:\n from pandas.tools.merge import concat\n keys, frames = zip(*grouped)\n if grouped.axis == 0:\n df = concat(frames, keys=keys, axis=1)\n else:\n if len(frames) > 1:\n df = frames[0].join(frames[1::])\n else:\n df = frames[0]\n ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,\n grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds)\n return ret\n\n\ndef _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,\n figsize=None, sharex=True, sharey=True, layout=None,\n rot=0, ax=None, **kwargs):\n from pandas import DataFrame\n\n if figsize == 'default':\n # allowed to specify mpl default with 'default'\n warnings.warn(\"figsize='default' is deprecated. 
Specify figure\"\n \"size by tuple instead\", FutureWarning)\n figsize = None\n\n grouped = data.groupby(by)\n if column is not None:\n grouped = grouped[column]\n\n naxes = len(grouped)\n fig, axes = _subplots(naxes=naxes, figsize=figsize,\n sharex=sharex, sharey=sharey, ax=ax,\n layout=layout)\n\n _axes = _flatten(axes)\n\n for i, (key, group) in enumerate(grouped):\n ax = _axes[i]\n if numeric_only and isinstance(group, DataFrame):\n group = group._get_numeric_data()\n plotf(group, ax, **kwargs)\n ax.set_title(com.pprint_thing(key))\n\n return fig, axes\n\n\ndef _grouped_plot_by_column(plotf, data, columns=None, by=None,\n numeric_only=True, grid=False,\n figsize=None, ax=None, layout=None, return_type=None,\n **kwargs):\n grouped = data.groupby(by)\n if columns is None:\n if not isinstance(by, (list, tuple)):\n by = [by]\n columns = data._get_numeric_data().columns - by\n naxes = len(columns)\n fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,\n figsize=figsize, ax=ax, layout=layout)\n\n _axes = _flatten(axes)\n\n result = compat.OrderedDict()\n for i, col in enumerate(columns):\n ax = _axes[i]\n gp_col = grouped[col]\n keys, values = zip(*gp_col)\n re_plotf = plotf(keys, values, ax, **kwargs)\n ax.set_title(col)\n ax.set_xlabel(com.pprint_thing(by))\n result[col] = re_plotf\n ax.grid(grid)\n\n # Return axes in multiplot case, maybe revisit later # 985\n if return_type is None:\n result = axes\n\n byline = by[0] if len(by) == 1 else by\n fig.suptitle('Boxplot grouped by %s' % byline)\n fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)\n\n return result\n\n\ndef table(ax, data, rowLabels=None, colLabels=None,\n **kwargs):\n\n \"\"\"\n Helper function to convert DataFrame and Series to matplotlib.table\n\n Parameters\n ----------\n `ax`: Matplotlib axes object\n `data`: DataFrame or Series\n data for table contents\n `kwargs`: keywords, optional\n keyword arguments which passed to matplotlib.table.table.\n If `rowLabels` or `colLabels` is not specified, data index or column name will be used.\n\n Returns\n -------\n matplotlib table object\n \"\"\"\n from pandas import DataFrame\n if isinstance(data, Series):\n data = DataFrame(data, columns=[data.name])\n elif isinstance(data, DataFrame):\n pass\n else:\n raise ValueError('Input data must be DataFrame or Series')\n\n if rowLabels is None:\n rowLabels = data.index\n\n if colLabels is None:\n colLabels = data.columns\n\n cellText = data.values\n\n import matplotlib.table\n table = matplotlib.table.table(ax, cellText=cellText,\n rowLabels=rowLabels, colLabels=colLabels, **kwargs)\n return table\n\n\ndef _get_layout(nplots, layout=None, layout_type='box'):\n if layout is not None:\n if not isinstance(layout, (tuple, list)) or len(layout) != 2:\n raise ValueError('Layout must be a tuple of (rows, columns)')\n\n nrows, ncols = layout\n if nrows * ncols < nplots:\n raise ValueError('Layout of %sx%s must be larger than required size %s' %\n (nrows, ncols, nplots))\n\n return layout\n\n if layout_type == 'single':\n return (1, 1)\n elif layout_type == 'horizontal':\n return (1, nplots)\n elif layout_type == 'vertical':\n return (nplots, 1)\n\n layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}\n try:\n return layouts[nplots]\n except KeyError:\n k = 1\n while k ** 2 < nplots:\n k += 1\n\n if (k - 1) * k >= nplots:\n return k, (k - 1)\n else:\n return k, k\n\n# copied from matplotlib/pyplot.py and modified for pandas.plotting\n\n\ndef _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,\n 
subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw):\n \"\"\"Create a figure with a set of subplots already made.\n\n This utility wrapper makes it convenient to create common layouts of\n subplots, including the enclosing figure object, in a single call.\n\n Keyword arguments:\n\n naxes : int\n Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols.\n\n sharex : bool\n If True, the X axis will be shared amongst all subplots.\n\n sharey : bool\n If True, the Y axis will be shared amongst all subplots.\n\n squeeze : bool\n\n If True, extra dimensions are squeezed out from the returned axis object:\n - if only one subplot is constructed (nrows=ncols=1), the resulting\n single Axis object is returned as a scalar.\n - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object\n array of Axis objects are returned as numpy 1-d arrays.\n - for NxM subplots with N>1 and M>1 are returned as a 2d array.\n\n If False, no squeezing at all is done: the returned axis object is always\n a 2-d array containing Axis instances, even if it ends up being 1x1.\n\n subplot_kw : dict\n Dict with keywords passed to the add_subplot() call used to create each\n subplots.\n\n ax : Matplotlib axis object, optional\n\n layout : tuple\n Number of rows and columns of the subplot grid.\n If not specified, calculated from naxes and layout_type\n\n layout_type : {'box', 'horziontal', 'vertical'}, default 'box'\n Specify how to layout the subplot grid.\n\n fig_kw : Other keyword arguments to be passed to the figure() call.\n Note that all keywords not recognized above will be\n automatically included here.\n\n Returns:\n\n fig, ax : tuple\n - fig is the Matplotlib Figure object\n - ax can be either a single axis object or an array of axis objects if\n more than one subplot was created. The dimensions of the resulting array\n can be controlled with the squeeze keyword, see above.\n\n **Examples:**\n\n x = np.linspace(0, 2*np.pi, 400)\n y = np.sin(x**2)\n\n # Just a figure and one subplot\n f, ax = plt.subplots()\n ax.plot(x, y)\n ax.set_title('Simple plot')\n\n # Two subplots, unpack the output array immediately\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n ax1.plot(x, y)\n ax1.set_title('Sharing Y axis')\n ax2.scatter(x, y)\n\n # Four polar axes\n plt.subplots(2, 2, subplot_kw=dict(polar=True))\n \"\"\"\n import matplotlib.pyplot as plt\n\n if subplot_kw is None:\n subplot_kw = {}\n\n if ax is None:\n fig = plt.figure(**fig_kw)\n else:\n if com.is_list_like(ax):\n ax = _flatten(ax)\n if layout is not None:\n warnings.warn(\"When passing multiple axes, layout keyword is ignored\", UserWarning)\n if sharex or sharey:\n warnings.warn(\"When passing multiple axes, sharex and sharey are ignored.\"\n \"These settings must be specified when creating axes\", UserWarning)\n if len(ax) == naxes:\n fig = ax[0].get_figure()\n return fig, ax\n else:\n raise ValueError(\"The number of passed axes must be {0}, the same as \"\n \"the output plot\".format(naxes))\n\n fig = ax.get_figure()\n # if ax is passed and a number of subplots is 1, return ax as it is\n if naxes == 1:\n if squeeze:\n return fig, ax\n else:\n return fig, _flatten(ax)\n else:\n warnings.warn(\"To output multiple subplots, the figure containing the passed axes \"\n \"is being cleared\", UserWarning)\n fig.clear()\n\n nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)\n nplots = nrows * ncols\n\n # Create empty object array to hold all axes. 
It's easiest to make it 1-d\n # so we can just append subplots upon creation, and then\n axarr = np.empty(nplots, dtype=object)\n\n # Create first subplot separately, so we can share it if requested\n ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)\n\n if sharex:\n subplot_kw['sharex'] = ax0\n if sharey:\n subplot_kw['sharey'] = ax0\n axarr[0] = ax0\n\n # Note off-by-one counting because add_subplot uses the MATLAB 1-based\n # convention.\n for i in range(1, nplots):\n kwds = subplot_kw.copy()\n # Set sharex and sharey to None for blank/dummy axes, these can\n # interfere with proper axis limits on the visible axes if\n # they share axes e.g. issue #7528\n if i >= naxes:\n kwds['sharex'] = None\n kwds['sharey'] = None\n ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)\n axarr[i] = ax\n\n if nplots > 1:\n\n if sharex and nrows > 1:\n for ax in axarr[:naxes][:-ncols]: # only bottom row\n for label in ax.get_xticklabels():\n label.set_visible(False)\n try:\n # set_visible will not be effective if\n # minor axis has NullLocator and NullFormattor (default)\n import matplotlib.ticker as ticker\n ax.xaxis.set_minor_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(''))\n for label in ax.get_xticklabels(minor=True):\n label.set_visible(False)\n except Exception: # pragma no cover\n pass\n ax.xaxis.get_label().set_visible(False)\n if sharey and ncols > 1:\n for i, ax in enumerate(axarr):\n if (i % ncols) != 0: # only first column\n for label in ax.get_yticklabels():\n label.set_visible(False)\n try:\n import matplotlib.ticker as ticker\n ax.yaxis.set_minor_locator(ticker.AutoLocator())\n ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(''))\n for label in ax.get_yticklabels(minor=True):\n label.set_visible(False)\n except Exception: # pragma no cover\n pass\n ax.yaxis.get_label().set_visible(False)\n\n if naxes != nplots:\n for ax in axarr[naxes:]:\n ax.set_visible(False)\n\n if squeeze:\n # Reshape the array to have the final desired dimension (nrow,ncol),\n # though discarding unneeded dimensions that equal 1. 
If we only have\n # one subplot, just return it instead of a 1-element array.\n if nplots == 1:\n axes = axarr[0]\n else:\n axes = axarr.reshape(nrows, ncols).squeeze()\n else:\n # returned axis array will be always 2-d, even if nrows=ncols=1\n axes = axarr.reshape(nrows, ncols)\n\n return fig, axes\n\n\ndef _flatten(axes):\n if not com.is_list_like(axes):\n return np.array([axes])\n elif isinstance(axes, (np.ndarray, Index)):\n return axes.ravel()\n return np.array(axes)\n\n\ndef _get_all_lines(ax):\n lines = ax.get_lines()\n\n # check for right_ax, which can oddly sometimes point back to ax\n if hasattr(ax, 'right_ax') and ax.right_ax != ax:\n lines += ax.right_ax.get_lines()\n\n # no such risk with left_ax\n if hasattr(ax, 'left_ax'):\n lines += ax.left_ax.get_lines()\n\n return lines\n\n\ndef _get_xlim(lines):\n left, right = np.inf, -np.inf\n for l in lines:\n x = l.get_xdata(orig=False)\n left = min(x[0], left)\n right = max(x[-1], right)\n return left, right\n\n\ndef _set_ticks_props(axes, xlabelsize=None, xrot=None,\n ylabelsize=None, yrot=None):\n import matplotlib.pyplot as plt\n\n for ax in _flatten(axes):\n if xlabelsize is not None:\n plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)\n if xrot is not None:\n plt.setp(ax.get_xticklabels(), rotation=xrot)\n if ylabelsize is not None:\n plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)\n if yrot is not None:\n plt.setp(ax.get_yticklabels(), rotation=yrot)\n return axes\n\n\nif __name__ == '__main__':\n # import pandas.rpy.common as com\n # sales = com.load_data('sanfrancisco.home.sales', package='nutshell')\n # top10 = sales['zip'].value_counts()[:10].index\n # sales2 = sales[sales.zip.isin(top10)]\n # _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')\n\n # plt.show()\n\n import matplotlib.pyplot as plt\n\n import pandas.tools.plotting as plots\n import pandas.core.frame as fr\n reload(plots)\n reload(fr)\n from pandas.core.frame import DataFrame\n\n data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],\n [4, 9, -3], [2, 5, -1]],\n columns=['A', 'B', 'C'])\n data.plot(kind='barh', stacked=True)\n\n plt.show()\n" ]
[ [ "pandas.core.common.is_list_like", "pandas.util.decorators.deprecate_kwarg", "numpy.expand_dims", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "matplotlib.ticker.AutoLocator", "pandas.tseries.frequencies.get_freq", "numpy.max", "pandas.compat.map", "matplotlib.artist.setp", "numpy.mean", "scipy.stats.gaussian_kde", "pandas.compat.iteritems", "numpy.ma.array", "pandas.core.frame.DataFrame", "matplotlib.cm.get_cmap", "pandas.tseries.plotting.tsplot", "numpy.ma.masked_where", "pandas.core.common.notnull", "numpy.where", "matplotlib.pyplot.gca", "pandas.core.common.is_number", "pandas.compat.OrderedDict", "numpy.unique", "numpy.arange", "pandas.core.common.is_iterator", "pandas.tseries.frequencies.get_period_alias", "matplotlib.pyplot.gcf", "numpy.sin", "pandas.tseries.period.Period", "pandas.compat.lmap", "numpy.ravel", "matplotlib.ticker.FormatStrFormatter", "numpy.zeros", "pandas.util.decorators.Appender", "matplotlib.pyplot.figure", "pandas.core.common._try_sort", "matplotlib.pyplot.cm.get_cmap", "numpy.min", "numpy.median", "matplotlib.patches.Circle", "pandas.tools.merge.concat", "pandas.core.common.is_integer", "numpy.atleast_2d", "matplotlib.pyplot.get_fignums", "pandas.tseries.converter.register", "matplotlib.pyplot.show", "numpy.array", "numpy.sum", "pandas.tseries.frequencies.get_base_alias", "numpy.isreal", "pandas.core.series.remove_na", "numpy.cos", "numpy.tile", "pandas.compat.zip", "pandas.compat.lrange", "pandas.core.common.pprint_thing", "pandas.core.common.isnull", "numpy.empty", "pandas.compat.range" ] ]
eman/censusacs
[ "602c98d29c83462761f436c9cab6872e54079f6a" ]
[ "censusacs.py" ]
[ "import json\nimport os\n\nimport pandas as pd\nimport requests\n\nACS_ENDPOINT = \"https://api.census.gov/data/{year}/{program}/{frequency}\"\nVARIABLES = {\n \"NAME\": \"geography_name\",\n \"B01001_001E\": \"total_population\",\n \"B19013_001E\": \"median_household_income\",\n \"B11011_001E\": \"total_households\",\n \"B25001_001E\": \"housing_units\",\n \"B25075_001E\": \"owner_occupied_housing_units\",\n \"B25003_003E\": \"renter_occupied_housing_units\",\n \"B25002_003E\": \"vacant_housing_units\",\n \"B17026_010E\": \"two_times_fpl\",\n \"B17026_011E\": \"three_times_fpl\",\n \"B17026_012E\": \"four_times_fpl\",\n \"B17026_013E\": \"five_times_fpl\",\n \"B17026_001E\": \"total_income_to_poverty\",\n \"B19326_001E\": \"median_income_last_12_months\",\n}\n\nALTERNATE_KEYS = {\n \"census_tract\": \"tract\",\n \"municipality\": \"county+subdivision\",\n \"state_legislative_district_lower\": \"state+legislative+district+(lower+chamber)\",\n \"state_legislative_district_upper\": \"state+legislative+district+(upper+chamber)\",\n \"zcta\": \"zip+code+tabulation+area\",\n}\n\n\nclass ACSError(Exception):\n def __init__(self, error, url, variables):\n self.error = error\n self.url = url\n self.variables = variables\n\n def __str__(self):\n return self.error\n\n\nclass CensusACS(object):\n def __init__(self, year, frequency=\"acs5\", program=\"acs\", variables=None):\n if variables is None:\n variables = VARIABLES.keys()\n self.year = year\n self.frequency = frequency\n self.program = program\n self.api_key = os.environ.get(\"CENSUS_API_KEY\", None)\n if isinstance(variables, str):\n variables = [variables]\n self.variables = list(variables)\n\n @property\n def acs_endpoint(self):\n return ACS_ENDPOINT.format(\n year=self.year, frequency=self.frequency, program=self.program\n )\n\n def get_querystring(self, geography, geography_type, **kwargs):\n variables = \",\".join(self.variables)\n geographies = \"{0}:{1}\".format(geography_type, geography)\n within = \"+\".join([\"{0}:{1}\".format(k, v) for k, v in kwargs.items()])\n params = {\"get\": variables, \"for\": geographies, \"in\": within}\n if self.api_key is not None:\n params[\"key\"] = self.api_key\n return \"&\".join(\"{0}={1}\".format(k, v) for k, v in params.items())\n\n @staticmethod\n def format_response(response, columns):\n df = pd.DataFrame(response, columns=columns)\n df.rename(columns=VARIABLES, inplace=True)\n df.owner_occupied_housing_units = pd.to_numeric(\n df.owner_occupied_housing_units\n )\n df.renter_occupied_housing_units = pd.to_numeric(\n df.renter_occupied_housing_units\n )\n df.housing_units = pd.to_numeric(df.housing_units)\n df.vacant_housing_units = pd.to_numeric(df.vacant_housing_units)\n df[\"owner_occupied_percent\"] = (\n 100 * df[\"owner_occupied_housing_units\"] / df[\"housing_units\"]\n )\n df.owner_occupied_percent = df.owner_occupied_percent.round(0)\n df[\"renter_occupied_percent\"] = (\n 100 * df[\"renter_occupied_housing_units\"] / df[\"housing_units\"]\n )\n df.renter_occupied_percent = df.renter_occupied_percent.round()\n df[\"vacant_housing_units_percent\"] = (\n 100 * df[\"vacant_housing_units\"] / df[\"housing_units\"]\n )\n df.vacant_housing_units_percent = (\n df.vacant_housing_units_percent.round()\n )\n records = df.to_dict(orient=\"records\")\n if len(records) == 1:\n return records[0]\n return records\n\n def get_data(self, state, geography_type, geography=\"*\", **kwargs):\n if not isinstance(geography, str):\n geography = \",\".join(geography)\n if geography_type in ALTERNATE_KEYS:\n 
geography_type = ALTERNATE_KEYS[geography_type]\n variables = \",\".join(self.variables)\n state = \"state:{0}\".format(state)\n geographies = \"{0}:{1}\".format(geography_type, geography)\n params = {\"get\": variables, \"for\": geographies, \"in\": state}\n if geography_type == \"state\":\n params.pop(\"in\")\n params[\"for\"] = state\n if os.environ.get(\"CENSUS_API_KEY\"):\n params[\"key\"] = os.environ.get(\"CENSUS_API_KEY\")\n params_str = \"&\".join(\n \"{0}={1}\".format(k, v) for k, v in params.items()\n )\n response = requests.get(self.acs_endpoint, params_str)\n print(response.url)\n if not response.ok:\n raise ACSError(response.text, response.url, self.variables)\n response = response.json()\n columns = response.pop(0)\n df = pd.DataFrame(response, columns=columns)\n df.rename(columns=VARIABLES, inplace=True)\n df.owner_occupied_housing_units = pd.to_numeric(\n df.owner_occupied_housing_units\n )\n df.renter_occupied_housing_units = pd.to_numeric(\n df.renter_occupied_housing_units\n )\n df.housing_units = pd.to_numeric(df.housing_units)\n df.vacant_housing_units = pd.to_numeric(df.vacant_housing_units)\n df[\"owner_occupied_percent\"] = (\n 100 * df[\"owner_occupied_housing_units\"] / df[\"housing_units\"]\n )\n df.owner_occupied_percent = df.owner_occupied_percent.round(0)\n df[\"renter_occupied_percent\"] = (\n 100 * df[\"renter_occupied_housing_units\"] / df[\"housing_units\"]\n )\n df.renter_occupied_percent = df.renter_occupied_percent.round()\n df[\"vacant_housing_units_percent\"] = (\n 100 * df[\"vacant_housing_units\"] / df[\"housing_units\"]\n )\n df.vacant_housing_units_percent = (\n df.vacant_housing_units_percent.round()\n )\n records = df.to_dict(orient=\"records\")\n if len(records) == 1:\n return records[0]\n return records\n\n def get_zcta(self, state, zcta=\"*\"):\n geography_type = \"zip+code+tabulation+area\"\n return self.get_data(state, geography_type, zcta)\n\n def get_congressional_districts(self, state, district=\"*\"):\n geography_type = \"congressional+district\"\n return self.get_data(state, geography_type, district)\n\n def get_counties(self, state, county=\"*\"):\n return self.get_data(state, \"county\", county)\n\n def get_county_subdivisions(self, state, subdivision=\"*\"):\n geography_type = \"county+subdivision\"\n subdivisions = self.get_data(state, geography_type, subdivision=\"*\")\n if subdivision != \"*\":\n subdivision = subdivision[-5:]\n divisions = [\n d\n for d in subdivisions\n if d[\"county subdivision\"] == subdivision\n ]\n if divisions:\n return divisions[0]\n return divisions\n\n def get_places(self, state, place=\"*\"):\n return self.get_data(state, \"place\", place)\n\n def get_census_tracts(self, state, tract=\"*\"):\n tracts = self.get_data(state, \"tract\", \"*\")\n if tract != \"*\":\n tract = tract[-6:]\n tracts = [t for t in tracts if t[\"tract\"] == tract]\n if tracts:\n return tracts[0]\n return tracts\n\n def get_state_legislative_districts_upper(self, state, district=\"*\"):\n geography_type = \"state+legislative+district+(upper+chamber)\"\n return self.get_data(state, geography_type, district)\n\n def get_state_legislative_districts_lower(self, state, district=\"*\"):\n geography_type = \"state+legislative+district+(lower+chamber)\"\n return self.get_data(state, geography_type, district)\n\n\nif __name__ == \"__main__\":\n c = CensusACS(\"2015\")\n response = c.get_census_tracts(\"09\", \"09001240200\")\n print(json.dumps(response, indent=2))\n" ]
[ [ "pandas.to_numeric", "pandas.DataFrame" ] ]
rushic24/Multi-Language-RTVC
[ "f61f79ea119d10c876bd69b825f5cb84c9b66ac8" ]
[ "mlrtvc/src/core/encoder/data_objects/speaker_batch.py" ]
[ "import numpy as np\nfrom typing import List\nfrom core.encoder.data_objects.speaker import Speaker\n\n\nclass SpeakerBatch:\n def __init__(\n self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int\n ):\n self.speakers = speakers\n self.partials = {\n s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers\n }\n\n # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with\n # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40)\n self.data = np.array(\n [frames for s in speakers for _, frames, _ in self.partials[s]]\n )\n" ]
[ [ "numpy.array" ] ]
crizCraig/baselines
[ "4a8219c73282f459c75b7b2a5284b7215fa336e5", "4a8219c73282f459c75b7b2a5284b7215fa336e5" ]
[ "baselines/acktr/utils.py", "baselines/acktr/kfac_utils.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\nimport baselines.common.tf_util as U\nfrom collections import deque\n\ndef sample(logits):\n noise = tf.random_uniform(tf.shape(logits))\n return tf.argmax(logits - tf.log(-tf.log(noise)), 1)\n\ndef std(x):\n mean = tf.reduce_mean(x)\n var = tf.reduce_mean(tf.square(x-mean))\n return tf.sqrt(var)\n\ndef cat_entropy(logits):\n a0 = logits - tf.reduce_max(logits, 1, keep_dims=True)\n ea0 = tf.exp(a0)\n z0 = tf.reduce_sum(ea0, 1, keep_dims=True)\n p0 = ea0 / z0\n return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)\n\ndef cat_entropy_softmax(p0):\n return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)\n\ndef mse(pred, target):\n return tf.square(pred-target)/2.\n\ndef ortho_init(scale=1.0):\n def _ortho_init(shape, dtype, partition_info=None):\n #lasagne ortho init for tf\n shape = tuple(shape)\n if len(shape) == 2:\n flat_shape = shape\n elif len(shape) == 4: # assumes NHWC\n flat_shape = (np.prod(shape[:-1]), shape[-1])\n else:\n raise NotImplementedError\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return (scale * q[:shape[0], :shape[1]]).astype(np.float32)\n return _ortho_init\n\ndef conv(x, scope, nf, rf, stride, pad='VALID', act=tf.nn.relu, init_scale=1.0):\n with tf.variable_scope(scope):\n nin = x.get_shape()[3].value\n w = tf.get_variable(\"w\", [rf, rf, nin, nf], initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", [nf], initializer=tf.constant_initializer(0.0))\n z = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=pad)+b\n h = act(z)\n return h\n\ndef fc(x, scope, nh, act=tf.nn.relu, init_scale=1.0):\n with tf.variable_scope(scope):\n nin = x.get_shape()[1].value\n w = tf.get_variable(\"w\", [nin, nh], initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", [nh], initializer=tf.constant_initializer(0.0))\n z = tf.matmul(x, w)+b\n h = act(z)\n return h\n\ndef dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):\n with tf.variable_scope(name, reuse=reuse):\n assert (len(U.scope_name().split('/')) == 2)\n\n w = tf.get_variable(\"w\", [x.get_shape()[1], size], initializer=weight_init)\n b = tf.get_variable(\"b\", [size], initializer=tf.constant_initializer(bias_init))\n weight_decay_fc = 3e-4\n\n if weight_loss_dict is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')\n if weight_loss_dict is not None:\n weight_loss_dict[w] = weight_decay_fc\n weight_loss_dict[b] = 0.0\n\n tf.add_to_collection(U.scope_name().split('/')[0] + '_' + 'losses', weight_decay)\n\n return tf.nn.bias_add(tf.matmul(x, w), b)\n\ndef conv_to_fc(x):\n nh = np.prod([v.value for v in x.get_shape()[1:]])\n x = tf.reshape(x, [-1, nh])\n return x\n\ndef kl_div(action_dist1, action_dist2, action_size):\n mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]\n mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]\n\n numerator = tf.square(mean1 - mean2) + tf.square(std1) - tf.square(std2)\n denominator = 2 * tf.square(std2) + 1e-8\n return tf.reduce_sum(\n numerator/denominator + tf.log(std2) - tf.log(std1),reduction_indices=-1)\n\ndef discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r*(1.-done) # fixed off by one bug\n discounted.append(r)\n return 
discounted[::-1]\n\ndef find_trainable_variables(key):\n with tf.variable_scope(key):\n return tf.trainable_variables()\n\ndef make_path(f):\n return os.makedirs(f, exist_ok=True)\n\ndef constant(p):\n return 1\n\ndef linear(p):\n return 1-p\n\n\ndef middle_drop(p):\n eps = 0.75\n if 1-p<eps:\n return eps*0.1\n return 1-p\n\ndef double_linear_con(p):\n p *= 2\n eps = 0.125\n if 1-p<eps:\n return eps\n return 1-p\n\n\ndef double_middle_drop(p):\n eps1 = 0.75\n eps2 = 0.25\n if 1-p<eps1:\n if 1-p<eps2:\n return eps2*0.5\n return eps1*0.1\n return 1-p\n\n\nschedules = {\n 'linear':linear,\n 'constant':constant,\n 'double_linear_con':double_linear_con,\n 'middle_drop':middle_drop,\n 'double_middle_drop':double_middle_drop\n}\n\nclass Scheduler(object):\n\n def __init__(self, v, nvalues, schedule):\n self.n = 0.\n self.v = v\n self.nvalues = nvalues\n self.schedule = schedules[schedule]\n\n def value(self):\n current_value = self.v*self.schedule(self.n/self.nvalues)\n self.n += 1.\n return current_value\n\n def value_steps(self, steps):\n return self.v*self.schedule(steps/self.nvalues)\n\n\nclass EpisodeStats:\n def __init__(self, nsteps, nenvs):\n self.episode_rewards = []\n for i in range(nenvs):\n self.episode_rewards.append([])\n self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths\n self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards\n self.nsteps = nsteps\n self.nenvs = nenvs\n\n def feed(self, rewards, masks):\n rewards = np.reshape(rewards, [self.nenvs, self.nsteps])\n masks = np.reshape(masks, [self.nenvs, self.nsteps])\n for i in range(0, self.nenvs):\n for j in range(0, self.nsteps):\n self.episode_rewards[i].append(rewards[i][j])\n if masks[i][j]:\n l = len(self.episode_rewards[i])\n s = sum(self.episode_rewards[i])\n self.lenbuffer.append(l)\n self.rewbuffer.append(s)\n self.episode_rewards[i] = []\n\n def mean_length(self):\n if self.lenbuffer:\n return np.mean(self.lenbuffer)\n else:\n return 0 # on the first params dump, no episodes are finished\n\n def mean_reward(self):\n if self.rewbuffer:\n return np.mean(self.rewbuffer)\n else:\n return 0\n", "import tensorflow as tf\nimport numpy as np\n\n\ndef gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):\n if reduce_dim == None:\n # general batch matmul\n if len(a.get_shape()) == 3 and len(b.get_shape()) == 3:\n return tf.batch_matmul(a, b, adj_x=transpose_a, adj_y=transpose_b)\n elif len(a.get_shape()) == 3 and len(b.get_shape()) == 2:\n if transpose_b:\n N = b.get_shape()[0].value\n else:\n N = b.get_shape()[1].value\n B = a.get_shape()[0].value\n if transpose_a:\n K = a.get_shape()[1].value\n a = tf.reshape(tf.transpose(a, [0, 2, 1]), [-1, K])\n else:\n K = a.get_shape()[-1].value\n a = tf.reshape(a, [-1, K])\n result = tf.matmul(a, b, transpose_b=transpose_b)\n result = tf.reshape(result, [B, -1, N])\n return result\n elif len(a.get_shape()) == 2 and len(b.get_shape()) == 3:\n if transpose_a:\n M = a.get_shape()[1].value\n else:\n M = a.get_shape()[0].value\n B = b.get_shape()[0].value\n if transpose_b:\n K = b.get_shape()[-1].value\n b = tf.transpose(tf.reshape(b, [-1, K]), [1, 0])\n else:\n K = b.get_shape()[1].value\n b = tf.transpose(tf.reshape(\n tf.transpose(b, [0, 2, 1]), [-1, K]), [1, 0])\n result = tf.matmul(a, b, transpose_a=transpose_a)\n result = tf.transpose(tf.reshape(result, [M, B, -1]), [1, 0, 2])\n return result\n else:\n return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n else:\n # weird batch matmul\n if len(a.get_shape()) == 2 and 
len(b.get_shape()) > 2:\n # reshape reduce_dim to the left most dim in b\n b_shape = b.get_shape()\n if reduce_dim != 0:\n b_dims = list(range(len(b_shape)))\n b_dims.remove(reduce_dim)\n b_dims.insert(0, reduce_dim)\n b = tf.transpose(b, b_dims)\n b_t_shape = b.get_shape()\n b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])\n result = tf.matmul(a, b, transpose_a=transpose_a,\n transpose_b=transpose_b)\n result = tf.reshape(result, b_t_shape)\n if reduce_dim != 0:\n b_dims = list(range(len(b_shape)))\n b_dims.remove(0)\n b_dims.insert(reduce_dim, 0)\n result = tf.transpose(result, b_dims)\n return result\n\n elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:\n # reshape reduce_dim to the right most dim in a\n a_shape = a.get_shape()\n outter_dim = len(a_shape) - 1\n reduce_dim = len(a_shape) - reduce_dim - 1\n if reduce_dim != outter_dim:\n a_dims = list(range(len(a_shape)))\n a_dims.remove(reduce_dim)\n a_dims.insert(outter_dim, reduce_dim)\n a = tf.transpose(a, a_dims)\n a_t_shape = a.get_shape()\n a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])\n result = tf.matmul(a, b, transpose_a=transpose_a,\n transpose_b=transpose_b)\n result = tf.reshape(result, a_t_shape)\n if reduce_dim != outter_dim:\n a_dims = list(range(len(a_shape)))\n a_dims.remove(outter_dim)\n a_dims.insert(reduce_dim, outter_dim)\n result = tf.transpose(result, a_dims)\n return result\n\n elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:\n return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)\n\n assert False, 'something went wrong'\n\n\ndef clipoutNeg(vec, threshold=1e-6):\n mask = tf.cast(vec > threshold, tf.float32)\n return mask * vec\n\n\ndef detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):\n eigen_min = tf.reduce_min(input_mat)\n eigen_max = tf.reduce_max(input_mat)\n eigen_ratio = eigen_max / eigen_min\n input_mat_clipped = clipoutNeg(input_mat, threshold)\n\n if debug:\n input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(\n input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))\n\n return input_mat_clipped\n\n\ndef factorReshape(Q, e, grad, facIndx=0, ftype='act'):\n grad_shape = grad.get_shape()\n if ftype == 'act':\n assert e.get_shape()[0] == grad_shape[facIndx]\n expanded_shape = [1, ] * len(grad_shape)\n expanded_shape[facIndx] = -1\n e = tf.reshape(e, expanded_shape)\n if ftype == 'grad':\n assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]\n expanded_shape = [1, ] * len(grad_shape)\n expanded_shape[len(grad_shape) - facIndx - 1] = -1\n e = tf.reshape(e, expanded_shape)\n\n return Q, e\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.nn.l2_loss", "numpy.mean", "tensorflow.nn.conv2d", "numpy.linalg.svd", "numpy.reshape", "tensorflow.square", "tensorflow.trainable_variables", "tensorflow.matmul", "tensorflow.shape", "tensorflow.exp", "tensorflow.reduce_max", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.constant_initializer", "numpy.random.normal", "tensorflow.log", "numpy.prod", "tensorflow.variable_scope", "tensorflow.sqrt" ], [ "tensorflow.convert_to_tensor", "tensorflow.batch_matmul", "tensorflow.reduce_max", "tensorflow.matmul", "tensorflow.transpose", "tensorflow.greater", "tensorflow.less", "tensorflow.reshape", "tensorflow.cast", "tensorflow.reduce_min" ] ]
pasin30055/planning-evaluation-framework
[ "ba5fc3b553fee0b4f5beb50076ecaa7b634dac23" ]
[ "src/driver/experimental_trial.py" ]
[ "# Copyright 2021 The Private Cardinality Estimation Framework Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defines one experimental trial.\n\nAn experimental trial represents running a specific ModelingStrategy\nagainst a specific DataSet, with specific SystemParameters.\n\"\"\"\n\nfrom absl import logging\nfrom datetime import datetime\nimport hashlib\nimport numpy as np\nimport pandas as pd\nfrom os.path import isfile, join\nfrom pathlib import Path\nimport traceback\nfrom typing import List\nfrom typing import NamedTuple\n\nfrom wfa_planning_evaluation_framework.data_generators.data_design import (\n DataDesign,\n)\nfrom wfa_planning_evaluation_framework.simulator.modeling_strategy import (\n ModelingStrategy,\n)\nfrom wfa_planning_evaluation_framework.simulator.privacy_tracker import (\n PrivacyBudget,\n PrivacyTracker,\n)\nfrom wfa_planning_evaluation_framework.simulator.halo_simulator import (\n HaloSimulator,\n)\nfrom wfa_planning_evaluation_framework.simulator.system_parameters import (\n SystemParameters,\n)\nfrom wfa_planning_evaluation_framework.driver.experiment_parameters import (\n ExperimentParameters,\n)\nfrom wfa_planning_evaluation_framework.driver.modeling_strategy_descriptor import (\n ModelingStrategyDescriptor,\n)\nfrom wfa_planning_evaluation_framework.driver.trial_descriptor import (\n TrialDescriptor,\n)\nfrom wfa_planning_evaluation_framework.driver.test_point_aggregator import (\n aggregate,\n aggregate_on_exception,\n)\n\n# The output dataframe will contain the estimation error for each of the\n# following relative spend fractions. In other words, if r is one of the\n# values below and s is the spend fraction associated to the training point,\n# then evaluate the relative error at r * s.\nSINGLE_PUBLISHER_FRACTIONS = np.arange(1, 31) * 0.1\n\nSINGLE_PUB_ANALYSIS = \"single_pub\"\n\n\nclass ExperimentalTrial:\n \"\"\"A run of a ModelingStrategy against a DataSet.\"\"\"\n\n def __init__(\n self,\n experiment_dir: str,\n data_design: DataDesign,\n data_set_name: str,\n trial_descriptor: TrialDescriptor,\n analysis_type: str = \"\",\n ):\n \"\"\"Constructs an object representing a trial.\n\n A trial represents a run of a specific ModelingStrategy against a\n specific DataSet, with specific SystemParameters and ExperimentParameters.\n\n Args:\n experiment_dir: The name of a directory where intermediate results\n are stored. The results for this specific trial will be stored in\n the file {experiment_dir}/{data_set_name}/{trial_name}. The\n trial_name is constructed from the ModelingStrategyDescriptor and the\n SystemParameters.\n data_design: A DataDesign object specifying the source of the data\n that will be used for this trial.\n data_set_name: The name of the specific DataSet within the DataDesign\n that will be used for this trial.\n trial_descriptor: A descriptor that specifies the configuration\n of this experimental trial.\n analysis_type: Type of analysis. Can be empty of \"single_pub\". 
If\n \"single_pub\" is specified, then additional columns are added to the\n output that are specific to single publisher analysis.\n \"\"\"\n self._experiment_dir = experiment_dir\n self._data_design = data_design\n self._data_set_name = data_set_name\n self._trial_descriptor = trial_descriptor\n self._analysis_type = analysis_type\n\n def evaluate(self, seed: int) -> pd.DataFrame:\n \"\"\"Executes a trial.\n\n 1. Check if the results for the trial have already been computed.\n 2. Load the DataSet.\n 3. Instantiate Halo Simulator.\n 4. Instantiate Modeling Strategy.\n 5. Fit model.\n 6. Generate set of test points.\n 7. Compute metrics.\n 8. Construct output DataFrame.\n 9. Save to disk.\n\n Args:\n seed: A seed value that is used to initialize the random\n number generator.\n\n Returns:\n A single row DataFrame containing the results of the evaluation\n of this trial.\n \"\"\"\n logging.vlog(2, f\"Dataset {self._data_set_name}\")\n logging.vlog(2, f\"Trial {self._trial_descriptor}\")\n\n rng = np.random.default_rng(seed=seed)\n np.random.seed(seed)\n\n trial_results_path = self._compute_trial_results_path()\n if isfile(trial_results_path):\n logging.vlog(2, \" --> Returning previously computed result\")\n return pd.read_csv(trial_results_path)\n\n # The pending directory contains one entry for each currently executing\n # experimental trial. If a computation appears to hang, this can be\n # used to check which evaluations are still pending.\n experiment_dir_parent = Path(self._experiment_dir).parent\n pending_path = Path(\n f\"{experiment_dir_parent}/pending/{hashlib.md5(trial_results_path.encode()).hexdigest()}\"\n )\n Path(pending_path).parent.absolute().mkdir(parents=True, exist_ok=True)\n Path(pending_path).write_text(\n f\"{datetime.now()}\\n{self._data_set_name}\\n{self._trial_descriptor}\\n\\n\"\n )\n\n self._dataset = self._data_design.by_name(self._data_set_name)\n self._privacy_tracker = PrivacyTracker()\n halo = HaloSimulator(\n self._dataset, self._trial_descriptor.system_params, self._privacy_tracker\n )\n privacy_budget = self._trial_descriptor.experiment_params.privacy_budget\n modeling_strategy = (\n self._trial_descriptor.modeling_strategy.instantiate_strategy()\n )\n single_publisher_dataframe = pd.DataFrame()\n try:\n reach_surface = modeling_strategy.fit(\n halo, self._trial_descriptor.system_params, privacy_budget\n )\n test_points = list(\n self._trial_descriptor.experiment_params.generate_test_points(\n self._dataset, rng\n )\n )\n true_reach = [\n halo.true_reach_by_spend(\n t, self._trial_descriptor.experiment_params.max_frequency\n )\n for t in test_points\n ]\n fitted_reach = [\n reach_surface.by_spend(\n t, self._trial_descriptor.experiment_params.max_frequency\n )\n for t in test_points\n ]\n metrics = aggregate(true_reach, fitted_reach)\n if self._analysis_type == SINGLE_PUB_ANALYSIS:\n single_publisher_dataframe = (\n self._compute_single_publisher_fractions_dataframe(\n halo, reach_surface\n )\n )\n except Exception as inst:\n if not logging.vlog_is_on(2):\n logging.vlog(1, f\"Dataset {self._data_set_name}\")\n logging.vlog(1, f\"Trial {self._trial_descriptor}\")\n logging.vlog(1, f\"Modeling failure: {inst}\")\n logging.vlog(2, traceback.format_exc())\n metrics = aggregate_on_exception(inst)\n if self._analysis_type == SINGLE_PUB_ANALYSIS:\n single_publisher_dataframe = (\n self._single_publisher_fractions_dataframe_on_exception()\n )\n\n independent_vars = self._make_independent_vars_dataframe()\n privacy_tracking_vars = 
self._make_privacy_tracking_vars_dataframe(\n self._privacy_tracker\n )\n result = pd.concat(\n [\n independent_vars,\n privacy_tracking_vars,\n metrics,\n single_publisher_dataframe,\n ],\n axis=1,\n )\n Path(trial_results_path).parent.absolute().mkdir(parents=True, exist_ok=True)\n result.to_csv(trial_results_path)\n\n Path(pending_path).unlink()\n\n return result\n\n def _compute_trial_results_path(self) -> str:\n \"\"\"Returns path of file where the results of this trial are stored.\"\"\"\n return f\"{self._experiment_dir}/{self._data_set_name}/{self._trial_descriptor}\"\n\n def _make_independent_vars_dataframe(self) -> pd.DataFrame:\n \"\"\"Returns a 1-row DataFrame of independent variables for this trial.\"\"\"\n data_set = self._data_design.by_name(self._data_set_name)\n independent_vars = pd.DataFrame(\n {\n \"dataset\": [self._data_set_name],\n \"trial\": [f\"{self._trial_descriptor}\"],\n \"replica_id\": [self._trial_descriptor.experiment_params.replica_id],\n \"single_pub_model\": [\n self._trial_descriptor.modeling_strategy.single_pub_model\n ],\n \"multi_pub_model\": [\n self._trial_descriptor.modeling_strategy.multi_pub_model\n ],\n \"strategy\": [self._trial_descriptor.modeling_strategy.strategy],\n \"liquid_legions_sketch_size\": [\n self._trial_descriptor.system_params.liquid_legions.sketch_size\n ],\n \"liquid_legions_decay_rate\": [\n self._trial_descriptor.system_params.liquid_legions.decay_rate\n ],\n \"maximum_reach\": [data_set.maximum_reach],\n \"ncampaigns\": [data_set.publisher_count],\n \"largest_pub_reach\": [max([p.max_reach for p in data_set._data])],\n \"max_frequency\": [\n self._trial_descriptor.experiment_params.max_frequency\n ],\n \"average_spend_fraction\": [\n np.mean(\n self._trial_descriptor.system_params.campaign_spend_fractions\n )\n ],\n }\n )\n return independent_vars\n\n def _make_privacy_tracking_vars_dataframe(\n self, privacy_tracker: PrivacyTracker\n ) -> pd.DataFrame:\n \"\"\"Returns a 1-row DataFrame of privacy-related data for this trial.\"\"\"\n mechanisms_string = \"/\".join(sorted(set(privacy_tracker.mechanisms)))\n\n privacy_vars = pd.DataFrame(\n {\n \"privacy_budget_epsilon\": [\n self._trial_descriptor.experiment_params.privacy_budget.epsilon\n ],\n \"privacy_budget_delta\": [\n self._trial_descriptor.experiment_params.privacy_budget.delta\n ],\n \"privacy_used_epsilon\": [privacy_tracker.privacy_consumption.epsilon],\n \"privacy_used_delta\": [privacy_tracker.privacy_consumption.delta],\n \"privacy_mechanisms\": [mechanisms_string],\n }\n )\n return privacy_vars\n\n def _compute_single_publisher_fractions_dataframe(\n self, halo, reach_surface\n ) -> pd.DataFrame:\n results = {}\n for r in SINGLE_PUBLISHER_FRACTIONS:\n spend = halo.campaign_spends[0] * r\n true_reach = halo.true_reach_by_spend([spend], 1).reach()\n fitted_reach = reach_surface.by_spend([spend], 1).reach()\n if true_reach:\n relative_error = np.abs((true_reach - fitted_reach) / true_reach)\n else:\n relative_error = np.NaN\n column_name = f\"relative_error_at_{int(r*100):03d}\"\n results[column_name] = [relative_error]\n\n # Also, record the maximum frequency in the actual data and the\n # data produced by Halo.\n training_point = reach_surface._data[0]\n results[\"max_nonzero_frequency_from_halo\"] = [\n max(\n [(i + 1) for i, f in enumerate(training_point._kplus_reaches) if f != 0]\n )\n ]\n data_point = halo.true_reach_by_spend(halo.campaign_spends)\n results[\"max_nonzero_frequency_from_data\"] = [\n max([(i + 1) for i, f in enumerate(data_point._kplus_reaches) 
if f != 0])\n ]\n return pd.DataFrame(results)\n\n def _single_publisher_fractions_dataframe_on_exception(self) -> pd.DataFrame:\n results = {}\n for r in SINGLE_PUBLISHER_FRACTIONS:\n column_name = f\"relative_error_at_{int(r*100):03d}\"\n results[column_name] = [np.NaN]\n results[\"max_nonzero_frequency_from_halo\"] = [np.NaN]\n results[\"max_nonzero_frequency_from_data\"] = [np.NaN]\n return pd.DataFrame(results)\n" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.abs", "numpy.random.seed", "numpy.arange", "pandas.DataFrame", "numpy.mean", "numpy.random.default_rng" ] ]
JPchico/aiida-lammps
[ "8f618541784bbd6360efc653350570cf76398e83", "8f618541784bbd6360efc653350570cf76398e83" ]
[ "aiida_lammps/calculations/lammps/combinate.py", "conftest.py" ]
[ "# Not working with Aiida 1.0\n\nfrom aiida.common.exceptions import InputValidationError\nfrom aiida.orm import ArrayData, Dict\nfrom aiida_phonopy.common.raw_parsers import (\n get_force_constants,\n get_FORCE_SETS_txt,\n get_poscar_txt,\n)\nimport numpy as np\n\nfrom aiida_lammps.calculations.lammps import BaseLammpsCalculation\n\n\ndef generate_dynaphopy_input(\n parameters_object,\n poscar_name=\"POSCAR\",\n force_constants_name=\"FORCE_CONSTANTS\",\n force_sets_filename=\"FORCE_SETS\",\n use_sets=False,\n):\n\n parameters = parameters_object.get_dict()\n input_file = \"STRUCTURE FILE POSCAR\\n{}\\n\\n\".format(poscar_name)\n\n if use_sets:\n input_file += \"FORCE SETS\\n{}\\n\\n\".format(force_sets_filename)\n else:\n input_file += \"FORCE CONSTANTS\\n{}\\n\\n\".format(force_constants_name)\n\n input_file += \"PRIMITIVE MATRIX\\n\"\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"primitive\"])[0])\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"primitive\"])[1])\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"primitive\"])[2])\n input_file += \"\\n\"\n input_file += \"SUPERCELL MATRIX PHONOPY\\n\"\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"supercell\"])[0])\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"supercell\"])[1])\n input_file += \"{} {} {} \\n\".format(*np.array(parameters[\"supercell\"])[2])\n input_file += \"\\n\"\n\n return input_file\n\n\nclass CombinateCalculation(BaseLammpsCalculation):\n\n _POSCAR_NAME = \"POSCAR\"\n _INPUT_FORCE_CONSTANTS = \"FORCE_CONSTANTS\"\n _INPUT_FORCE_SETS = \"FORCE_SETS\"\n _INPUT_FILE_NAME_DYNA = \"input_dynaphopy\"\n _OUTPUT_FORCE_CONSTANTS = \"FORCE_CONSTANTS_OUT\"\n _OUTPUT_QUASIPARTICLES = \"quasiparticles_data.yaml\"\n _OUTPUT_FILE_NAME = \"OUTPUT\"\n\n # self._retrieve_list = [self._OUTPUT_QUASIPARTICLES, self._OUTPUT_FORCE_CONSTANTS, self._OUTPUT_FILE_NAME]\n\n @classmethod\n def define(cls, spec):\n super(CombinateCalculation, cls).define(spec)\n spec.input(\n \"metadata.options.parser_name\",\n valid_type=str,\n default=\"dynaphopy\",\n )\n spec.input(\"parameters_dynaphopy\", valid_type=Dict, help=\"dynaphopy parameters\")\n spec.input(\n \"force_constants\", valid_type=ArrayData, help=\"harmonic force constants\"\n )\n spec.input(\"force_sets\", valid_type=ArrayData, help=\"phonopy force sets\")\n\n # spec.input('settings', valid_type=str, default='lammps.optimize')\n\n @staticmethod\n def create_main_input_content(\n parameter_data,\n potential_data,\n structure_data,\n structure_filename,\n trajectory_filename,\n system_filename,\n restart_filename,\n ):\n\n random_number = np.random.randint(10000000)\n\n lammps_input_file = \"units {0}\\n\".format(potential_data.default_units)\n lammps_input_file += \"boundary p p p\\n\"\n lammps_input_file += \"box tilt large\\n\"\n lammps_input_file += \"atom_style {0}\\n\".format(potential_data.atom_style)\n lammps_input_file += \"read_data {}\\n\".format(structure_filename)\n\n lammps_input_file += potential_data.get_input_lines(structure_data)\n\n lammps_input_file += \"neighbor 0.3 bin\\n\"\n lammps_input_file += \"neigh_modify every 1 delay 0 check no\\n\"\n\n lammps_input_file += (\n \"velocity all create {0} {1} dist gaussian mom yes\\n\".format(\n parameter_data.dict.temperature, random_number\n )\n )\n lammps_input_file += \"velocity all scale {}\\n\".format(\n parameter_data.dict.temperature\n )\n\n lammps_input_file += \"fix int all nvt temp {0} {0} {1}\\n\".format(\n parameter_data.dict.temperature, 
parameter_data.dict.thermostat_variable\n )\n\n return lammps_input_file\n\n def prepare_extra_files(self, tempfolder, potential_object):\n\n if \"fore_constants\" in self.inputs:\n force_constants = self.inputs.force_constants\n else:\n force_constants = None\n\n if \"fore_constants\" in self.inputs:\n force_sets = self.inputs.force_sets\n else:\n force_sets = None\n\n cell_txt = get_poscar_txt(self.inputs.structure)\n\n cell_filename = tempfolder(self._POSCAR_NAME)\n with open(cell_filename, \"w\") as infile:\n infile.write(cell_txt)\n\n if force_constants is not None:\n force_constants_txt = get_force_constants(force_constants)\n force_constants_filename = tempfolder.get_abs_path(\n self._INPUT_FORCE_CONSTANTS\n )\n with open(force_constants_filename, \"w\") as infile:\n infile.write(force_constants_txt)\n\n elif force_sets is not None:\n force_sets_txt = get_FORCE_SETS_txt(force_sets)\n force_sets_filename = tempfolder.get_abs_path(self._INPUT_FORCE_SETS)\n with open(force_sets_filename, \"w\") as infile:\n infile.write(force_sets_txt)\n else:\n raise InputValidationError(\n \"no force_sets nor force_constants are specified for this calculation\"\n )\n\n try:\n parameters_data_dynaphopy = Dict.pop(\n self.get_linkname(\"parameters_dynaphopy\")\n )\n except KeyError:\n raise InputValidationError(\n \"No dynaphopy parameters specified for this calculation\"\n )\n\n parameters_dynaphopy_txt = generate_dynaphopy_input(\n parameters_data_dynaphopy,\n poscar_name=self._POSCAR_NAME,\n force_constants_name=self._INPUT_FORCE_CONSTANTS,\n force_sets_filename=self._INPUT_FORCE_SETS,\n use_sets=force_sets is not None,\n )\n\n dynaphopy_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME_DYNA)\n with open(dynaphopy_filename, \"w\") as infile:\n infile.write(parameters_dynaphopy_txt)\n\n md_supercell = parameters_data_dynaphopy.dict.md_supercell\n\n time_step = self._parameters_data.dict.timestep\n equilibrium_time = self._parameters_data.dict.equilibrium_steps * time_step\n total_time = self._parameters_data.dict.total_steps * time_step\n\n self._cmdline_params = [\n self._INPUT_FILE_NAME_DYNA,\n \"--run_lammps\",\n self._INPUT_FILE_NAME,\n \"{}\".format(total_time),\n \"{}\".format(time_step),\n \"{}\".format(equilibrium_time),\n \"--dim\",\n \"{}\".format(md_supercell[0]),\n \"{}\".format(md_supercell[1]),\n \"{}\".format(md_supercell[2]),\n \"--silent\",\n \"-sfc\",\n self._OUTPUT_FORCE_CONSTANTS,\n \"-thm\", # '--resolution 0.01',\n \"-psm\",\n \"2\",\n \"--normalize_dos\",\n \"-sdata\",\n \"--velocity_only\",\n \"--temperature\",\n \"{}\".format(self._parameters_data.dict.temperature),\n ]\n\n if \"md_commensurate\" in parameters_data_dynaphopy.get_dict():\n if parameters_data_dynaphopy.dict.md_commensurate:\n self._cmdline_params.append(\"--MD_commensurate\")\n", "\"\"\"\ninitialise a text database and profile\n\"\"\"\nfrom collections import namedtuple\nimport io\nimport os\nimport shutil\nimport tempfile\n\nfrom aiida.plugins import DataFactory\nimport numpy as np\nimport pytest\n\nfrom aiida_lammps.tests.utils import TEST_DIR, AiidaTestApp\n\npytest_plugins = [\"aiida.manage.tests.pytest_fixtures\"]\n\n\ndef pytest_addoption(parser):\n \"\"\"Define pytest command-line.\"\"\"\n group = parser.getgroup(\"aiida_lammps\")\n\n group.addoption(\n \"--lammps-workdir\",\n dest=\"lammps_workdir\",\n default=None,\n help=(\n \"Specify a work directory path for aiida calcjob execution. 
\"\n \"If not specified, \"\n \"a temporary directory is used and deleted after tests execution.\"\n ),\n )\n group.addoption(\n \"--lammps-exec\",\n dest=\"lammps_exec\",\n default=None,\n help=(\"Specify a the lammps executable to run (default: lammps).\"),\n )\n\n\ndef get_work_directory(config):\n \"\"\"Return the aiida work directory to use.\"\"\"\n if config.getoption(\"lammps_workdir\") is not None:\n return config.getoption(\"lammps_workdir\")\n return None\n\n\ndef pytest_report_header(config):\n \"\"\"Add header information for pytest execution.\"\"\"\n return [\n \"LAMMPS Executable: {}\".format(\n shutil.which(config.getoption(\"lammps_exec\") or \"lammps\")\n ),\n \"LAMMPS Work Directory: {}\".format(\n config.getoption(\"lammps_workdir\") or \"<TEMP>\"\n ),\n ]\n\n\n@pytest.fixture(scope=\"function\")\ndef db_test_app(aiida_profile, pytestconfig):\n \"\"\"Clear the database after each test.\"\"\"\n exec_name = pytestconfig.getoption(\"lammps_exec\") or \"lammps\"\n executables = {\n \"lammps.md\": exec_name,\n \"lammps.md.multi\": exec_name,\n \"lammps.optimize\": exec_name,\n \"lammps.force\": exec_name,\n \"lammps.combinate\": exec_name,\n }\n\n test_workdir = get_work_directory(pytestconfig)\n if test_workdir:\n work_directory = test_workdir\n else:\n work_directory = tempfile.mkdtemp()\n\n yield AiidaTestApp(work_directory, executables, environment=aiida_profile)\n aiida_profile.reset_db()\n\n if not test_workdir:\n shutil.rmtree(work_directory)\n\n\n@pytest.fixture(scope=\"function\")\ndef get_structure_data():\n def _get_structure_data(pkey):\n \"\"\"return test structure data\"\"\"\n if pkey == \"Fe\":\n\n cell = [\n [2.848116, 0.000000, 0.000000],\n [0.000000, 2.848116, 0.000000],\n [0.000000, 0.000000, 2.848116],\n ]\n\n positions = [\n (0.0000000, 0.0000000, 0.0000000),\n (0.5000000, 0.5000000, 0.5000000),\n ]\n fractional = True\n\n symbols = [\"Fe\", \"Fe\"]\n names = [\"Fe1\", \"Fe2\"]\n\n elif pkey == \"Ar\":\n\n cell = [\n [3.987594, 0.000000, 0.000000],\n [-1.993797, 3.453358, 0.000000],\n [0.000000, 0.000000, 6.538394],\n ]\n\n symbols = names = [\"Ar\"] * 2\n\n positions = [(0.33333, 0.66666, 0.25000), (0.66667, 0.33333, 0.75000)]\n fractional = True\n\n elif pkey == \"GaN\":\n\n cell = [\n [3.1900000572, 0, 0],\n [-1.5950000286, 2.762621076, 0],\n [0.0, 0, 5.1890001297],\n ]\n\n positions = [\n (0.6666669, 0.3333334, 0.0000000),\n (0.3333331, 0.6666663, 0.5000000),\n (0.6666669, 0.3333334, 0.3750000),\n (0.3333331, 0.6666663, 0.8750000),\n ]\n fractional = True\n\n symbols = names = [\"Ga\", \"Ga\", \"N\", \"N\"]\n\n elif pkey == \"pyrite\":\n\n cell = [\n [5.38, 0.000000, 0.000000],\n [0.000000, 5.38, 0.000000],\n [0.000000, 0.000000, 5.38],\n ]\n\n positions = [\n [0.0, 0.0, 0.0],\n [0.5, 0.0, 0.5],\n [0.0, 0.5, 0.5],\n [0.5, 0.5, 0.0],\n [0.338, 0.338, 0.338],\n [0.662, 0.662, 0.662],\n [0.162, 0.662, 0.838],\n [0.838, 0.338, 0.162],\n [0.662, 0.838, 0.162],\n [0.338, 0.162, 0.838],\n [0.838, 0.162, 0.662],\n [0.162, 0.838, 0.338],\n ]\n fractional = True\n\n symbols = names = [\"Fe\"] * 4 + [\"S\"] * 8\n\n elif pkey == \"fes_cubic-zincblende\":\n cell = [[2.71, -2.71, 0.0], [2.71, 0.0, 2.71], [0.0, -2.71, 2.71]]\n symbols = names = [\"Fe\", \"S\"]\n positions = [[0, 0, 0], [4.065, -4.065, 4.065]]\n fractional = False\n elif pkey == \"greigite\":\n cell = [[0.0, 4.938, 4.938], [4.938, 0.0, 4.938], [4.938, 4.938, 0.0]]\n positions = [\n (1.2345, 1.2345, 1.2345),\n (8.6415, 8.6415, 8.6415),\n (4.938, 4.938, 4.938),\n (2.469, 4.938, 2.469),\n (4.938, 2.469, 
2.469),\n (2.469, 2.469, 4.938),\n (2.473938, 2.473938, 2.473938),\n (4.942938, 7.402062, 4.942938),\n (4.933062, 2.473938, 4.933062),\n (2.473938, 4.933062, 4.933062),\n (7.402062, 4.942938, 4.942938),\n (7.402062, 7.402062, 7.402062),\n (4.942938, 4.942938, 7.402062),\n (4.933062, 4.933062, 2.473938),\n ]\n fractional = False\n symbols = names = [\n \"Fe\",\n \"Fe\",\n \"Fe\",\n \"Fe\",\n \"Fe\",\n \"Fe\",\n \"S\",\n \"S\",\n \"S\",\n \"S\",\n \"S\",\n \"S\",\n \"S\",\n \"S\",\n ]\n\n else:\n raise ValueError(\"Unknown structure key: {}\".format(pkey))\n\n # create structure\n structure = DataFactory(\"structure\")(cell=cell)\n for position, symbol, name in zip(positions, symbols, names):\n if fractional:\n position = np.dot(position, cell).tolist()\n structure.append_atom(position=position, symbols=symbol, name=name)\n\n return structure\n\n return _get_structure_data\n\n\npotential_data = namedtuple(\n \"PotentialTestData\", [\"type\", \"data\", \"structure\", \"output\"]\n)\n\n\n@pytest.fixture(scope=\"function\")\ndef get_potential_data(get_structure_data):\n def _get_potential_data(pkey):\n \"\"\"return data to create a potential,\n and accompanying structure data and expected output data to test it with\n \"\"\"\n if pkey == \"eam\":\n pair_style = \"eam\"\n with io.open(\n os.path.join(TEST_DIR, \"input_files\", \"Fe_mm.eam.fs\")\n ) as handle:\n potential_dict = {\n \"type\": \"fs\",\n \"file_contents\": handle.readlines(),\n \"element_names\": [\"Fe\"],\n }\n structure = get_structure_data(\"Fe\")\n output_dict = {\"initial_energy\": -8.2441284, \"energy\": -8.2448702}\n\n elif pkey == \"lennard-jones\":\n\n structure = get_structure_data(\"Ar\")\n\n # Example LJ parameters for Argon. These may not be accurate at all\n pair_style = \"lennard_jones\"\n potential_dict = {\n \"1 1\": \"0.01029 3.4 3.5\",\n # '2 2': '1.0 1.0 2.5',\n # '1 2': '1.0 1.0 2.5'\n }\n\n output_dict = {\n \"initial_energy\": 0.0,\n \"energy\": 0.0, # TODO should LJ energy be 0?\n }\n\n elif pkey == \"tersoff\":\n\n structure = get_structure_data(\"GaN\")\n\n potential_dict = {\n \"Ga Ga Ga\": \"1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 1.0 1.44970 410.132 2.87 0.15 1.60916 535.199\",\n \"N N N\": \"1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 1.0 2.38426 423.769 2.20 0.20 3.55779 1044.77\",\n \"Ga Ga N\": \"1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000\",\n \"Ga N N\": \"1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44\",\n \"N Ga Ga\": \"1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44\",\n \"N Ga N \": \"1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 0.0 0.00000 0.00000 2.20 0.20 0.00000 0.00000\",\n \"N N Ga\": \"1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000\",\n \"Ga N Ga\": \"1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 0.0 0.00000 0.00000 2.87 0.15 0.00000 0.00000\",\n }\n\n pair_style = \"tersoff\"\n\n output_dict = {\"initial_energy\": -18.109886, \"energy\": -18.110852}\n\n elif pkey == \"reaxff\":\n\n from aiida_lammps.common.reaxff_convert import (\n filter_by_species,\n read_lammps_format,\n )\n\n pair_style = \"reaxff\"\n with io.open(\n os.path.join(TEST_DIR, \"input_files\", \"FeCrOSCH.reaxff\")\n ) as handle:\n potential_dict = read_lammps_format(\n handle.read().splitlines(), tolerances={\"hbonddist\": 7.0}\n )\n potential_dict = filter_by_species(\n potential_dict, [\"Fe core\", 
\"S core\"]\n )\n for n in [\"anglemin\", \"angleprod\", \"hbondmin\", \"torsionprod\"]:\n potential_dict[\"global\"].pop(n)\n potential_dict[\"control\"] = {\"safezone\": 1.6}\n # potential_dict = {\n # \"file_contents\": handle.readlines(),\n # \"control\": {\"safezone\": 1.6},\n # \"global\": {\"hbonddist\": 7.0},\n # }\n\n structure = get_structure_data(\"pyrite\")\n\n output_dict = {\n \"initial_energy\": -1027.9739,\n \"energy\": -1030.3543,\n \"units\": \"real\",\n }\n\n else:\n raise ValueError(\"Unknown potential key: {}\".format(pkey))\n\n return potential_data(pair_style, potential_dict, structure, output_dict)\n\n return _get_potential_data\n" ]
[ [ "numpy.array", "numpy.random.randint" ], [ "numpy.dot" ] ]
anajikadam17/nlp-dl-prework
[ "cc19eb08d08843a0c64a77032edd3c46c91d9629" ]
[ "PageRank/code.py" ]
[ "# --------------\n# Code starts here\n\nimport numpy as np\n\n# Code starts here\n\n# Adjacency matrix\nadj_mat = np.array([[0,0,0,0,0,0,1/3,0],\n [1/2,0,1/2,1/3,0,0,0,0],\n [1/2,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0],\n [0,0,1/2,1/3,0,0,1/3,0],\n [0,0,0,1/3,1/3,0,0,1/2],\n [0,0,0,0,1/3,0,0,1/2],\n [0,0,0,0,1/3,1,1/3,0]])\n\n# Compute eigenvalues and eigencevectrs\neigenvalues,eigenvectors=np.linalg.eig(adj_mat)\nabs(eigenvectors[:,0])\neigen_1=abs(eigenvectors[:,0])/np.linalg.norm(eigenvectors[:,0],1)\npage=np.where(np.max(eigen_1)== eigen_1)[0][0]+1\nprint(page)\n# Eigen vector corresponding to 1\n\n\n# most important page\n\n\n# Code ends here\n\n\n# --------------\n\n# Initialize stationary vector I\ninit_I = np.array([1,0,0,0,0,0,0,0])\n\n# Perform iterations for power method\nfor _ in range(10):\n init_I = np.dot(adj_mat, init_I)\n init_I /= np.linalg.norm(init_I, 1)\n\n\nprint(init_I)\npower_page = np.where(np.max(init_I) == init_I)[0][0] + 1\nprint(power_page)\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# New Adjancency matrix\n# New Adjancency matrix\nnew_adj_mat = np.array([[0,0,0,0,0,0,0,0],\n [1/2,0,1/2,1/3,0,0,0,0],\n [1/2,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0],\n [0,0,1/2,1/3,0,0,1/2,0],\n [0,0,0,1/3,1/3,0,0,1/2],\n [0,0,0,0,1/3,0,0,1/2],\n [0,0,0,0,1/3,1,1/2,0]])\n\n# Initialize stationary vector I\nnew_init_I= np.array([1,0,0,0,0,0,0,0])\n\n# Perform iterations for power method\nfor _ in range(10):\n new_init_I = np.dot(new_adj_mat, new_init_I)\n new_init_I /= np.linalg.norm(new_init_I, 1)\n\nprint(new_init_I)\n \n\n\n\n\n# Code ends here\n\n\n# --------------\n# Alpha value\nalpha = 0.85\n\n# Code starts here\n\n# Modified adjancency matrix\nG=alpha + (np.ones(new_adj_mat.shape)-alpha)*1/len(new_adj_mat)*np.ones(new_adj_mat.shape)\n\n# Initialize stationary vector I\nfinal_init_I=np.array([1,0,0,0,0,0,0,0])\n\n# Perform iterations for power method\nfor _ in range(1000):\n final_init_I = np.dot(new_adj_mat, final_init_I)\n final_init_I /= np.linalg.norm(final_init_I, 1)\n\nprint(final_init_I)\n\n# Code ends here\n\n\n" ]
[ [ "numpy.dot", "numpy.linalg.eig", "numpy.linalg.norm", "numpy.ones", "numpy.max", "numpy.array" ] ]
solve-fx/fxdatalamda
[ "73fec613c78545cfcef7072eb4066085904cc1c8" ]
[ "lambda_ETL.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom datetime import date, timedelta\n\ndef price_resampler(data, timeframe, timeframe_label):\n\n resampled_data = data.resample(timeframe).agg({'Open': 'first', \n 'High': 'max', \n 'Low': 'min', \n 'Close': 'last'}).dropna() \n\n resampled_data_closed = resampled_data[:-1]\n\n resampled_data_closed['timeframe'] = timeframe_label\n\n return resampled_data_closed\n\n\ndef lambda_handler(event, context): \n current_year = date.today().year\n current_month = date.today().month\n current_day = date.today().day\n\n extract_start_day = str(max(current_day - 6,1)).zfill(2)\n extract_start = str(current_year)+\"-\"+str(current_month)+\"-\"+extract_start_day\n extract_end = date.today() + timedelta(days=10)\n\n years = [current_year - 1, current_year]\n months = range(1,13)\n\n filenames = []\n for year in years:\n for month in months:\n if (year < current_year) | (month <= current_month):\n filenames.append(str(year)+\"_\"+str(month).zfill(2)+\".csv\")\n\n filenames = filenames[-6:]\n\n\n for i in range(len(filenames)):\n file_loc = 's3://bucket-fxdata/gbpjpy/fiveminute/'+filenames[i]\n df = pd.read_csv(file_loc)\n if i == 0:\n data_5m = df.copy()\n else:\n data_5m = data_5m.append(df)\n\n data_5m['datetime'] = pd.to_datetime(data_5m.datetime, format = '%Y/%m/%d %H:%M:%S')\n data_5m.index = data_5m['datetime']\n data_5m = data_5m.drop(columns=['datetime'])\n data_5m['timeframe'] = '5m'\n\n data_15m = price_resampler(data_5m, '15T', '15m')\n data_30m = price_resampler(data_5m, '30T', '30m')\n data_1h = price_resampler(data_5m, '1H', '1h')\n data_5m_offset = data_5m.copy()\n data_5m_offset.index = data_5m_offset.index + pd.DateOffset(hours=2)\n data_4h = price_resampler(data_5m_offset, '4H', '4h')\n data_4h.index = data_4h.index - pd.DateOffset(hours=2)\n\n data = data_5m.append(data_15m).append(data_30m).append(data_1h).append(data_4h)\n\n data['hour'] = data.index.hour\n data['hour'] = data['hour'].astype(str).str.zfill(2)\n\n data['minute'] = data.index.minute\n data['minute'] = data['minute'].astype(str).str.zfill(2)\n\n data['time'] = data['hour']+\" \"+data['minute']\n\n data['candle_size_h'] = abs(data['Open'] - data['High'])\n data['candle_size_l'] = abs(data['Open'] - data['Low'])\n data['candle_size'] = data[['candle_size_h', 'candle_size_l']].max(axis=1) * 100\n\n data['days_since'] = (pd.to_datetime(date.today()) - data.index).days\n data['days_since'] = data['days_since'] - data['days_since'].min()\n\n data['bullish'] = np.where(data['Close'] > data['Open'], 1, 0)\n\n data_last100 = data[data['days_since'] <= 100]\n\n data_last100['date'] = data_last100.index\n\n data_last100 = data_last100[['date','timeframe','hour','minute','time','candle_size','bullish','days_since']]\n data_last100.to_csv('s3://bucket-fxdata/gbpjpy_last100.csv', index=False)\n\n return \"done!\"" ]
[ [ "pandas.to_datetime", "numpy.where", "pandas.read_csv", "pandas.DateOffset" ] ]
imwillhang/multimodal-healthcare
[ "4959fa1a8c99e23334c926b73202c944f1eda457" ]
[ "util/BatcherMassager.py" ]
[ "import dicom\nimport numpy as np\nfrom PIL import Image\nimport csv\nimport os\nfrom scipy.misc import imresize, imsave\nimport matplotlib.pyplot as plt\n\npathology_dict = {'MALIGNANT': 1, 'BENIGN': 0, 'BENIGN_WITHOUT_CALLBACK': 0}\nclass Batcher:\n def __init__(self, batch_sz, metadata, indices, mass_headers, calc_headers, root, attr2onehot, mean=0, std=0, new_batch=False):\n '''\n This batcher takes in rows of metadata formatted\n specifically for DDSM images with the image directory\n structure downloaded from TCIA\n '''\n self.batch_sz = batch_sz\n self.metadata = metadata\n self.mass_headers = mass_headers\n self.calc_headers = calc_headers\n self.indices = indices\n self.root = root\n self.attr2onehot = attr2onehot\n self.mean = mean\n self.new_batch = new_batch\n self.std = std\n \n def visualize(self, img):\n '''\n debug tool: visualize the grey scale image for sanity check.\n '''\n plt.imshow(img, cmap='gray')\n plt.show()\n\n def to_uint8(self, img):\n '''\n converts an image from uint16 to uint8 format.\n uses lookup table to reduce memory usage\n also more efficient\n '''\n lut = np.arange(2**16, dtype='uint16')\n lut -= np.amin(img)\n lut //= int((np.amax(img) - np.amin(img) + 1) / 256)\n return np.take(lut, img)\n\n def resize(self, img, H = 299, W = 299):\n '''\n resize all images to 4000 x 3000 to be batchable\n '''\n return imresize(img, (H, W))\n\n def get_train_stats(self):\n '''\n first pass of images to get mean pixel value for preprocessing\n '''\n it = self.get_iterator(mean_process=True) \n mean = 0.0\n counter = 0\n stds = []\n for imgs, labels, _, paths in it:\n for im, image_path in zip(imgs, paths):\n counter += 1\n im = self.to_uint8(im)\n im = self.resize(im)\n im = im.astype(np.float64)\n print('saving {} inside get_train_mean'.format(image_path))\n np.save(image_path, im)\n #im /= 255\n H, W = im.shape[0], im.shape[1]\n # incremental mean update for numerical stability\n mean += (np.sum(im) - mean * H * W) / (counter * H * W) \n stds += list(np.ravel(im))\n std = np.std(stds)\n return mean, std\n\n def preprocess(self, img, unseen=False):\n '''\n Preprocessing step:\n convert to 8 bit, resize to H X W, reduce by mean\n '''\n if unseen:\n img = self.to_uint8(img)\n img = self.resize(img)\n img = img.astype(np.float64)\n #if self.mean != 0: \n # img /= 255\n # img -= self.mean \n #if self.std != 0: \n # img /= self.std\n return img\n\n def get_image_from_path(self, path):\n path += '/' + next(os.walk(os.path.expanduser(path)))[1][0]\n path += '/' + next(os.walk(os.path.expanduser(path)))[1][0]\n path += '/' + next(os.walk(os.path.expanduser(path)))[2][0]\n # 4. 
read image from DICOM format into 16bit pixel value\n DCM_img = dicom.read_file(path)\n img = np.asarray(DCM_img.pixel_array)\n return img\n\n def generate_attribute(self, row, is_mass):\n generic_field = ['breast_density', 'abn_num','assessment', 'subtlety']\n mass_fields = ['mass_shape', 'mass_margins']\n calc_fields = ['calc_type', 'calc_distribution']\n attribute = [] #[0] * 4\n\n # mass: shape 10, margin 7\n # calc: type 15, distrib 6\n # feat1: 17 feat2: 21\n if is_mass:\n for field in generic_field:\n attribute.append(self.attr2onehot['mass'][field][row[self.mass_headers[field]]])\n for field, pad in zip(mass_fields, [46, 10]):\n mass_feature = [0] * len(self.attr2onehot['mass'][field])\n parts = row[self.mass_headers[field]].split('-')\n for part in parts:\n mass_feature[self.attr2onehot['mass'][field][part] - 1] = 1\n #mass_feature[self.attr2onehot['mass'][field][row[self.mass_headers[field]]] - 1] = 1\n attribute += mass_feature\n attribute += [0] * pad\n else:\n for field in generic_field:\n attribute.append(self.attr2onehot['calc'][field][row[self.calc_headers[field]]])\n for field, pad in zip(calc_fields, [20, 17]):\n attribute += [0] * pad\n calc_feature = [0] * len(self.attr2onehot['calc'][field])\n parts = row[self.calc_headers[field]].split('-')\n for part in parts:\n calc_feature[self.attr2onehot['calc'][field][part] - 1] = 1\n #calc_feature[self.attr2onehot['calc'][field][row[self.calc_headers[field]]] - 1] = 1\n attribute += calc_feature\n\n return attribute\n \n def get_iterator(self, mean_process=False):\n '''\n Data iterator. get_iterator returns all batches\n in the form of (X, y) tuples\n '''\n X = []\n y = []\n attributes = []\n new_image_flag = False\n paths = []\n counter = 0\n already_seen = set()\n for i in range(len(self.indices)):\n \n row = self.metadata[self.indices[i]]\n path = self.root + '/'\n # 1. figure out if this image is a mass or a calc\n if 'Mass' in row[self.mass_headers['od_img_path']]:\n path += 'Mass-Training'\n else:\n path += 'Calc-Training'\n # 2. build the image path\n path += '_' + row[self.mass_headers['patient_id']] \\\n + '_' + row[self.mass_headers['side']] + '_' \\\n + row[self.mass_headers['view']]\n \n if not os.path.exists(path):# or path in already_seen:\n continue\n\n already_seen.add(path)\n # 3. wade through two layers of useless directories\n down_a_level = next(os.walk(os.path.expanduser(path)))\n image_name = 'image_{}'.format(row[self.mass_headers['patient_id']])\n image_path = path + '/' + image_name\n # if we're trying to just get images for mean calculations\n if mean_process:\n img = self.get_image_from_path(path)\n elif self.new_batch:\n # this means we're relying on mean-processed images to do further processing on\n try:\n img = np.load(image_path + '.npy')\n except:\n raise Exception('Most likely a file read error, or that somehow we tried to read a file we haven\\'t preprocessed before')\n # no unseen flag because we've already seen these images!\n img = self.preprocess(img)\n print('saving {} inside get_iterator'.format(image_path))\n np.save(image_path, img)\n else:\n # we are assuming that all the images have been processed before\n try:\n # print('opening completely preprocessed {} inside get_iterator'.format(image_path))\n img = np.load(image_path + '.npy')\n except:\n img = self.get_image_from_path(path)\n img = self.preprocess(img, unseen=True)\n print('saving {} inside get_iterator'.format(image_path))\n np.save(image_path, img)\n # 6. 
add the image to the batch \n X.append(img)\n #imsave(\"out/{}.png\".format(row[0]), img)\n # 7. do some mojo with the label and append to y\n # probably gonna be one hot or something\n label = pathology_dict[row[self.mass_headers['pathology']]]\n y.append(label)\n\n # 8. Get those attributes\n if 'Mass' in row[self.mass_headers['od_img_path']]: \n attributes.append(self.generate_attribute(row, True))\n else:\n attributes.append(self.generate_attribute(row, False))\n # only do this if we are trying to make a new batch, and if we are catering to get_train_mean()\n if mean_process:\n paths.append(image_path)\n # 8. check if our batch is ready\n counter += 1\n if counter >= self.batch_sz: \n if mean_process:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes), paths)\n else:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes))\n X = []\n y = []\n paths = []\n attributes = []\n counter = 0\n if not X or not y:\n return\n\n if mean_process:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes), paths)\n else:\n X_out = np.asarray(X)\n y_out = np.asarray(y)\n attrib_out = np.asarray(attributes)\n yield (X_out, y_out, attrib_out)\n" ]
[ [ "matplotlib.pyplot.imshow", "scipy.misc.imresize", "numpy.amax", "numpy.take", "numpy.amin", "numpy.arange", "numpy.asarray", "numpy.load", "numpy.save", "numpy.std", "numpy.ravel", "matplotlib.pyplot.show", "numpy.sum" ] ]
ProjectCaelus/DesignLiquidEngine
[ "a7e4a4f0146bbf0f056efc92a931a08ffff4f3a5" ]
[ "helpers/PropSim2/PropSimPython/helpers/n2o.py" ]
[ "# Nitrous-related helper methods for PropSimPython\n# Project Caelus, Aphlex 1C Engine\n# Liam West, Anya Mischel, & Jason Chen, 10 February, 2021\n\n\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom classes import Struct\n\n\ndef n2o_properties(temp: int or float) -> Struct:\n \"\"\"\n Calculates an array of properties of nitrous oxide given a temperature in K.\n WARNING: if temperature input is outside of -90 to 30 C, properties will\n be generated for boundary (-90 C or 30 C).\n \"\"\"\n properties = dict() # Creates properties (output array) as a dictionary for which data from text file can be entered\n properties[\"Pvap\"] = None # Nitrous vapor pressure\n properties[\"rho_l\"] = None # Liquid density of nitrous\n properties[\"rho_g\"] = None # Gas denisty of nitrous\n properties[\"deltaH_vap\"] = None # Vaportization enthalpy\n properties[\"cp_l\"] = None # Specific heat of liquid nitrous\n properties[\"cv_l\"] = None # Specific volume of liquid nitrous\n properties[\"cp_g\"] = None # Specific heat of gaseous nitrous\n properties[\"cv_g\"] = None # Specific volume of gaseous nitrous \n properties[\"h_l\"] = None # Enthalpy of liquid nitrous\n properties[\"h_g\"] = None # Enthalpy of gaseous nitrous\n properties[\"s_l\"] = None # Specific entropy of liquid nitrous\n properties[\"s_g\"] = None # Specific entropy of gaseous nitrous\n properties[\"mu_l\"] = None # Dynamic viscosity of nitrous liquid\n properties[\"mu_g\"] = None # Dynamic viscosity of nitrous gas\n properties[\"e_l\"] = None # Specific internal energy of liquid\n properties[\"e_g\"] = None # Specific internal energy of gas\n \n R_u = 8.3144621 # Universal gas constant [J/mol*K]\n M_n2o = 0.044013 # Molecular mass of nitrous oxide [kg/mol]\n R_n2o_0 = R_u/M_n2o # Specific gas constant of nitrous oxide [J/kg*K]\n\n # Range-check temperature\n if temp < (-90 + 273.15): # If temperature is less that bottom bound\n temp = -90 + 273.150001 # Temperature is bottom bound for interpolate\n elif temp > (30 + 273.150001): # If temperature greater than top bound, \n temp = 30 + 273.150001 # Temperature equal to top bound for interpolate\n\n Tcrit = 309.57 # K\n Pcrit = 7251 # kPa\n rhocrit = 452 # kg/m^3\n # Possibly add critical compressibility factor \"Z\"\n Tr = temp/Tcrit\n\n # Calculate vapor pressure, valid -90 to 36C\n b1 = -6.71893\n b2 = 1.3596\n b3 = -1.3779\n b4 = -4.051\n\n properties[\"Pvap\"] = np.exp((1/Tr)*(b1*(1-Tr) + b2*(1-Tr)**(3/2) + b3*(1-Tr)**(5/2) + b4*(1-Tr)**5))*Pcrit\n properties[\"Pvap\"] = properties[\"Pvap\"]*1000\n\n # Calculate Density of Liquid, valid -90C to 36C\n b1 = 1.72328\n b2 = -0.83950\n b3 = 0.51060\n b4 = -0.10412\n\n properties[\"rho_l\"] = np.exp(b1*(1-Tr)**(1/3) + b2*(1-Tr)**(2/3) + b3*(1-Tr) + b4*(1-Tr)**(4/3))*rhocrit\n\n # Calculate Density of Gas, valid -90C to 36C\n b1 = -1.00900\n b2 = -6.28792\n b3 = 7.50332\n b4 = -7.90463\n b5 = 0.629427\n Trinv = 1./Tr\n\n properties[\"rho_g\"] = np.exp(b1*(Trinv-1)**(1/3) + b2*(Trinv-1)**(2/3) + b3*(Trinv-1) + b4*(Trinv-1)**(4/3) + b5*(Trinv-1)**(5/3))*rhocrit\n\n # Calculate dynamic viscosity of saturated liquid, valid from -90C to 30C\n b1 = 1.6089\n b2 = 2.0439\n b3 = 5.24\n b4 = 0.0293423\n theta = (Tcrit-b3)/(temp-b3)\n\n properties[\"mu_l\"] = b4*np.exp(b1*(theta-1)**(1/3) + b2*(theta-1)**(4/3))\n\n # Calculate dynamic viscosity of saturated vapor, valid from -90C to 30C\n b1 = 3.3281\n b2 = -1.18237\n b3 = -0.055155\n Trinv = 1./Tr\n\n properties[\"mu_g\"] = np.exp(b1 + b2*(Trinv-1)**(1/3) + 
b3*(Trinv-1)**(4/3))\n NIST_data = dict() # NIST_data is an array that stores variables regarding nitrous\n\n # Read each line in the N2O_Properties.cgi.txt document and enter each line into the dictionary, separated by tabs\n with open(\"./data/N2O_Properties.cgi.txt\", \"r\") as reader:\n for x, line in enumerate(reader):\n if x ==0:\n temp_list = line.split(\"\\t\")\n arr = [list() for x in range(len(temp_list))]\n continue\n temp_list = line.split(\"\\t\") \n for i, val in enumerate(temp_list):\n arr[i].append(val)\n NIST_data[\"T\"] = arr[0]\n NIST_data[\"h_liq\"] = arr[5]\n NIST_data[\"h_gas\"] = arr[17]\n NIST_data[\"e_liq\"] = arr[4]\n NIST_data[\"e_gas\"] = arr[16]\n NIST_data[\"cv_l\"] = arr[7]\n NIST_data[\"cv_g\"] = arr[19]\n NIST_data[\"s_l\"] = arr[6]\n NIST_data[\"s_g\"] = arr[18]\n NIST_data[\"cp_liq\"]= arr[8]\n NIST_data[\"cp_gas\"]= arr[20]\n NIST_data = {key: np.array(NIST_data[key], dtype=\"float64\") for key in NIST_data}\n\n # Gas Specific Enthalpy\n properties[\"h_l\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"h_liq\"])*1000 # J/kg, \n properties[\"h_g\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"h_gas\"])*1000 # J/kg \n properties[\"e_l\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"e_liq\"])*1000 # J/kg \n properties[\"e_g\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"e_gas\"])*1000 # J/kg \n properties[\"deltaH_vap\"] = properties[\"h_g\"]-properties[\"h_l\"]\n properties[\"deltaE_vap\"] = properties[\"e_g\"]-properties[\"e_l\"]\n\n # Specific Heat at Constant Volume\n properties[\"cv_l\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"cv_l\"])*1000\n properties[\"cv_g\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"cv_g\"])*1000\n\n # Specific Heat at Constant Pressure\n properties[\"cp_l\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"cp_liq\"])*1000\n properties[\"cp_g\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"cp_gas\"])*1000\n\n # Specific Entropy\n properties[\"s_l\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"s_l\"])*1000\n properties[\"s_g\"] = np.interp(temp, NIST_data[\"T\"], NIST_data[\"s_g\"])*1000\n\n # Convert Properties to Standard Units\n properties[\"mu_l\"] = properties[\"mu_l\"]*10**-3 # mN*s/(m^2) -> N*s/m^2\n properties[\"mu_g\"] = properties[\"mu_g\"]*10**-6 # uN*s/(m^ 2)-> N*s/m^2\n\n props = Struct(properties) # Converts the output properties into standardized Struct type\n return props\n\n\ndef get_n2o_Pvap(temp: int or float):\n \"\"\"A faster method to get the nitrous vapor pressure based on input temperature.\"\"\"\n\n # Range-check temperature\n if temp < (-90 + 273.15): # If temperature is less that bottom bound\n temp = -90 + 273.150001 # Temperature is bottom bound for interpolate\n elif temp > (30 + 273.150001): # If temperature greater than top bound, \n temp = 30 + 273.150001 # Temperature equal to top bound for interpolate\n\n Tcrit = 309.57 # K\n Pcrit = 7251 # kPa\n rhocrit = 452 # kg/m^3\n Tr = temp/Tcrit\n\n # Calculate vapor pressure, valid -90 to 36C\n b1 = -6.71893\n b2 = 1.3596\n b3 = -1.3779\n b4 = -4.051\n\n Pvap = 1000*(np.exp((1/Tr)*(b1*(1-Tr) + b2*(1-Tr)**(3/2) + b3*(1-Tr)**(5/2) + b4*(1-Tr)**5))*Pcrit)\n return Pvap\n\n\ndef n2o_find_T(p_vap: int or float) -> float:\n \"\"\" Estimate temperature that gives the specified vapor pressure. 
\"\"\"\n T_min = -90 + 273.15\n T_max = 30 + 273.15\n num_pts = 1000\n T_i = np.linspace(T_min, T_max, num_pts)\n Pvap = np.zeros(T_i.shape)\n\n for x in range(num_pts):\n Pvap[x] = get_n2o_Pvap(T_i[x])\n\n z = InterpolatedUnivariateSpline(Pvap, T_i, k=1) # Best-fit line, Pvap along x, temp along y\n return z(p_vap)\n\n\ndef two_phase_n2o_flow(t_1: int or float): \n \"\"\"\n Calculates the critical mass flow rate using the homogeneous equilibrium \n model. The mass flow rate may not exceed this, regardless of the back pressure.\n\n Inputs:\n T_1: upstream liquid temperature, K\n\n Outputs:\n crit_flow:\n crit_flow.p_up_norm: vector of (upstream pressure / upstream vapor \n pressure) for G_crit calculations\n crit_flow.G_crit: critical oxidizer mass flux at each p_1, kg/m^2*s\n crit_flow.p_down_norm_crit: critical normalized back pressure \n (downstream pressure / upstream pressure) at each p_1, Pa\n mass_flux:\n mass_flux.p_up_norm: array of (upstream pressure / upstream vapor \n pressure)\n mass_flux.p_down_norm: array of (back pressure / upstream pressure)\n mass_flux.G: array of mass flux at corresponding P_1_norm, P_2_norm\n\n NOTE: States are denoted by subscript as follows:\n 1: upstream (stagnation)\n 2: downstream\n \"\"\"\n\n # Numerical options\n n = 200 # Density of calculation points along each dimension\n\n # Saturation properties at upstream temperature\n n2o_prop_1_sat = n2o_properties(t_1)\n\n # Create array of p_1, p_2\n p_1_min = 1.0\n p_1_max = 3.0\n p_1 = np.linspace(p_1_min, p_1_max, n)\n p_2 = np.linspace(0, 1, n)\n\n P_1_norm, P_2_norm = np.meshgrid(p_1, p_2)\n\n # Convert to pressures (from pressure ratio)\n P_1 = np.multiply(P_1_norm, n2o_prop_1_sat.Pvap)\n P_2 = np.multiply(P_2_norm, P_1)\n \n # Upstream liquid enthalpy\n # Calculate enthalpy as extension from saturated state\n enthalpy_1 = n2o_prop_1_sat.h_l + (P_1 - n2o_prop_1_sat.Pvap)/n2o_prop_1_sat.rho_l\n\n # Calculate downstream temperature\n T_2 = n2o_find_T(P_2)\n # n2o_prop_2 = # [n2o_properties(j) for j in i for i in T_2]\n a = T_2.shape[0] \n b = T_2.shape[1]\n n2o_prop_2 = [[0 for b in range(b)] for i in range(a)]\n\n for x in range(a):\n for y in range(b):\n n2o_prop_2[x][y] = n2o_properties(T_2[x, y])\n\n # Calculate gas density\n rho_2_g = np.array([[j.rho_g for j in i] for i in n2o_prop_2])\n rho_2_l = n2o_prop_1_sat.rho_l * np.ones(np.array(P_2).shape)\n\n enthalpy_2_g = np.array([[j.h_g for j in i] for i in n2o_prop_2])\n enthalpy_2_l = np.array([[j.h_l for j in i] for i in n2o_prop_2])\n\n entropy_1_l = n2o_prop_1_sat.s_l * np.ones(np.array(P_2).shape)\n entropy_2_l = np.array([[j.s_l for j in i] for i in n2o_prop_2])\n entropy_2_g = np.array([[j.s_g for j in i] for i in n2o_prop_2])\n\n # entropy difference: liquid downstream - liquid upstream\n entropy_ld_lu = entropy_2_l - entropy_1_l\n # entropy difference: liquid downstream - gas downstream\n entropy_ld_gd = entropy_2_l - entropy_2_g\n\n # calculate mass fraction of gas to conserve entropy\n # massfrac: mass fraction of vapor\n massfrac = np.divide(entropy_ld_lu, entropy_ld_gd)\n\n a = massfrac.shape[0] \n b = massfrac.shape[1]\n for x in range(a):\n for y in range(b):\n if massfrac[x, y] < 0:\n rho_2_l = n2o_prop_1_sat.rho_l\n enthalpy_2_l = n2o_prop_1_sat.h_l + P_2 - n2o_prop_1_sat.Pvap / n2o_prop_1_sat.rho_l\n massfrac[x, y] = 0\n\n # downstream inverse density\n rho_2_inv = np.multiply(massfrac, (1./rho_2_g) + np.multiply((1-massfrac), (1./rho_2_l)))\n\n # downstream equivalent density\n rho_2_equiv = np.ones(rho_2_inv.shape) / 
rho_2_inv\n\n enthalpy_2 = massfrac * enthalpy_2_g + (np.ones(massfrac.shape) - massfrac) * enthalpy_2_l\n\n # Homogeneous Equilibrium Model\n G = rho_2_equiv * np.sqrt(2 * (enthalpy_1 - enthalpy_2))\n\n # G_crit for each \n G_crit = np.amax(G, axis=0) \n i_crit = np.argmax(G, axis=0)\n\n P_2_crit = np.zeros((1, P_2.shape[1]))\n for ii in range(0, P_2.shape[1]):\n P_2_crit[ii] = P_2[i_crit[ii], ii]\n\n # Create downstream pressure vs. oxidizer mass flux profile\n G_out = np.matlib.repmat(G_crit, [P_2.shape[0], 1])\n # P_2_crit expanded to size of P_2\n P_2_crit_exp = np.matlib.repmat(P_2_crit, [P_2.shape[0], 1])\n G_out = G if P_2 > P_2_crit_exp else None\n\n # Calculate incompressible mass flux\n G_inc = np.sqrt(2*(P_1-P_2)*n2o_prop_1_sat.rho_l)\n\n # Package for output\n crit_flow = {}\n mass_flux = {}\n crit_flow[\"p_up_norm\"] = p_1\n crit_flow[\"G_crit\"] = G_crit\n crit_flow[\"p_down_norm_crit\"] = np.divide(P_2_crit, (p_1*n2o_prop_1_sat.Pvap))\n mass_flux[\"p_up_norm\"] = P_1_norm\n mass_flux[\"p_down_norm\"] = P_2_norm\n mass_flux[\"G\"] = G_out\n\n return crit_flow, mass_flux\n\ntwo_phase_n2o_flow(300)\n" ]
[ [ "numpy.matlib.repmat", "numpy.amax", "scipy.interpolate.InterpolatedUnivariateSpline", "numpy.linspace", "numpy.meshgrid", "numpy.multiply", "numpy.sqrt", "numpy.ones", "numpy.argmax", "numpy.interp", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.divide" ] ]
lukasz-starosta/imputation
[ "64a911a659c37ba415ebca0e21f7ae9f5ed2e2c5" ]
[ "methods/interpolate.py" ]
[ "from utils.extract import extract\nimport pandas as pd\n\ndef interpolate(filename):\n names, headers, numeric_data = extract(filename)\n row_length = len(numeric_data)\n column_length = len(numeric_data[0])\n df = pd.read_csv(filename)\n print(df)\n return df" ]
[ [ "pandas.read_csv" ] ]
maayane/PhotoFit
[ "e5461bc50a9587ed0fe5d323f6b6bbea8aa968d5" ]
[ "PhotoFit/black_body_flux_density.py" ]
[ "\n\nimport astropy\nfrom astropy import constants as const\nimport math\nfrom . import distances_conversions\nfrom . import extinction\nimport numpy as np\nimport pdb\nimport pylab\n\n#def planck(wav, T):\n# a=2*6.626070040e-34*(3e8)**2\n# b=6.626070040e-34*(3e8)/(wav*T*1.38064852e-23)\n# #a = 2*const.h.value*const.c.value**2\n# #b = const.h.value*const.c.value/(wav*const.k_B.value*T) #convert into cgs\n# intensity = a/ ( (wav**5) * (np.exp(b) - 1.0) )\n# return intensity#*u.J/(u.s*u.m*u.m*u.m).cgs\n\n#def planck_cgs(wav,T):\n# a=2*const.h.cgs.value*(const.c.cgs.value)**2\n# b=const.h.cgs.value*const.c.cgs.value/(wav*const.k_B.cgs.value*T)\n# intensity=a/( (wav**5) * (np.exp(b) - 1.0) )\n# return intensity\n\n#def RayleighJeans(wav, T):\n# a = 2*c*k*T\n# intensity = a/wav**4\n# return intensity\n\n#def Wien(wav, T):\n# a = 2*h*c**2\n# b=h*c/(wav*k*T)\n# intensity = (a/wav**5)*np.exp(-b)\n# return intensity\n\ndef black_body_flux_density(Temp,wavelength,type=None,verbose=False,distance_pc=None,Radius=None,Ebv=None,R_ext=None,redshift=0,plot=False):\n \"\"\"Description: Given a temperature, calculates a black body flux density B_lambda.\n If a radius anda distance are given, calculate the apparent flux density (R/d)^2*B_lambda\n Input :- Temperature [K]\n - numpy array of wavelengths [m], tipically np.linspace(1e-10,1e-6,num=1000)\n - type of formula:\n 'P' Planck\n 'RJ' Rayleigh-Jeans approximation\n - Radius (optionnal) in solar radius\n - distance (optionnal) in pc\n - Ebv: (optionnal, default is none) extinction to APPLY to the theoretical bb spectrum\n - redshift: (optionnal, default is none) z to apply to the theoretical spectrum with\n Output :array of numpy.arrays [spectrum_cgs,spectrum_Hz,spectrum_A,spectrum_mJy,spectrum_phot] CAREFULLL! confusing between spectrum_cgs and spectrum_A has caused so much arm in the past!\n - spectrum_cgs: wavelength [m], Emittance (flux density) in erg/sec/cm^2/cm(lambda)\n - spectrum_Hz: wavelength [m], Emittance in erg/sec/cm^2/Hz\n - spectrum_A: wavelength [m], Emittance in erg/sec/cm^2/Ang (lambda), 1e-8*Emittance (flux density) in erg/sec/cm^2/cm(lambda)\n - spectrum_mjy: wavelength [m], Emittance [mJy]\n - spectrum_phot: wavelength [m], number of photons [photons/sec/cm^2/Ang (lambda)]\n Tested : ?\n By : Maayane T. 
Soumagnac Nov 2016\n URL :\n Example:[E_cgs, E_Hz, E_A,Emjy, E_phot] = black_body_models.black_body_models(3000, wavelengths, 'P')\n Reliable: \"\"\"\n #if Ebv==0.:\n # Ebv=None\n h_cgs=const.h.cgs.value\n c_cgs=const.c.cgs.value\n kB_cgs=const.k_B.cgs.value\n h_USI=const.h.value\n c_USI=const.c.value\n kB_USI=const.k_B.value\n wavelength_in_cm=wavelength*1e2 # wavelength in cgs\n wavelength_in_cm = wavelength_in_cm.astype(float)\n nu=c_cgs/wavelength_in_cm #frequency in s (because c is in cm/s and wavlength in cm)\n if (Radius!=None and distance_pc!=None):\n #print('pouet')\n #print 'the radius and distance were specified: I am calculating the apparent flux density'\n #pdb.set_trace()\n R_pc=distances_conversions.solar_radius_to_pc(Radius)\n coeff=(R_pc/distance_pc)**2\n #print coeff\n #pdb.set_trace()\n else:\n if verbose==True:\n print('the radius or distance or both were not specified')\n coeff=1.\n #print coeff\n #pdb.set_trace()\n if type.lower() in (None,'p'):\n if verbose==True:\n print('formula used for black body: Planck')\n b_cgs=h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)\n #b_USI=h_USI*c_USI/(wavelength*kB_USI*Temp)\n\n #print 'b_USI is',b_USI\n #print 'exponent is', np.exp(b_cgs)\n #print np.shape(np.exp(b_cgs))\n #print np.isinf(np.exp(b_cgs))\n if verbose == True:\n print('b_cgs is', b_cgs)\n print('be aware that {0} elements in the exponent of the Planck formula lead to an infinite exponent'.format(np.shape(np.exp(b_cgs)[np.isinf(np.exp(b_cgs))==True])[0]))\n print('denom shape is',np.shape(h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)))\n #np.exp(wavelength_in_cm)\n E_cgs=coeff*2*math.pi*h_cgs*c_cgs**2/(wavelength_in_cm**5 *(np.exp(h_cgs*c_cgs/(np.float64(wavelength_in_cm)*kB_cgs*Temp)) - 1.0))\n E_Hz=coeff*2*math.pi*h_cgs*nu**3/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0)) #this is the planck formula in Hz ()\n E_A=E_cgs*1e-8 # because cm-1 =(1e8 A)-1\n E_mjy=1e-26*E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg\n E_phot=coeff*2*math.pi*nu**2/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0))\n elif type.lower() == 'rj':\n if verbose == True:\n print('formula used for black body: Rayleigh-Jeans')\n E_cgs=coeff*2*math.pi*c_cgs*kB_cgs*Temp/wavelength_in_cm**4\n E_Hz=coeff*2*math.pi*kB_cgs*Temp*(nu/c_cgs)**2\n E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1\n E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg\n E_phot=None # I am not sure\n else:\n print('unknown formula')\n pdb.set_trace()\n #if redshift!=None:\n\n wavelength_fixed=wavelength*(redshift+1)\n #print(wavelength_fixed)\n #pdb.set_trace()\n #else:\n wavelength_fixed=wavelength*(redshift+1)\n E_A_fixed=E_A/(redshift+1)\n if Ebv==None:\n spectrum_cgs=np.array(list(zip(wavelength_fixed,E_cgs)))#not sure how z influences\n spectrum_Hz=np.array(list(zip(wavelength_fixed,E_Hz)))#not sure how z influences\n spectrum_A=np.array(list(zip(wavelength_fixed,E_A_fixed)))\n spectrum_mjy=np.array(list(zip(wavelength_fixed,E_mjy)))#not sure how z influences\n spectrum_phot=np.array(list(zip(wavelength_fixed,E_phot)))#not sure how z influences\n else:\n #print(wavelength)\n #print('***')\n #print(wavelength * 1e6)\n #pdb.set_trace()\n spectrum_cgs=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_cgs))),Ebv,R=R_ext)[:,1])))# apply_extinction_to_theoretical_flux needs wavelengths in micropmeters\n 
spectrum_Hz=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_Hz))),Ebv,R=R_ext)[:,1])))\n spectrum_A = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_A_fixed))),Ebv,R=R_ext)[:,1])))\n #spextrum_A_befor_E=np.array(list(zip(wavelength_fixed,E_A_fixed)))\n spectrum_mjy = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_mjy))),Ebv,R=R_ext)[:, 1])))\n spectrum_phot = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_phot))),Ebv,R=R_ext)[:, 1])))\n if plot==True:\n pylab.figure()\n pylab.plot(wavelength,E_A,label='sepctrum before applying z and E')\n pylab.plot(wavelength_fixed,E_A_fixed,label='sepctrum redshifted z={0}'.format(redshift))\n pylab.plot(spectrum_A[:,0],spectrum_A[:,1], label='sepctrum redshifted z={0} and extincted'.format(redshift))\n pylab.legend()\n pylab.show()\n #print('managed till here')\n #pdb.set_trace()\n return spectrum_cgs, spectrum_Hz, spectrum_A, spectrum_mjy, spectrum_phot\n\n\n\ndef black_body_flux_density_fast(Temp,wavelength,type=None,verbose=False,distance_pc=None,Radius=None,Ebv=None,R_ext=None,redshift=0,plot=False):\n \"\"\"Description: Given a temperature, calculates a black body flux density B_lambda.\n If a radius anda distance are given, calculate the apparent flux density (R/d)^2*B_lambda\n Input :- Temperature [K]\n - numpy array of wavelengths [m], tipically np.linspace(1e-10,1e-6,num=1000)\n - type of formula:\n 'P' Planck\n 'RJ' Rayleigh-Jeans approximation\n - Radius (optionnal) in solar radius\n - distance (optionnal) in pc\n - Ebv: (optionnal, default is none) extinction to APPLY to the theoretical bb spectrum\n - redshift: (optionnal, default is none) z to apply to the theoretical spectrum with\n Output :array of numpy.arrays [spectrum_cgs,spectrum_Hz,spectrum_A,spectrum_mJy,spectrum_phot] CAREFULLL! confusing between spectrum_cgs and spectrum_A has caused so much arm in the past!\n - spectrum_cgs: wavelength [m], Emittance (flux density) in erg/sec/cm^2/cm(lambda)\n - spectrum_Hz: wavelength [m], Emittance in erg/sec/cm^2/Hz\n - spectrum_A: wavelength [m], Emittance in erg/sec/cm^2/Ang (lambda), 1e-8*Emittance (flux density) in erg/sec/cm^2/cm(lambda)\n - spectrum_mjy: wavelength [m], Emittance [mJy]\n - spectrum_phot: wavelength [m], number of photons [photons/sec/cm^2/Ang (lambda)]\n Tested : ?\n By : Maayane T. 
Soumagnac Nov 2016\n URL :\n Example:[E_cgs, E_Hz, E_A,Emjy, E_phot] = black_body_models.black_body_models(3000, wavelengths, 'P')\n Reliable: \"\"\"\n #if Ebv==0.:\n # Ebv=None\n h_cgs=const.h.cgs.value\n c_cgs=const.c.cgs.value\n kB_cgs=const.k_B.cgs.value\n h_USI=const.h.value\n c_USI=const.c.value\n kB_USI=const.k_B.value\n wavelength_in_cm=wavelength*1e2 # wavelength in cgs\n wavelength_in_cm = wavelength_in_cm.astype(float)\n nu=c_cgs/wavelength_in_cm #frequency in s (because c is in cm/s and wavlength in cm)\n if (Radius!=None and distance_pc!=None):\n #print('pouet')\n #print 'the radius and distance were specified: I am calculating the apparent flux density'\n #pdb.set_trace()\n R_pc=distances_conversions.solar_radius_to_pc(Radius)\n coeff=(R_pc/distance_pc)**2\n #print coeff\n #pdb.set_trace()\n else:\n if verbose==True:\n print('the radius or distance or both were not specified')\n coeff=1.\n #print coeff\n #pdb.set_trace()\n if type.lower() in (None,'p'):\n if verbose==True:\n print('formula used for black body: Planck')\n b_cgs=h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)\n #b_USI=h_USI*c_USI/(wavelength*kB_USI*Temp)\n\n #print 'b_USI is',b_USI\n #print 'exponent is', np.exp(b_cgs)\n #print np.shape(np.exp(b_cgs))\n #print np.isinf(np.exp(b_cgs))\n if verbose == True:\n print('b_cgs is', b_cgs)\n print('be aware that {0} elements in the exponent of the Planck formula lead to an infinite exponent'.format(np.shape(np.exp(b_cgs)[np.isinf(np.exp(b_cgs))==True])[0]))\n print('denom shape is',np.shape(h_cgs*c_cgs/(wavelength_in_cm*kB_cgs*Temp)))\n #np.exp(wavelength_in_cm)\n E_cgs=coeff*2*math.pi*h_cgs*c_cgs**2/(wavelength_in_cm**5 *(np.exp(h_cgs*c_cgs/(np.float64(wavelength_in_cm)*kB_cgs*Temp)) - 1.0))\n #E_Hz=coeff*2*math.pi*h_cgs*nu**3/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0)) #this is the planck formula in Hz ()\n E_A=E_cgs*1e-8 # because cm-1 =(1e8 A)-1\n #E_mjy=1e-26*E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg\n #E_phot=coeff*2*math.pi*nu**2/(c_cgs**2*(np.exp(h_cgs*nu/(kB_cgs*Temp))-1.0))\n elif type.lower() == 'rj':\n if verbose == True:\n print('formula used for black body: Rayleigh-Jeans')\n #E_cgs=coeff*2*math.pi*c_cgs*kB_cgs*Temp/wavelength_in_cm**4\n #E_Hz=coeff*2*math.pi*kB_cgs*Temp*(nu/c_cgs)**2\n E_A = E_cgs * 1e-8 # because cm-1 =(1e8 A)-1\n #E_mjy = 1e-26 * E_Hz # because 1Jy=1e-26 J/(sec*m^2*Hz) and 1J=1e7erg\n #E_phot=None # I am not sure\n else:\n print('unknown formula')\n pdb.set_trace()\n #if redshift!=None:\n\n wavelength_fixed=wavelength*(redshift+1)\n #print(wavelength_fixed)\n #pdb.set_trace()\n #else:\n wavelength_fixed=wavelength*(redshift+1)\n E_A_fixed=E_A/(redshift+1)\n if Ebv==None:\n #spectrum_cgs=np.array(list(zip(wavelength_fixed,E_cgs)))#not sure how z influences\n #spectrum_Hz=np.array(list(zip(wavelength_fixed,E_Hz)))#not sure how z influences\n spectrum_A=np.array(list(zip(wavelength_fixed,E_A_fixed)))\n #spectrum_mjy=np.array(list(zip(wavelength_fixed,E_mjy)))#not sure how z influences\n #spectrum_phot=np.array(list(zip(wavelength_fixed,E_phot)))#not sure how z influences\n else:\n #print(wavelength)\n #print('***')\n #print(wavelength * 1e6)\n #pdb.set_trace()\n #spectrum_cgs=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_cgs))),Ebv,R=R_ext)[:,1])))# apply_extinction_to_theoretical_flux needs wavelengths in micropmeters\n 
#spectrum_Hz=np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength*1e6,E_Hz))),Ebv,R=R_ext)[:,1])))\n spectrum_A = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_A_fixed))),Ebv,R=R_ext)[:,1])))\n #spextrum_A_befor_E=np.array(list(zip(wavelength_fixed,E_A_fixed)))\n #spectrum_mjy = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_mjy))),Ebv,R=R_ext)[:, 1])))\n #spectrum_phot = np.array(list(zip(wavelength_fixed,extinction.apply_extinction_to_theoretical_flux(np.array(list(zip(wavelength * 1e6, E_phot))),Ebv,R=R_ext)[:, 1])))\n if plot==True:\n pylab.figure()\n pylab.plot(wavelength,E_A,label='sepctrum before applying z and E')\n pylab.plot(wavelength_fixed,E_A_fixed,label='sepctrum redshifted z={0}'.format(redshift))\n pylab.plot(spectrum_A[:,0],spectrum_A[:,1], label='sepctrum redshifted z={0} and extincted'.format(redshift))\n pylab.legend()\n pylab.show()\n #print('managed till here')\n #pdb.set_trace()\n return spectrum_A" ]
[ [ "numpy.exp", "numpy.shape", "numpy.float64" ] ]
madsbk/distributed
[ "2c5d2cf814f13b0efc0fb21acc890476158468da" ]
[ "distributed/dashboard/components/scheduler.py" ]
[ "from collections import defaultdict\nimport logging\nimport math\nfrom numbers import Number\nimport operator\nimport os\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import (\n ColumnDataSource,\n ColorBar,\n DataRange1d,\n HoverTool,\n ResetTool,\n PanTool,\n WheelZoomTool,\n TapTool,\n OpenURL,\n Range1d,\n value,\n NumeralTickFormatter,\n BoxZoomTool,\n AdaptiveTicker,\n BasicTicker,\n NumberFormatter,\n BoxSelectTool,\n GroupFilter,\n CDSView,\n Tabs,\n Panel,\n Title,\n)\nfrom bokeh.models.widgets import DataTable, TableColumn\nfrom bokeh.plotting import figure\nfrom bokeh.palettes import Viridis11\nfrom bokeh.themes import Theme\nfrom bokeh.transform import factor_cmap, linear_cmap, cumsum\nfrom bokeh.io import curdoc\nimport dask\nfrom dask import config\nfrom dask.utils import format_bytes, key_split\nfrom tlz import pipe\nfrom tlz.curried import map, concat, groupby\nfrom tornado import escape\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom distributed.dashboard.components import add_periodic_callback\nfrom distributed.dashboard.components.shared import (\n DashboardComponent,\n ProfileTimePlot,\n ProfileServer,\n SystemMonitor,\n)\nfrom distributed.dashboard.utils import (\n transpose,\n BOKEH_VERSION,\n PROFILING,\n without_property_validation,\n update,\n)\nfrom distributed.metrics import time\nfrom distributed.utils import log_errors, format_time, parse_timedelta\nfrom distributed.diagnostics.progress_stream import color_of, progress_quads\nfrom distributed.diagnostics.graph_layout import GraphLayout\nfrom distributed.diagnostics.task_stream import TaskStreamPlugin\nfrom distributed.diagnostics.task_stream import color_of as ts_color_of\nfrom distributed.diagnostics.task_stream import colors as ts_color_lookup\n\nif dask.config.get(\"distributed.dashboard.export-tool\"):\n from distributed.dashboard.export_tool import ExportTool\nelse:\n ExportTool = None\n\nlogger = logging.getLogger(__name__)\n\nfrom jinja2 import Environment, FileSystemLoader\n\nenv = Environment(\n loader=FileSystemLoader(\n os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"http\", \"templates\")\n )\n)\n\nBOKEH_THEME = Theme(os.path.join(os.path.dirname(__file__), \"..\", \"theme.yaml\"))\nTICKS_1024 = {\"base\": 1024, \"mantissas\": [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]}\n\nnan = float(\"nan\")\ninf = float(\"inf\")\n\n\nclass Occupancy(DashboardComponent):\n \"\"\" Occupancy (in time) per worker \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"occupancy\": [0, 0],\n \"worker\": [\"a\", \"b\"],\n \"x\": [0.0, 0.1],\n \"y\": [1, 2],\n \"ms\": [1, 2],\n \"color\": [\"red\", \"blue\"],\n \"escaped_worker\": [\"a\", \"b\"],\n }\n )\n\n fig = figure(\n title=\"Occupancy\",\n tools=\"\",\n id=\"bk-occupancy-plot\",\n x_axis_type=\"datetime\",\n **kwargs,\n )\n rect = fig.rect(\n source=self.source, x=\"x\", width=\"ms\", y=\"y\", height=1, color=\"color\"\n )\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.yaxis.visible = False\n fig.ygrid.visible = False\n # fig.xaxis[0].formatter = NumeralTickFormatter(format='0.0s')\n fig.x_range.start = 0\n\n tap = TapTool(callback=OpenURL(url=\"./info/worker/@escaped_worker.html\"))\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @occupancy s.\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover, tap)\n\n self.root = fig\n\n @without_property_validation\n def update(self):\n with 
log_errors():\n workers = list(self.scheduler.workers.values())\n\n y = list(range(len(workers)))\n occupancy = [ws.occupancy for ws in workers]\n ms = [occ * 1000 for occ in occupancy]\n x = [occ / 500 for occ in occupancy]\n total = sum(occupancy)\n color = []\n for ws in workers:\n if ws in self.scheduler.idle:\n color.append(\"red\")\n elif ws in self.scheduler.saturated:\n color.append(\"green\")\n else:\n color.append(\"blue\")\n\n if total:\n self.root.title.text = \"Occupancy -- total time: %s wall time: %s\" % (\n format_time(total),\n format_time(total / self.scheduler.total_nthreads),\n )\n else:\n self.root.title.text = \"Occupancy\"\n\n if occupancy:\n result = {\n \"occupancy\": occupancy,\n \"worker\": [ws.address for ws in workers],\n \"ms\": ms,\n \"color\": color,\n \"escaped_worker\": [escape.url_escape(ws.address) for ws in workers],\n \"x\": x,\n \"y\": y,\n }\n\n update(self.source, result)\n\n\nclass ProcessingHistogram(DashboardComponent):\n \"\"\" How many tasks are on each worker \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\"left\": [1, 2], \"right\": [10, 10], \"top\": [0, 0]}\n )\n\n self.root = figure(\n title=\"Tasks Processing (Histogram)\",\n id=\"bk-nprocessing-histogram-plot\",\n name=\"processing_hist\",\n y_axis_label=\"frequency\",\n tools=\"\",\n **kwargs,\n )\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar.logo = None\n self.root.toolbar_location = None\n\n self.root.quad(\n source=self.source,\n left=\"left\",\n right=\"right\",\n bottom=0,\n top=\"top\",\n color=\"deepskyblue\",\n fill_alpha=0.5,\n )\n\n @without_property_validation\n def update(self):\n L = [len(ws.processing) for ws in self.scheduler.workers.values()]\n counts, x = np.histogram(L, bins=40)\n self.source.data.update({\"left\": x[:-1], \"right\": x[1:], \"top\": counts})\n\n\nclass NBytesHistogram(DashboardComponent):\n \"\"\" How many tasks are on each worker \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\"left\": [1, 2], \"right\": [10, 10], \"top\": [0, 0]}\n )\n\n self.root = figure(\n title=\"Bytes Stored (Histogram)\",\n name=\"nbytes_hist\",\n id=\"bk-nbytes-histogram-plot\",\n y_axis_label=\"frequency\",\n tools=\"\",\n **kwargs,\n )\n\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.ticker = AdaptiveTicker(**TICKS_1024)\n self.root.xaxis.major_label_orientation = -math.pi / 12\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar.logo = None\n self.root.toolbar_location = None\n\n self.root.quad(\n source=self.source,\n left=\"left\",\n right=\"right\",\n bottom=0,\n top=\"top\",\n color=\"deepskyblue\",\n fill_alpha=0.5,\n )\n\n @without_property_validation\n def update(self):\n nbytes = np.asarray(\n [ws.metrics[\"memory\"] for ws in self.scheduler.workers.values()]\n )\n counts, x = np.histogram(nbytes, bins=40)\n d = {\"left\": x[:-1], \"right\": x[1:], \"top\": counts}\n self.source.data.update(d)\n\n self.root.title.text = \"Bytes stored (Histogram): \" + format_bytes(nbytes.sum())\n\n\nclass BandwidthTypes(DashboardComponent):\n \"\"\" Bar chart showing bandwidth per type \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = 
ColumnDataSource(\n {\n \"bandwidth\": [1, 2],\n \"bandwidth-half\": [0.5, 1],\n \"type\": [\"a\", \"b\"],\n \"bandwidth_text\": [\"1\", \"2\"],\n }\n )\n\n fig = figure(\n title=\"Bandwidth by Type\",\n tools=\"\",\n id=\"bk-bandwidth-type-plot\",\n name=\"bandwidth_type_histogram\",\n y_range=[\"a\", \"b\"],\n **kwargs,\n )\n fig.xaxis.major_label_orientation = -0.5\n rect = fig.rect(\n source=self.source,\n x=\"bandwidth-half\",\n y=\"type\",\n width=\"bandwidth\",\n height=1,\n color=\"blue\",\n )\n fig.x_range.start = 0\n fig.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n fig.xaxis.ticker = AdaptiveTicker(**TICKS_1024)\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.ygrid.visible = False\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"@type: @bandwidth_text / s\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n self.fig = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n bw = self.scheduler.bandwidth_types\n self.fig.y_range.factors = list(sorted(bw))\n result = {\n \"bandwidth\": list(bw.values()),\n \"bandwidth-half\": [b / 2 for b in bw.values()],\n \"type\": list(bw.keys()),\n \"bandwidth_text\": list(map(format_bytes, bw.values())),\n }\n self.fig.title.text = \"Bandwidth: \" + format_bytes(self.scheduler.bandwidth)\n\n update(self.source, result)\n\n\nclass BandwidthWorkers(DashboardComponent):\n \"\"\" How many tasks are on each worker \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"bandwidth\": [1, 2],\n \"source\": [\"a\", \"b\"],\n \"destination\": [\"a\", \"b\"],\n \"bandwidth_text\": [\"1\", \"2\"],\n }\n )\n\n values = [hex(x)[2:] for x in range(64, 256)][::-1]\n mapper = linear_cmap(\n field_name=\"bandwidth\",\n palette=[\"#\" + x + x + \"FF\" for x in values],\n low=0,\n high=1,\n )\n\n fig = figure(\n title=\"Bandwidth by Worker\",\n tools=\"\",\n id=\"bk-bandwidth-worker-plot\",\n name=\"bandwidth_worker_heatmap\",\n x_range=[\"a\", \"b\"],\n y_range=[\"a\", \"b\"],\n **kwargs,\n )\n fig.xaxis.major_label_orientation = -math.pi / 12\n rect = fig.rect(\n source=self.source,\n x=\"source\",\n y=\"destination\",\n color=mapper,\n height=1,\n width=1,\n )\n\n self.color_map = mapper[\"transform\"]\n color_bar = ColorBar(\n color_mapper=self.color_map,\n label_standoff=12,\n border_line_color=None,\n location=(0, 0),\n )\n color_bar.formatter = NumeralTickFormatter(format=\"0.0 b\")\n color_bar.ticker = AdaptiveTicker(**TICKS_1024)\n fig.add_layout(color_bar, \"right\")\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Source:</b> @source </p>\n <p><b>Destination:</b> @destination </p>\n <p><b>Bandwidth:</b> @bandwidth_text / s</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n self.fig = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n bw = self.scheduler.bandwidth_workers\n if not bw:\n return\n\n def name(address):\n try:\n ws = self.scheduler.workers[address]\n except KeyError:\n return address\n if ws.name is not None:\n return str(ws.name)\n else:\n return address\n\n x, y, value = zip(*[(name(a), name(b), c) for (a, b), c in bw.items()])\n\n self.color_map.high = max(value)\n\n factors = list(sorted(set(x + y)))\n self.fig.x_range.factors = factors\n 
self.fig.y_range.factors = factors[::-1]\n\n result = {\n \"source\": x,\n \"destination\": y,\n \"bandwidth\": value,\n \"bandwidth_text\": list(map(format_bytes, value)),\n }\n self.fig.title.text = \"Bandwidth: \" + format_bytes(self.scheduler.bandwidth)\n\n update(self.source, result)\n\n\nclass ComputePerKey(DashboardComponent):\n \"\"\" Bar chart showing time spend in action by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n\n compute_data = {\n \"times\": [0.2, 0.1],\n \"formatted_time\": [\"0.2 ms\", \"2.8 us\"],\n \"angles\": [3.14, 0.785],\n \"color\": [ts_color_lookup[\"transfer\"], ts_color_lookup[\"compute\"]],\n \"names\": [\"sum\", \"sum_partial\"],\n }\n\n self.compute_source = ColumnDataSource(data=compute_data)\n\n fig = figure(\n title=\"Compute Time Per Task\",\n tools=\"\",\n id=\"bk-Compute-by-key-plot\",\n name=\"compute_time_per_key\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n\n rect = fig.vbar(\n source=self.compute_source,\n x=\"names\",\n top=\"times\",\n width=0.7,\n color=\"color\",\n )\n\n fig.y_range.start = 0\n fig.min_border_right = 20\n fig.min_border_bottom = 60\n fig.yaxis.axis_label = \"Time (s)\"\n fig.yaxis[0].formatter = NumeralTickFormatter(format=\"0\")\n fig.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n fig.xaxis.major_label_orientation = -math.pi / 12\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.xgrid.visible = False\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n fig.add_layout(\n Title(\n text=\"Note: tasks less than 2% of max are not displayed\",\n text_font_style=\"italic\",\n ),\n \"below\",\n )\n\n self.fig = fig\n tab1 = Panel(child=fig, title=\"Bar Chart\")\n\n compute_wedge_data = {\n \"times\": [0.2, 0.1],\n \"formatted_time\": [\"0.2 ms\", \"2.8 us\"],\n \"angles\": [1.4, 0.8],\n \"color\": [ts_color_lookup[\"transfer\"], ts_color_lookup[\"compute\"]],\n \"names\": [\"sum\", \"sum_partial\"],\n }\n\n fig2 = figure(\n title=\"Compute Time Per Task\",\n tools=\"\",\n id=\"bk-Compute-by-key-pie\",\n name=\"compute_time_per_key-pie\",\n x_range=(-0.5, 1.0),\n **kwargs,\n )\n\n wedge = fig2.wedge(\n x=0,\n y=1,\n radius=0.4,\n start_angle=cumsum(\"angles\", include_zero=True),\n end_angle=cumsum(\"angles\"),\n line_color=\"white\",\n fill_color=\"color\",\n legend_field=\"names\",\n source=self.compute_source,\n )\n\n fig2.axis.axis_label = None\n fig2.axis.visible = False\n fig2.grid.grid_line_color = None\n fig2.add_layout(\n Title(\n text=\"Note: tasks less than 2% of max are not displayed\",\n text_font_style=\"italic\",\n ),\n \"below\",\n )\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig2.add_tools(hover)\n self.wedge_fig = fig2\n tab2 = Panel(child=fig2, title=\"Pie Chart\")\n\n self.tabs = Tabs(tabs=[tab1, tab2])\n\n @without_property_validation\n def update(self):\n with log_errors():\n compute_times = defaultdict(float)\n\n for key, ts in self.scheduler.task_prefixes.items():\n name = 
key_split(key)\n for action, t in ts.all_durations.items():\n if action == \"compute\":\n compute_times[name] += t\n\n # order by largest time first\n compute_times = sorted(\n compute_times.items(), key=lambda x: x[1], reverse=True\n )\n\n # keep only time which are 2% of max or greater\n if compute_times:\n max_time = compute_times[0][1] * 0.02\n compute_times = [(n, t) for n, t in compute_times if t > max_time]\n compute_colors = list()\n compute_names = list()\n compute_time = list()\n total_time = 0\n for name, t in compute_times:\n compute_names.append(name)\n compute_colors.append(ts_color_of(name))\n compute_time.append(t)\n total_time += t\n\n angles = [t / total_time * 2 * math.pi for t in compute_time]\n\n self.fig.x_range.factors = compute_names\n\n compute_result = dict(\n angles=angles,\n times=compute_time,\n color=compute_colors,\n names=compute_names,\n formatted_time=[format_time(t) for t in compute_time],\n )\n\n update(self.compute_source, compute_result)\n\n\nclass AggregateAction(DashboardComponent):\n \"\"\" Bar chart showing time spend in action by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n\n action_data = {\n \"times\": [0.2, 0.1],\n \"formatted_time\": [\"0.2 ms\", \"2.8 us\"],\n \"color\": [ts_color_lookup[\"transfer\"], ts_color_lookup[\"compute\"]],\n \"names\": [\"transfer\", \"compute\"],\n }\n\n self.action_source = ColumnDataSource(data=action_data)\n\n fig = figure(\n title=\"Aggregate Per Action\",\n tools=\"\",\n id=\"bk-aggregate-per-action-plot\",\n name=\"aggregate_per_action\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n\n rect = fig.vbar(\n source=self.action_source,\n x=\"names\",\n top=\"times\",\n width=0.7,\n color=\"color\",\n )\n\n fig.y_range.start = 0\n fig.min_border_right = 20\n fig.min_border_bottom = 60\n fig.yaxis[0].formatter = NumeralTickFormatter(format=\"0\")\n fig.yaxis.axis_label = \"Time (s)\"\n fig.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n fig.xaxis.major_label_orientation = -math.pi / 12\n fig.xaxis.major_label_text_font_size = \"16px\"\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.xgrid.visible = False\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n self.fig = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n agg_times = defaultdict(float)\n\n for key, ts in self.scheduler.task_prefixes.items():\n for action, t in ts.all_durations.items():\n agg_times[action] += t\n\n # order by largest time first\n agg_times = sorted(agg_times.items(), key=lambda x: x[1], reverse=True)\n\n agg_colors = list()\n agg_names = list()\n agg_time = list()\n for action, t in agg_times:\n agg_names.append(action)\n if action == \"compute\":\n agg_colors.append(\"purple\")\n else:\n agg_colors.append(ts_color_lookup[action])\n agg_time.append(t)\n\n self.fig.x_range.factors = agg_names\n self.fig.title.text = \"Aggregate Time Per Action\"\n\n action_result = dict(\n times=agg_time,\n color=agg_colors,\n names=agg_names,\n formatted_time=[format_time(t) for t in agg_time],\n )\n\n update(self.action_source, 
action_result)\n\n\nclass MemoryByKey(DashboardComponent):\n \"\"\" Bar chart showing memory use by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"name\": [\"a\", \"b\"],\n \"nbytes\": [100, 1000],\n \"count\": [1, 2],\n \"color\": [\"blue\", \"blue\"],\n }\n )\n\n fig = figure(\n title=\"Memory Use\",\n tools=\"\",\n id=\"bk-memory-by-key-plot\",\n name=\"memory_by_key\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n rect = fig.vbar(\n source=self.source, x=\"name\", top=\"nbytes\", width=0.9, color=\"color\"\n )\n fig.yaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n fig.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n fig.xaxis.major_label_orientation = -math.pi / 12\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.ygrid.visible = False\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"@name: @nbytes_text\"\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @name</p>\n <p><b>Bytes:</b> @nbytes_text </p>\n <p><b>Count:</b> @count objects </p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n self.fig = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n counts = defaultdict(int)\n nbytes = defaultdict(int)\n for ws in self.scheduler.workers.values():\n for ts in ws.has_what:\n ks = key_split(ts.key)\n counts[ks] += 1\n nbytes[ks] += ts.nbytes\n\n names = list(sorted(counts))\n self.fig.x_range.factors = names\n result = {\n \"name\": names,\n \"count\": [counts[name] for name in names],\n \"nbytes\": [nbytes[name] for name in names],\n \"nbytes_text\": [format_bytes(nbytes[name]) for name in names],\n \"color\": [color_of(name) for name in names],\n }\n self.fig.title.text = \"Total Use: \" + format_bytes(sum(nbytes.values()))\n\n update(self.source, result)\n\n\nclass CurrentLoad(DashboardComponent):\n \"\"\" How many tasks are on each worker \"\"\"\n\n def __init__(self, scheduler, width=600, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"nprocessing\": [1, 2],\n \"nprocessing-half\": [0.5, 1],\n \"nprocessing-color\": [\"red\", \"blue\"],\n \"nbytes\": [1, 2],\n \"nbytes-half\": [0.5, 1],\n \"nbytes_text\": [\"1B\", \"2B\"],\n \"cpu\": [1, 2],\n \"cpu-half\": [0.5, 1],\n \"worker\": [\"a\", \"b\"],\n \"y\": [1, 2],\n \"nbytes-color\": [\"blue\", \"blue\"],\n \"escaped_worker\": [\"a\", \"b\"],\n }\n )\n\n processing = figure(\n title=\"Tasks Processing\",\n tools=\"\",\n id=\"bk-nprocessing-plot\",\n name=\"processing_hist\",\n width=int(width / 2),\n **kwargs,\n )\n rect = processing.rect(\n source=self.source,\n x=\"nprocessing-half\",\n y=\"y\",\n width=\"nprocessing\",\n height=1,\n color=\"nprocessing-color\",\n )\n processing.x_range.start = 0\n rect.nonselection_glyph = None\n\n nbytes = figure(\n title=\"Bytes stored\",\n tools=\"\",\n id=\"bk-nbytes-worker-plot\",\n width=int(width / 2),\n name=\"nbytes_hist\",\n **kwargs,\n )\n rect = nbytes.rect(\n source=self.source,\n x=\"nbytes-half\",\n y=\"y\",\n width=\"nbytes\",\n height=1,\n color=\"nbytes-color\",\n )\n rect.nonselection_glyph = None\n\n cpu = figure(\n title=\"CPU Utilization\",\n tools=\"\",\n id=\"bk-cpu-worker-plot\",\n width=int(width / 2),\n name=\"cpu_hist\",\n x_range=(0, None),\n **kwargs,\n )\n rect = cpu.rect(\n source=self.source,\n x=\"cpu-half\",\n y=\"y\",\n 
width=\"cpu\",\n height=1,\n color=\"blue\",\n )\n rect.nonselection_glyph = None\n\n nbytes.axis[0].ticker = BasicTicker(**TICKS_1024)\n nbytes.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n nbytes.xaxis.major_label_orientation = -math.pi / 12\n nbytes.x_range.start = 0\n\n for fig in [processing, nbytes, cpu]:\n fig.xaxis.minor_tick_line_alpha = 0\n fig.yaxis.visible = False\n fig.ygrid.visible = False\n\n tap = TapTool(\n callback=OpenURL(url=\"./info/worker/@escaped_worker.html\")\n )\n fig.add_tools(tap)\n\n fig.toolbar.logo = None\n fig.toolbar_location = None\n fig.yaxis.visible = False\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @nprocessing tasks\"\n hover.point_policy = \"follow_mouse\"\n processing.add_tools(hover)\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @nbytes_text\"\n hover.point_policy = \"follow_mouse\"\n nbytes.add_tools(hover)\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @cpu %\"\n hover.point_policy = \"follow_mouse\"\n cpu.add_tools(hover)\n\n self.processing_figure = processing\n self.nbytes_figure = nbytes\n self.cpu_figure = cpu\n\n processing.y_range = nbytes.y_range\n cpu.y_range = nbytes.y_range\n\n @without_property_validation\n def update(self):\n with log_errors():\n workers = list(self.scheduler.workers.values())\n\n y = list(range(len(workers)))\n\n cpu = [int(ws.metrics[\"cpu\"]) for ws in workers]\n\n nprocessing = [len(ws.processing) for ws in workers]\n processing_color = []\n for ws in workers:\n if ws in self.scheduler.idle:\n processing_color.append(\"red\")\n elif ws in self.scheduler.saturated:\n processing_color.append(\"green\")\n else:\n processing_color.append(\"blue\")\n\n nbytes = [ws.metrics[\"memory\"] for ws in workers]\n nbytes_text = [format_bytes(nb) for nb in nbytes]\n nbytes_color = []\n max_limit = 0\n for ws, nb in zip(workers, nbytes):\n limit = (\n getattr(self.scheduler.workers[ws.address], \"memory_limit\", inf)\n or inf\n )\n\n if limit > max_limit and limit != inf:\n max_limit = limit\n\n if nb > limit:\n nbytes_color.append(\"red\")\n elif nb > limit / 2:\n nbytes_color.append(\"orange\")\n else:\n nbytes_color.append(\"blue\")\n\n now = time()\n if any(nprocessing) or self.last + 1 < now:\n self.last = now\n result = {\n \"cpu\": cpu,\n \"cpu-half\": [c / 2 for c in cpu],\n \"nprocessing\": nprocessing,\n \"nprocessing-half\": [np / 2 for np in nprocessing],\n \"nprocessing-color\": processing_color,\n \"nbytes\": nbytes,\n \"nbytes-half\": [nb / 2 for nb in nbytes],\n \"nbytes-color\": nbytes_color,\n \"nbytes_text\": nbytes_text,\n \"worker\": [ws.address for ws in workers],\n \"escaped_worker\": [escape.url_escape(ws.address) for ws in workers],\n \"y\": y,\n }\n\n self.nbytes_figure.title.text = \"Bytes stored: \" + format_bytes(\n sum(nbytes)\n )\n self.nbytes_figure.x_range.end = max_limit\n if self.scheduler.workers:\n self.cpu_figure.x_range.end = (\n max(ws.nthreads or 1 for ws in self.scheduler.workers.values())\n * 100\n )\n else:\n self.cpu_figure.x_range.end = 100\n\n update(self.source, result)\n\n\nclass StealingTimeSeries(DashboardComponent):\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\"time\": [time(), time() + 1], \"idle\": [0, 0.1], \"saturated\": [0, 0.1]}\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=20000, range_padding=0)\n\n fig = figure(\n title=\"Idle and Saturated Workers Over Time\",\n x_axis_type=\"datetime\",\n y_range=[-0.1, len(scheduler.workers) + 0.1],\n 
height=150,\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n fig.line(source=self.source, x=\"time\", y=\"idle\", color=\"red\")\n fig.line(source=self.source, x=\"time\", y=\"saturated\", color=\"green\")\n fig.yaxis.minor_tick_line_color = None\n\n fig.add_tools(\n ResetTool(), PanTool(dimensions=\"width\"), WheelZoomTool(dimensions=\"width\")\n )\n\n self.root = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n result = {\n \"time\": [time() * 1000],\n \"idle\": [len(self.scheduler.idle)],\n \"saturated\": [len(self.scheduler.saturated)],\n }\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(result, 10000)\n )\n else:\n self.source.stream(result, 10000)\n\n\nclass StealingEvents(DashboardComponent):\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.steal = scheduler.extensions[\"stealing\"]\n self.last = 0\n self.source = ColumnDataSource(\n {\n \"time\": [time() - 20, time()],\n \"level\": [0, 15],\n \"color\": [\"white\", \"white\"],\n \"duration\": [0, 0],\n \"radius\": [1, 1],\n \"cost_factor\": [0, 10],\n \"count\": [1, 1],\n }\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=20000, range_padding=0)\n\n fig = figure(\n title=\"Stealing Events\",\n x_axis_type=\"datetime\",\n y_axis_type=\"log\",\n height=250,\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n\n fig.circle(\n source=self.source,\n x=\"time\",\n y=\"cost_factor\",\n color=\"color\",\n size=\"radius\",\n alpha=0.5,\n )\n fig.yaxis.axis_label = \"Cost Multiplier\"\n\n hover = HoverTool()\n hover.tooltips = \"Level: @level, Duration: @duration, Count: @count, Cost factor: @cost_factor\"\n hover.point_policy = \"follow_mouse\"\n\n fig.add_tools(\n hover,\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n\n self.root = fig\n\n def convert(self, msgs):\n \"\"\" Convert a log message to a glyph \"\"\"\n total_duration = 0\n for msg in msgs:\n time, level, key, duration, sat, occ_sat, idl, occ_idl = msg\n total_duration += duration\n\n try:\n color = Viridis11[level]\n except (KeyError, IndexError):\n color = \"black\"\n\n radius = math.sqrt(min(total_duration, 10)) * 30 + 2\n\n d = {\n \"time\": time * 1000,\n \"level\": level,\n \"count\": len(msgs),\n \"color\": color,\n \"duration\": total_duration,\n \"radius\": radius,\n \"cost_factor\": min(10, self.steal.cost_multipliers[level]),\n }\n\n return d\n\n @without_property_validation\n def update(self):\n with log_errors():\n log = self.scheduler.get_events(topic=\"stealing\")\n n = self.steal.count - self.last\n log = [log[-i][1] for i in range(1, n + 1) if isinstance(log[-i][1], list)]\n self.last = self.steal.count\n\n if log:\n new = pipe(\n log,\n map(groupby(1)),\n map(dict.values),\n concat,\n map(self.convert),\n list,\n transpose,\n )\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(new, 10000)\n )\n else:\n self.source.stream(new, 10000)\n\n\nclass Events(DashboardComponent):\n def __init__(self, scheduler, name, height=150, **kwargs):\n self.scheduler = scheduler\n self.action_ys = dict()\n self.last = 0\n self.name = name\n self.source = ColumnDataSource(\n {\"time\": [], \"action\": [], \"hover\": [], \"y\": [], \"color\": []}\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=200000)\n\n fig = figure(\n title=name,\n x_axis_type=\"datetime\",\n height=height,\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n\n fig.circle(\n source=self.source,\n x=\"time\",\n y=\"y\",\n 
color=\"color\",\n size=50,\n alpha=0.5,\n **{\"legend_field\" if BOKEH_VERSION >= \"1.4\" else \"legend\": \"action\"},\n )\n fig.yaxis.axis_label = \"Action\"\n fig.legend.location = \"top_left\"\n\n hover = HoverTool()\n hover.tooltips = \"@action<br>@hover\"\n hover.point_policy = \"follow_mouse\"\n\n fig.add_tools(\n hover,\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n\n self.root = fig\n\n @without_property_validation\n def update(self):\n with log_errors():\n log = self.scheduler.events[self.name]\n n = self.scheduler.event_counts[self.name] - self.last\n if log:\n log = [log[-i] for i in range(1, n + 1)]\n self.last = self.scheduler.event_counts[self.name]\n\n if log:\n actions = []\n times = []\n hovers = []\n ys = []\n colors = []\n for msg_time, msg in log:\n times.append(msg_time * 1000)\n action = msg[\"action\"]\n actions.append(action)\n try:\n ys.append(self.action_ys[action])\n except KeyError:\n self.action_ys[action] = len(self.action_ys)\n ys.append(self.action_ys[action])\n colors.append(color_of(action))\n hovers.append(\"TODO\")\n\n new = {\n \"time\": times,\n \"action\": actions,\n \"hover\": hovers,\n \"y\": ys,\n \"color\": colors,\n }\n\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(new, 10000)\n )\n else:\n self.source.stream(new, 10000)\n\n\nclass TaskStream(DashboardComponent):\n def __init__(self, scheduler, n_rectangles=1000, clear_interval=\"20s\", **kwargs):\n self.scheduler = scheduler\n self.offset = 0\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n self.index = max(0, self.plugin.index - n_rectangles)\n self.workers = dict()\n self.n_rectangles = n_rectangles\n clear_interval = parse_timedelta(clear_interval, default=\"ms\")\n self.clear_interval = clear_interval\n self.last = 0\n self.last_seen = 0\n\n self.source, self.root = task_stream_figure(clear_interval, **kwargs)\n\n # Required for update callback\n self.task_stream_index = [0]\n\n @without_property_validation\n def update(self):\n if self.index == self.plugin.index:\n return\n with log_errors():\n if self.index and len(self.source.data[\"start\"]):\n start = min(self.source.data[\"start\"])\n duration = max(self.source.data[\"duration\"])\n boundary = (self.offset + start - duration) / 1000\n else:\n boundary = self.offset\n rectangles = self.plugin.rectangles(\n istart=self.index, workers=self.workers, start_boundary=boundary\n )\n n = len(rectangles[\"name\"])\n self.index = self.plugin.index\n\n if not rectangles[\"start\"]:\n return\n\n # If it has been a while since we've updated the plot\n if time() > self.last_seen + self.clear_interval:\n new_start = min(rectangles[\"start\"]) - self.offset\n old_start = min(self.source.data[\"start\"])\n old_end = max(\n map(\n operator.add,\n self.source.data[\"start\"],\n self.source.data[\"duration\"],\n )\n )\n\n density = (\n sum(self.source.data[\"duration\"])\n / len(self.workers)\n / (old_end - old_start)\n )\n\n # If whitespace is more than 3x the old width\n if (new_start - old_end) > (old_end - old_start) * 2 or density < 0.05:\n self.source.data.update({k: [] for k in rectangles}) # clear\n self.offset = min(rectangles[\"start\"]) # redefine offset\n\n rectangles[\"start\"] = [x - self.offset for x in rectangles[\"start\"]]\n self.last_seen = time()\n\n # Convert to numpy for serialization speed\n if n >= 10 and np:\n for k, v in 
rectangles.items():\n if isinstance(v[0], Number):\n rectangles[k] = np.array(v)\n\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(rectangles, self.n_rectangles)\n )\n else:\n self.source.stream(rectangles, self.n_rectangles)\n\n\ndef task_stream_figure(clear_interval=\"20s\", **kwargs):\n \"\"\"\n kwargs are applied to the bokeh.models.plots.Plot constructor\n \"\"\"\n clear_interval = parse_timedelta(clear_interval, default=\"ms\")\n\n source = ColumnDataSource(\n data=dict(\n start=[time() - clear_interval],\n duration=[0.1],\n key=[\"start\"],\n name=[\"start\"],\n color=[\"white\"],\n duration_text=[\"100 ms\"],\n worker=[\"foo\"],\n y=[0],\n worker_thread=[1],\n alpha=[0.0],\n )\n )\n\n x_range = DataRange1d(range_padding=0)\n y_range = DataRange1d(range_padding=0)\n\n root = figure(\n name=\"task_stream\",\n title=\"Task Stream\",\n id=\"bk-task-stream-plot\",\n x_range=x_range,\n y_range=y_range,\n toolbar_location=\"above\",\n x_axis_type=\"datetime\",\n min_border_right=35,\n tools=\"\",\n **kwargs,\n )\n\n rect = root.rect(\n source=source,\n x=\"start\",\n y=\"y\",\n width=\"duration\",\n height=0.4,\n fill_color=\"color\",\n line_color=\"color\",\n line_alpha=0.6,\n fill_alpha=\"alpha\",\n line_width=3,\n )\n rect.nonselection_glyph = None\n\n root.yaxis.major_label_text_alpha = 0\n root.yaxis.minor_tick_line_alpha = 0\n root.yaxis.major_tick_line_alpha = 0\n root.xgrid.visible = False\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">@name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@duration_text</span>\n </div>\n \"\"\",\n )\n\n tap = TapTool(callback=OpenURL(url=\"./profile?key=@name\"))\n\n root.add_tools(\n hover,\n tap,\n BoxZoomTool(),\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n if ExportTool:\n export = ExportTool()\n export.register_plot(root)\n root.add_tools(export)\n\n return source, root\n\n\nclass TaskGraph(DashboardComponent):\n \"\"\"\n A dynamic node-link diagram for the task graph on the scheduler\n\n See also the GraphLayout diagnostic at\n distributed/diagnostics/graph_layout.py\n \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.layout = GraphLayout(scheduler)\n self.invisible_count = 0 # number of invisible nodes\n\n self.node_source = ColumnDataSource(\n {\"x\": [], \"y\": [], \"name\": [], \"state\": [], \"visible\": [], \"key\": []}\n )\n self.edge_source = ColumnDataSource({\"x\": [], \"y\": [], \"visible\": []})\n\n node_view = CDSView(\n source=self.node_source,\n filters=[GroupFilter(column_name=\"visible\", group=\"True\")],\n )\n edge_view = CDSView(\n source=self.edge_source,\n filters=[GroupFilter(column_name=\"visible\", group=\"True\")],\n )\n\n node_colors = factor_cmap(\n \"state\",\n factors=[\"waiting\", \"processing\", \"memory\", \"released\", \"erred\"],\n palette=[\"gray\", \"green\", \"red\", \"blue\", \"black\"],\n )\n\n self.root = figure(title=\"Task Graph\", **kwargs)\n self.subtitle = Title(text=\" \", text_font_style=\"italic\")\n self.root.add_layout(self.subtitle, \"above\")\n\n self.root.multi_line(\n xs=\"x\",\n ys=\"y\",\n source=self.edge_source,\n line_width=1,\n view=edge_view,\n color=\"black\",\n alpha=0.3,\n )\n rect = self.root.square(\n x=\"x\",\n y=\"y\",\n size=10,\n color=node_colors,\n source=self.node_source,\n view=node_view,\n **{\"legend_field\" if BOKEH_VERSION >= 
\"1.4\" else \"legend\": \"state\"},\n )\n self.root.xgrid.grid_line_color = None\n self.root.ygrid.grid_line_color = None\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"<b>@name</b>: @state\",\n renderers=[rect],\n )\n tap = TapTool(callback=OpenURL(url=\"info/task/@key.html\"), renderers=[rect])\n rect.nonselection_glyph = None\n self.root.add_tools(hover, tap)\n self.max_items = config.get(\"distributed.dashboard.graph-max-items\", 5000)\n\n @without_property_validation\n def update(self):\n with log_errors():\n # If there are too many tasks in the scheduler we'll disable this\n # compoonents to not overload scheduler or client. Once we drop\n # below the threshold, the data is filled up again as usual\n if len(self.scheduler.tasks) > self.max_items:\n self.subtitle.text = \"Scheduler has too many tasks to display.\"\n for container in [self.node_source, self.edge_source]:\n container.data = {col: [] for col in container.column_names}\n else:\n # occasionally reset the column data source to remove old nodes\n if self.invisible_count > len(self.node_source.data[\"x\"]) / 2:\n self.layout.reset_index()\n self.invisible_count = 0\n update = True\n else:\n update = False\n\n new, self.layout.new = self.layout.new, []\n new_edges = self.layout.new_edges\n self.layout.new_edges = []\n\n self.add_new_nodes_edges(new, new_edges, update=update)\n\n self.patch_updates()\n\n if len(self.scheduler.tasks) == 0:\n self.subtitle.text = \"Scheduler is empty.\"\n else:\n self.subtitle.text = \" \"\n\n @without_property_validation\n def add_new_nodes_edges(self, new, new_edges, update=False):\n if new or update:\n node_key = []\n node_x = []\n node_y = []\n node_state = []\n node_name = []\n edge_x = []\n edge_y = []\n\n x = self.layout.x\n y = self.layout.y\n\n tasks = self.scheduler.tasks\n for key in new:\n try:\n task = tasks[key]\n except KeyError:\n continue\n xx = x[key]\n yy = y[key]\n node_key.append(escape.url_escape(key))\n node_x.append(xx)\n node_y.append(yy)\n node_state.append(task.state)\n node_name.append(task.prefix.name)\n\n for a, b in new_edges:\n try:\n edge_x.append([x[a], x[b]])\n edge_y.append([y[a], y[b]])\n except KeyError:\n pass\n\n node = {\n \"x\": node_x,\n \"y\": node_y,\n \"state\": node_state,\n \"name\": node_name,\n \"key\": node_key,\n \"visible\": [\"True\"] * len(node_x),\n }\n edge = {\"x\": edge_x, \"y\": edge_y, \"visible\": [\"True\"] * len(edge_x)}\n\n if update or not len(self.node_source.data[\"x\"]):\n # see https://github.com/bokeh/bokeh/issues/7523\n self.node_source.data.update(node)\n self.edge_source.data.update(edge)\n else:\n self.node_source.stream(node)\n self.edge_source.stream(edge)\n\n @without_property_validation\n def patch_updates(self):\n \"\"\"\n Small updates like color changes or lost nodes from task transitions\n \"\"\"\n n = len(self.node_source.data[\"x\"])\n m = len(self.edge_source.data[\"x\"])\n\n if self.layout.state_updates:\n state_updates = self.layout.state_updates\n self.layout.state_updates = []\n updates = [(i, c) for i, c in state_updates if i < n]\n self.node_source.patch({\"state\": updates})\n\n if self.layout.visible_updates:\n updates = self.layout.visible_updates\n updates = [(i, c) for i, c in updates if i < n]\n self.layout.visible_updates = []\n self.node_source.patch({\"visible\": updates})\n self.invisible_count += len(updates)\n\n if self.layout.visible_edge_updates:\n updates = self.layout.visible_edge_updates\n updates = [(i, c) for i, c in updates if i < m]\n 
self.layout.visible_edge_updates = []\n self.edge_source.patch({\"visible\": updates})\n\n def __del__(self):\n self.scheduler.remove_plugin(self.layout)\n\n\nclass TaskProgress(DashboardComponent):\n \"\"\" Progress bars per task type \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n\n data = progress_quads(\n dict(all={}, memory={}, erred={}, released={}, processing={})\n )\n self.source = ColumnDataSource(data=data)\n\n x_range = DataRange1d(range_padding=0)\n y_range = Range1d(-8, 0)\n\n self.root = figure(\n id=\"bk-task-progress-plot\",\n title=\"Progress\",\n name=\"task_progress\",\n x_range=x_range,\n y_range=y_range,\n toolbar_location=None,\n tools=\"\",\n **kwargs,\n )\n self.root.line( # just to define early ranges\n x=[0, 0.9], y=[-1, 0], line_color=\"#FFFFFF\", alpha=0.0\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"left\",\n right=\"right\",\n fill_color=\"#aaaaaa\",\n line_color=\"#aaaaaa\",\n fill_alpha=0.1,\n line_alpha=0.3,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"left\",\n right=\"released-loc\",\n fill_color=\"color\",\n line_color=\"color\",\n fill_alpha=0.6,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"released-loc\",\n right=\"memory-loc\",\n fill_color=\"color\",\n line_color=\"color\",\n fill_alpha=1.0,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"memory-loc\",\n right=\"erred-loc\",\n fill_color=\"black\",\n fill_alpha=0.5,\n line_alpha=0,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"erred-loc\",\n right=\"processing-loc\",\n fill_color=\"gray\",\n fill_alpha=0.35,\n line_alpha=0,\n )\n self.root.text(\n source=self.source,\n text=\"show-name\",\n y=\"bottom\",\n x=\"left\",\n x_offset=5,\n text_font_size=value(\"10pt\"),\n )\n self.root.text(\n source=self.source,\n text=\"done\",\n y=\"bottom\",\n x=\"right\",\n x_offset=-5,\n text_align=\"right\",\n text_font_size=value(\"10pt\"),\n )\n self.root.ygrid.visible = False\n self.root.yaxis.minor_tick_line_alpha = 0\n self.root.yaxis.visible = False\n self.root.xgrid.visible = False\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.xaxis.visible = False\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@name</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">All:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@all</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Memory:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@memory</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Erred:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@erred</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Ready:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@processing</span>\n </div>\n \"\"\",\n )\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n state = {\n \"memory\": {},\n \"erred\": {},\n \"released\": {},\n \"processing\": {},\n \"waiting\": {},\n }\n\n for tp in self.scheduler.task_prefixes.values():\n 
active_states = tp.active_states\n if any(active_states.get(s) for s in state.keys()):\n state[\"memory\"][tp.name] = active_states[\"memory\"]\n state[\"erred\"][tp.name] = active_states[\"erred\"]\n state[\"released\"][tp.name] = active_states[\"released\"]\n state[\"processing\"][tp.name] = active_states[\"processing\"]\n state[\"waiting\"][tp.name] = active_states[\"waiting\"]\n\n state[\"all\"] = {\n k: sum(v[k] for v in state.values()) for k in state[\"memory\"]\n }\n\n if not state[\"all\"] and not len(self.source.data[\"all\"]):\n return\n\n d = progress_quads(state)\n\n update(self.source, d)\n\n totals = {\n k: sum(state[k].values())\n for k in [\"all\", \"memory\", \"erred\", \"released\", \"waiting\"]\n }\n totals[\"processing\"] = totals[\"all\"] - sum(\n v for k, v in totals.items() if k != \"all\"\n )\n\n self.root.title.text = (\n \"Progress -- total: %(all)s, \"\n \"in-memory: %(memory)s, processing: %(processing)s, \"\n \"waiting: %(waiting)s, \"\n \"erred: %(erred)s\" % totals\n )\n\n\nclass WorkerTable(DashboardComponent):\n \"\"\"Status of the current workers\n\n This is two plots, a text-based table for each host and a thin horizontal\n plot laying out hosts by their current memory use.\n \"\"\"\n\n excluded_names = {\"executing\", \"in_flight\", \"in_memory\", \"ready\", \"time\"}\n\n def __init__(self, scheduler, width=800, **kwargs):\n self.scheduler = scheduler\n self.names = [\n \"name\",\n \"address\",\n \"nthreads\",\n \"cpu\",\n \"memory\",\n \"memory_limit\",\n \"memory_percent\",\n \"num_fds\",\n \"read_bytes\",\n \"write_bytes\",\n \"cpu_fraction\",\n ]\n workers = self.scheduler.workers.values()\n self.extra_names = sorted(\n {\n m\n for ws in workers\n for m, v in ws.metrics.items()\n if m not in self.names and isinstance(v, (str, int, float))\n }\n - self.excluded_names\n )\n\n table_names = [\n \"name\",\n \"address\",\n \"nthreads\",\n \"cpu\",\n \"memory\",\n \"memory_limit\",\n \"memory_percent\",\n \"num_fds\",\n \"read_bytes\",\n \"write_bytes\",\n ]\n\n self.source = ColumnDataSource({k: [] for k in self.names})\n\n columns = {\n name: TableColumn(field=name, title=name.replace(\"_percent\", \" %\"))\n for name in table_names\n }\n\n formatters = {\n \"cpu\": NumberFormatter(format=\"0.0 %\"),\n \"memory_percent\": NumberFormatter(format=\"0.0 %\"),\n \"memory\": NumberFormatter(format=\"0 b\"),\n \"memory_limit\": NumberFormatter(format=\"0 b\"),\n \"read_bytes\": NumberFormatter(format=\"0 b\"),\n \"write_bytes\": NumberFormatter(format=\"0 b\"),\n \"num_fds\": NumberFormatter(format=\"0\"),\n \"nthreads\": NumberFormatter(format=\"0\"),\n }\n\n if BOKEH_VERSION < \"0.12.15\":\n dt_kwargs = {\"row_headers\": False}\n else:\n dt_kwargs = {\"index_position\": None}\n\n table = DataTable(\n source=self.source,\n columns=[columns[n] for n in table_names],\n reorderable=True,\n sortable=True,\n width=width,\n **dt_kwargs,\n )\n\n for name in table_names:\n if name in formatters:\n table.columns[table_names.index(name)].formatter = formatters[name]\n\n extra_names = [\"name\", \"address\"] + self.extra_names\n extra_columns = {\n name: TableColumn(field=name, title=name.replace(\"_percent\", \"%\"))\n for name in extra_names\n }\n\n extra_table = DataTable(\n source=self.source,\n columns=[extra_columns[n] for n in extra_names],\n reorderable=True,\n sortable=True,\n width=width,\n **dt_kwargs,\n )\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">Worker 
(@name): </span>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@memory_percent</span>\n </div>\n \"\"\",\n )\n\n mem_plot = figure(\n title=\"Memory Use (%)\",\n toolbar_location=None,\n x_range=(0, 1),\n y_range=(-0.1, 0.1),\n height=60,\n width=width,\n tools=\"\",\n **kwargs,\n )\n mem_plot.circle(\n source=self.source, x=\"memory_percent\", y=0, size=10, fill_alpha=0.5\n )\n mem_plot.ygrid.visible = False\n mem_plot.yaxis.minor_tick_line_alpha = 0\n mem_plot.xaxis.visible = False\n mem_plot.yaxis.visible = False\n mem_plot.add_tools(hover, BoxSelectTool())\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">Worker (@name): </span>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@cpu_fraction</span>\n </div>\n \"\"\",\n )\n\n cpu_plot = figure(\n title=\"CPU Use (%)\",\n toolbar_location=None,\n x_range=(0, 1),\n y_range=(-0.1, 0.1),\n height=60,\n width=width,\n tools=\"\",\n **kwargs,\n )\n cpu_plot.circle(\n source=self.source, x=\"cpu_fraction\", y=0, size=10, fill_alpha=0.5\n )\n cpu_plot.ygrid.visible = False\n cpu_plot.yaxis.minor_tick_line_alpha = 0\n cpu_plot.xaxis.visible = False\n cpu_plot.yaxis.visible = False\n cpu_plot.add_tools(hover, BoxSelectTool())\n self.cpu_plot = cpu_plot\n\n if \"sizing_mode\" in kwargs:\n sizing_mode = {\"sizing_mode\": kwargs[\"sizing_mode\"]}\n else:\n sizing_mode = {}\n\n components = [cpu_plot, mem_plot, table]\n if self.extra_names:\n components.append(extra_table)\n\n self.root = column(*components, id=\"bk-worker-table\", **sizing_mode)\n\n @without_property_validation\n def update(self):\n data = {name: [] for name in self.names + self.extra_names}\n for i, (addr, ws) in enumerate(\n sorted(self.scheduler.workers.items(), key=lambda kv: str(kv[1].name))\n ):\n for name in self.names + self.extra_names:\n data[name].append(ws.metrics.get(name, None))\n data[\"name\"][-1] = ws.name if ws.name is not None else i\n data[\"address\"][-1] = ws.address\n if ws.memory_limit:\n data[\"memory_percent\"][-1] = ws.metrics[\"memory\"] / ws.memory_limit\n else:\n data[\"memory_percent\"][-1] = \"\"\n data[\"memory_limit\"][-1] = ws.memory_limit\n data[\"cpu\"][-1] = ws.metrics[\"cpu\"] / 100.0\n data[\"cpu_fraction\"][-1] = ws.metrics[\"cpu\"] / 100.0 / ws.nthreads\n data[\"nthreads\"][-1] = ws.nthreads\n\n for name in self.names + self.extra_names:\n if name == \"name\":\n data[name].insert(\n 0, \"Total ({nworkers})\".format(nworkers=len(data[name]))\n )\n continue\n try:\n if len(self.scheduler.workers) == 0:\n total_data = None\n elif name == \"memory_percent\":\n total_mem = sum(\n ws.memory_limit for ws in self.scheduler.workers.values()\n )\n total_data = (\n (\n sum(\n ws.metrics[\"memory\"]\n for ws in self.scheduler.workers.values()\n )\n / total_mem\n )\n if total_mem\n else \"\"\n )\n elif name == \"cpu\":\n total_data = (\n sum(ws.metrics[\"cpu\"] for ws in self.scheduler.workers.values())\n / 100\n / len(self.scheduler.workers.values())\n )\n elif name == \"cpu_fraction\":\n total_data = (\n sum(ws.metrics[\"cpu\"] for ws in self.scheduler.workers.values())\n / 100\n / sum(ws.nthreads for ws in self.scheduler.workers.values())\n )\n else:\n total_data = sum(data[name])\n\n data[name].insert(0, total_data)\n except TypeError:\n data[name].insert(0, None)\n\n self.source.data.update(data)\n\n\ndef systemmonitor_doc(scheduler, extra, doc):\n with log_errors():\n sysmon = SystemMonitor(scheduler, 
sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Scheduler System Monitor\"\n add_periodic_callback(doc, sysmon, 500)\n\n doc.add_root(sysmon.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef stealing_doc(scheduler, extra, doc):\n with log_errors():\n occupancy = Occupancy(scheduler, height=200, sizing_mode=\"scale_width\")\n stealing_ts = StealingTimeSeries(scheduler, sizing_mode=\"scale_width\")\n stealing_events = StealingEvents(scheduler, sizing_mode=\"scale_width\")\n stealing_events.root.x_range = stealing_ts.root.x_range\n doc.title = \"Dask: Work Stealing\"\n add_periodic_callback(doc, occupancy, 500)\n add_periodic_callback(doc, stealing_ts, 500)\n add_periodic_callback(doc, stealing_events, 500)\n\n doc.add_root(\n column(\n occupancy.root,\n stealing_ts.root,\n stealing_events.root,\n sizing_mode=\"scale_width\",\n )\n )\n\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef events_doc(scheduler, extra, doc):\n with log_errors():\n events = Events(scheduler, \"all\", height=250)\n events.update()\n add_periodic_callback(doc, events, 500)\n doc.title = \"Dask: Scheduler Events\"\n doc.add_root(column(events.root, sizing_mode=\"scale_width\"))\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef workers_doc(scheduler, extra, doc):\n with log_errors():\n table = WorkerTable(scheduler)\n table.update()\n add_periodic_callback(doc, table, 500)\n doc.title = \"Dask: Workers\"\n doc.add_root(table.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef tasks_doc(scheduler, extra, doc):\n with log_errors():\n ts = TaskStream(\n scheduler,\n n_rectangles=dask.config.get(\n \"distributed.scheduler.dashboard.tasks.task-stream-length\"\n ),\n clear_interval=\"60s\",\n sizing_mode=\"stretch_both\",\n )\n ts.update()\n add_periodic_callback(doc, ts, 5000)\n doc.title = \"Dask: Task Stream\"\n doc.add_root(ts.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef graph_doc(scheduler, extra, doc):\n with log_errors():\n graph = TaskGraph(scheduler, sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Task Graph\"\n graph.update()\n add_periodic_callback(doc, graph, 200)\n doc.add_root(graph.root)\n\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef status_doc(scheduler, extra, doc):\n with log_errors():\n task_stream = TaskStream(\n scheduler,\n n_rectangles=dask.config.get(\n \"distributed.scheduler.dashboard.status.task-stream-length\"\n ),\n clear_interval=\"5s\",\n sizing_mode=\"stretch_both\",\n )\n task_stream.update()\n add_periodic_callback(doc, task_stream, 100)\n\n task_progress = TaskProgress(scheduler, sizing_mode=\"stretch_both\")\n task_progress.update()\n add_periodic_callback(doc, task_progress, 100)\n\n if len(scheduler.workers) < 50:\n current_load = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n current_load.update()\n add_periodic_callback(doc, current_load, 100)\n doc.add_root(current_load.nbytes_figure)\n doc.add_root(current_load.processing_figure)\n else:\n nbytes_hist = NBytesHistogram(scheduler, sizing_mode=\"stretch_both\")\n nbytes_hist.update()\n processing_hist = ProcessingHistogram(scheduler, 
sizing_mode=\"stretch_both\")\n processing_hist.update()\n add_periodic_callback(doc, nbytes_hist, 100)\n add_periodic_callback(doc, processing_hist, 100)\n current_load_fig = row(\n nbytes_hist.root, processing_hist.root, sizing_mode=\"stretch_both\"\n )\n\n doc.add_root(nbytes_hist.root)\n doc.add_root(processing_hist.root)\n\n doc.title = \"Dask: Status\"\n doc.add_root(task_progress.root)\n doc.add_root(task_stream.root)\n doc.theme = BOKEH_THEME\n doc.template = env.get_template(\"status.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef individual_task_stream_doc(scheduler, extra, doc):\n task_stream = TaskStream(\n scheduler, n_rectangles=1000, clear_interval=\"10s\", sizing_mode=\"stretch_both\"\n )\n task_stream.update()\n add_periodic_callback(doc, task_stream, 100)\n doc.add_root(task_stream.root)\n doc.theme = BOKEH_THEME\n\n\ndef individual_nbytes_doc(scheduler, extra, doc):\n current_load = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n current_load.update()\n add_periodic_callback(doc, current_load, 100)\n doc.add_root(current_load.nbytes_figure)\n doc.theme = BOKEH_THEME\n\n\ndef individual_cpu_doc(scheduler, extra, doc):\n current_load = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n current_load.update()\n add_periodic_callback(doc, current_load, 100)\n doc.add_root(current_load.cpu_figure)\n doc.theme = BOKEH_THEME\n\n\ndef individual_nprocessing_doc(scheduler, extra, doc):\n current_load = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n current_load.update()\n add_periodic_callback(doc, current_load, 100)\n doc.add_root(current_load.processing_figure)\n doc.theme = BOKEH_THEME\n\n\ndef individual_progress_doc(scheduler, extra, doc):\n task_progress = TaskProgress(scheduler, height=160, sizing_mode=\"stretch_both\")\n task_progress.update()\n add_periodic_callback(doc, task_progress, 100)\n doc.add_root(task_progress.root)\n doc.theme = BOKEH_THEME\n\n\ndef individual_graph_doc(scheduler, extra, doc):\n with log_errors():\n graph = TaskGraph(scheduler, sizing_mode=\"stretch_both\")\n graph.update()\n\n add_periodic_callback(doc, graph, 200)\n doc.add_root(graph.root)\n doc.theme = BOKEH_THEME\n\n\ndef individual_systemmonitor_doc(scheduler, extra, doc):\n with log_errors():\n sysmon = SystemMonitor(scheduler, sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Scheduler System Monitor\"\n add_periodic_callback(doc, sysmon, 500)\n\n doc.add_root(sysmon.root)\n doc.theme = BOKEH_THEME\n\n\ndef individual_profile_doc(scheduler, extra, doc):\n with log_errors():\n prof = ProfileTimePlot(scheduler, sizing_mode=\"scale_width\", doc=doc)\n doc.add_root(prof.root)\n prof.trigger_update()\n doc.theme = BOKEH_THEME\n\n\ndef individual_profile_server_doc(scheduler, extra, doc):\n with log_errors():\n prof = ProfileServer(scheduler, sizing_mode=\"scale_width\", doc=doc)\n doc.add_root(prof.root)\n prof.trigger_update()\n doc.theme = BOKEH_THEME\n\n\ndef individual_workers_doc(scheduler, extra, doc):\n with log_errors():\n table = WorkerTable(scheduler)\n table.update()\n add_periodic_callback(doc, table, 500)\n doc.add_root(table.root)\n doc.theme = BOKEH_THEME\n\n\ndef individual_bandwidth_types_doc(scheduler, extra, doc):\n with log_errors():\n bw = BandwidthTypes(scheduler, sizing_mode=\"stretch_both\")\n bw.update()\n add_periodic_callback(doc, bw, 500)\n doc.add_root(bw.fig)\n doc.theme = BOKEH_THEME\n\n\ndef individual_bandwidth_workers_doc(scheduler, extra, doc):\n with log_errors():\n bw = BandwidthWorkers(scheduler, 
sizing_mode=\"stretch_both\")\n bw.update()\n add_periodic_callback(doc, bw, 500)\n doc.add_root(bw.fig)\n doc.theme = BOKEH_THEME\n\n\ndef individual_memory_by_key_doc(scheduler, extra, doc):\n with log_errors():\n component = MemoryByKey(scheduler, sizing_mode=\"stretch_both\")\n component.update()\n add_periodic_callback(doc, component, 500)\n doc.add_root(component.fig)\n doc.theme = BOKEH_THEME\n\n\ndef individual_compute_time_per_key_doc(scheduler, extra, doc):\n with log_errors():\n component = ComputePerKey(scheduler, sizing_mode=\"stretch_both\")\n component.update()\n add_periodic_callback(doc, component, 500)\n doc.add_root(component.tabs)\n doc.theme = BOKEH_THEME\n\n\ndef individual_aggregate_time_per_action_doc(scheduler, extra, doc):\n with log_errors():\n component = AggregateAction(scheduler, sizing_mode=\"stretch_both\")\n component.update()\n add_periodic_callback(doc, component, 500)\n doc.add_root(component.fig)\n doc.theme = BOKEH_THEME\n\n\ndef profile_doc(scheduler, extra, doc):\n with log_errors():\n doc.title = \"Dask: Profile\"\n prof = ProfileTimePlot(scheduler, sizing_mode=\"stretch_both\", doc=doc)\n doc.add_root(prof.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n prof.trigger_update()\n\n\ndef profile_server_doc(scheduler, extra, doc):\n with log_errors():\n doc.title = \"Dask: Profile of Event Loop\"\n prof = ProfileServer(scheduler, sizing_mode=\"stretch_both\", doc=doc)\n doc.add_root(prof.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n prof.trigger_update()\n" ]
[ [ "numpy.array", "numpy.histogram" ] ]
karthikbhamidipati/image-classification-deeper-networks
[ "d6fececa08092a0b8af6fd01fe89485958e61c01", "d6fececa08092a0b8af6fd01fe89485958e61c01" ]
[ "tests/test_metrics.py", "model/predict.py" ]
[ "import unittest\n\nimport torch\n\nfrom model.metrics import Metrics\n\n\nclass TestMetrics(unittest.TestCase):\n def test_metrics_initialization(self):\n metrics = Metrics(2)\n self.assertEqual(metrics.asdict(), {'loss': 0.0, 'accuracy': 0.0,\n 'precision': 0.0, 'recall': 0.0,\n 'fscore': 0.0, 'top_k_accuracy': 0.0},\n \"Initial metrics mismatch\")\n\n def test_metrics_update(self):\n metrics = Metrics(2)\n metrics.update(torch.tensor(2.999),\n torch.Tensor([[0.5, 0.2, 0.2],\n [0.3, 0.4, 0.2],\n [0.2, 0.4, 0.3],\n [0.7, 0.2, 0.1]]),\n torch.Tensor([0, 1, 2, 0]))\n self.assertEqual(metrics.asdict(), {'loss': 3.0, 'accuracy': 75.0,\n 'precision': 83.33, 'recall': 66.67,\n 'fscore': 55.56, 'top_k_accuracy': 100.0},\n \"Metrics mismatch first update\")\n\n metrics.update(torch.tensor(2.0),\n torch.Tensor([[0.5, 0.2, 0.2],\n [0.3, 0.4, 0.2],\n [0.2, 0.4, 0.3],\n [0.7, 0.2, 0.1]]),\n torch.Tensor([0, 1, 0, 2]))\n self.assertEqual(metrics.asdict(), {'loss': 2.5, 'accuracy': 62.5,\n 'precision': 75.0, 'recall': 58.34,\n 'fscore': 47.22, 'top_k_accuracy': 75.0},\n \"Metrics mismatch second update\")\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import logging\n\nimport torch\nimport wandb\nfrom torchvision.models import GoogLeNetOutputs\n\nfrom model import run_device\nfrom model.metrics import Metrics\n\n\ndef predict(model, data_loader, criterion):\n metrics = Metrics()\n\n model.eval()\n with torch.no_grad():\n for data, labels in data_loader:\n data, label = data.to(run_device), labels.to(run_device)\n output = model(data)\n if isinstance(output, GoogLeNetOutputs):\n output = output[0]\n loss = criterion(output, label)\n metrics.update(loss, output, label)\n\n return metrics.asdict()\n\n\ndef log_pred_metrics(metrics):\n logging.info(\"Test stats: {}\".format(metrics))\n wandb.log({'test': metrics})\n" ]
[ [ "torch.Tensor", "torch.tensor" ], [ "torch.no_grad" ] ]
xuyuluo/spreco
[ "e9c720fb0d8a9c59a0e83696d2b7efcdc90b2cc3" ]
[ "spreco/model/refine_net.py" ]
[ "from spreco.model import nn, utils\n\nimport tensorflow.compat.v1 as tf\ntf.disable_eager_execution()\nfrom tf_slim import add_arg_scope\nfrom tf_slim import arg_scope\n\n@add_arg_scope\ndef cond_crp_block(x, h, nr_filters, nr_stages, nonlinearity, normalizer, **kwargs):\n \"\"\"\n chained residual pool block\n \"\"\"\n x = nonlinearity(x)\n path = x\n if nr_filters is None:\n nr_filters = nn.int_shape(x)[-1]\n for _ in range(nr_stages):\n path = normalizer(path, h)\n path = tf.nn.avg_pool2d(path, ksize=[5,5], strides=1, padding=\"SAME\") #avg_pool2d\n path = nn.conv2d_plus(path, nr_filters, nonlinearity=None, bias=False, scope='cond_crp') # don't need bias \n x = path + x\n return x\n\n@add_arg_scope\ndef cond_rcu_block(x, h, nr_filters, nr_resnet, nr_stages, nonlinearity, normalizer, **kwargs):\n \"\"\"\n residual convolution unit\n \"\"\"\n if nr_filters is None:\n nr_filters = nn.int_shape(x)[-1]\n for _ in range(nr_resnet):\n residual = x\n for _ in range(nr_stages):\n x = normalizer(x, h)\n x = nonlinearity(x)\n x = nn.conv2d_plus(x, nr_filters, nonlinearity=None, bias=False, scope='cond_rcu') # don't need bias\n x += residual\n return x\n\n@add_arg_scope\ndef cond_msf_block(blocks, h, nr_filters, out_shape, normalizer, **kwargs):\n \"\"\"\n multi-resolution fusion\n blocks -> a list or tuple of blocks passed to msf \n out_shape -> [batch_size, dim_0, dim_1, 2]\n \"\"\"\n sums = []\n \n for i in range(len(blocks)):\n xl_out = normalizer(blocks[i], h)\n if nr_filters is None:\n nr_filters = nn.int_shape(blocks[i])[-1]\n xl_out = nn.conv2d_plus(xl_out, nr_filters, nonlinearity=None, scope='cond_msf')\n xl_out = tf.image.resize(xl_out, out_shape, method='bilinear')\n sums.append(xl_out)\n return tf.reduce_sum(sums, axis=0)\n\n\n@add_arg_scope\ndef cond_refine_block(blocks, h, nr_filters, out_shape, nonlinearity, normalizer, end=False, **kwargs):\n \"\"\"\n refine block\n \"\"\"\n outs = []\n\n for i in range(len(blocks)):\n outs.append(cond_rcu_block(blocks[i], h, nr_filters=None, nr_resnet=2, nr_stages=2, nonlinearity=nonlinearity, normalizer=normalizer))\n \n if len(blocks) > 1:\n y = cond_msf_block(outs, h, nr_filters=nr_filters, out_shape=out_shape, normalizer=normalizer)\n else:\n y = outs[0]\n \n y = cond_crp_block(y, h, nr_filters=None, nr_stages=2, nonlinearity=nonlinearity, normalizer=normalizer)\n y = cond_rcu_block(y, h, nr_filters, nr_resnet=3 if end else 1, nr_stages=2, nonlinearity=nonlinearity, normalizer=normalizer)\n \n return y\n\n@add_arg_scope\ndef cond_res_block(x, h, out_filters, nonlinearity, normalizer, rescale=False, **kwargs):\n \"\"\"\n resnet block\n out_filters is output_dims/feature\n \"\"\"\n in_filters = nn.int_shape(x)[-1]\n x_skip = x\n x = normalizer(x, h) \n x = nonlinearity(x)\n if rescale:\n x = nn.conv2d_plus(x, in_filters, nonlinearity=None, scope='cond_res', **kwargs)\n else:\n x = nn.conv2d_plus(x, out_filters, nonlinearity=None, scope='cond_res', **kwargs)\n\n x = normalizer(x, h)\n x = nonlinearity(x)\n\n x = nn.conv2d_plus(x, out_filters, nonlinearity=None, scope='cond_res', **kwargs)\n if 'dilation' not in kwargs.keys() and rescale:\n x = tf.nn.avg_pool2d(x, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')\n \n if out_filters == in_filters and not rescale:\n shortcut = x_skip\n else:\n if 'dilation' not in kwargs.keys():\n shortcut = nn.conv2d_plus(x_skip, out_filters, filter_size=[1,1])\n shortcut = tf.nn.avg_pool2d(shortcut, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')\n else:\n shortcut = nn.conv2d_plus(x_skip, 
out_filters)\n\n return shortcut + x\n\nclass cond_refine_net():\n\n def __init__(self, config, chns=2, normalizer=nn.cond_instance_norm_plus):\n self.chns = chns\n self.nr_filters = config['nr_filters']\n self.nr_classes = config['nr_levels']\n self.nonlinearity = utils.get_nonlinearity(config['nonlinearity'])\n self.normalizer = normalizer\n self.counters = {}\n self.affine_x = config['affine_x']\n self.forward = tf.make_template('forward', self.body)\n\n def body(self, x, h):\n \"\"\"\n multi level refine net conditional on y\n \"\"\"\n if self.affine_x:\n x = 2*x - 1\n\n with arg_scope([nn.conv2d_plus, cond_refine_block, cond_crp_block, cond_rcu_block, cond_msf_block, cond_res_block, nn.cond_instance_norm_plus],\n nonlinearity=self.nonlinearity, counters=self.counters, normalizer=self.normalizer, nr_classes=self.nr_classes):\n\n x_level_0 = nn.conv2d_plus(x, num_filters=1*self.nr_filters, nonlinearity=None)\n x_level_1_0 = cond_res_block(x_level_0, h, out_filters=1*self.nr_filters, rescale=False)\n x_level_1_1 = cond_res_block(x_level_1_0, h, out_filters=1*self.nr_filters, rescale=False)\n x_level_2_0 = cond_res_block(x_level_1_1, h, out_filters=2*self.nr_filters, rescale=True)\n x_level_2_1 = cond_res_block(x_level_2_0, h, out_filters=2*self.nr_filters, rescale=False)\n x_level_3_0 = cond_res_block(x_level_2_1, h, out_filters=2*self.nr_filters, rescale=True, dilation=2)\n x_level_3_1 = cond_res_block(x_level_3_0, h, out_filters=2*self.nr_filters, rescale=False, dilation=2)\n x_level_4_0 = cond_res_block(x_level_3_1, h, out_filters=2*self.nr_filters, rescale=True, dilation=4)\n x_level_4_1 = cond_res_block(x_level_4_0, h, out_filters=2*self.nr_filters, rescale=False, dilation=4)\n\n refine_0 = cond_refine_block([x_level_4_1], h, nr_filters=2*self.nr_filters, out_shape=nn.int_shape(x_level_4_1)[1:3])\n refine_1 = cond_refine_block([x_level_3_1, refine_0], h, nr_filters=2*self.nr_filters, out_shape=nn.int_shape(x_level_3_1)[1:3])\n refine_2 = cond_refine_block([x_level_2_1, refine_1], h, nr_filters=1*self.nr_filters, out_shape=nn.int_shape(x_level_2_1)[1:3])\n refine_3 = cond_refine_block([x_level_1_1, refine_2], h, nr_filters=1*self.nr_filters, out_shape=nn.int_shape(x_level_1_1)[1:3], end=True)\n \n\n out = self.normalizer(refine_3, h)\n out = self.nonlinearity(out)\n out = nn.conv2d_plus(out, num_filters=self.chns, nonlinearity=None)\n \n self.counters = {} # reset counters\n\n return out" ]
[ [ "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.make_template", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.compat.v1.nn.avg_pool2d", "tensorflow.compat.v1.image.resize" ] ]
mbarylsk/puzzles
[ "e320a880062b6b6a6670bcd4379611f4feb43c21" ]
[ "architect/architect.py" ]
[ "#\n# Copyright 2019, Marcin Barylski\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software \n# and associated documentation files (the \"Software\"), to deal in the Software without restriction, \n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, \n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, \n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nimport numpy\nimport dataprocessing\n\nGAS_CANDIDATE = 1\nGAS_HOUSE_NORTH = 2\nGAS_HOUSE_EAST = 3\nGAS_HOUSE_SOUTH = 4\nGAS_HOUSE_WEST = 5\n\nclass Architect:\n\n def __init__(self, dp, me, mg, mh, wg, h, w, verbosity):\n self.dp = dp\n self.matrix_excluded = me\n self.matrix_gas = mg\n self.matrix_house = mh\n self.wages = wg\n self.h = h\n self.w = w\n self.verbose = verbosity\n self.score = 0\n self.matrix_excluded_temp = me\n self.matrix_gas_temp = mg\n self.matrix_house_temp = mh\n\n def print (self, print_temp):\n print (\"\\n\")\n border = \"+---\"\n for i in range(self.w):\n border += \"---\"\n border += \"+\"\n print (border)\n if print_temp:\n print (\"Best score so far...\\n\")\n\n line2 = \" \"\n for j in range(self.w):\n line2 += str(self.wages[1][j]) + \" \"\n print (line2)\n \n for i in range(self.h):\n line = \" \"\n line += str(self.wages[0][i]) + \" \"\n for j in range(self.w):\n if self.is_house (i, j, print_temp) and self.is_house_with_gas (i, j, print_temp):\n line+= \" H \"\n elif self.is_house (i, j, print_temp) and self.is_house_without_gas (i, j, print_temp):\n line+= \" h \"\n elif self.is_gas_any (i, j, print_temp):\n line+= \" o \"\n elif self.is_excluded (i, j, print_temp):\n line+= \" . \"\n if not self.is_excluded (i, j, print_temp) and not self.is_gas_any (i, j, print_temp) and not self.is_house (i, j, print_temp):\n line+= \" _ \"\n print (line)\n print (border)\n\n print (\"\\nLegend:\")\n print (\" H - house with gas\")\n print (\" h - house without gas\")\n print (\" o - gas\")\n print (\" . 
- field excluded from analysis\")\n print (\" _ - unknown\")\n\n def is_gas (self, x, y, value, use_temp):\n if use_temp:\n if self.dp.is_x_correct (self.matrix_gas_temp, x) and self.dp.is_x_correct (self.matrix_gas_temp, y) and self.matrix_gas_temp [x][y] == 1:\n return True\n return False\n else:\n if self.dp.is_x_correct (self.matrix_gas, x) and self.dp.is_x_correct (self.matrix_gas, y) and self.matrix_gas [x][y] == value:\n return True\n return False\n\n def is_gas_any (self, x, y, use_temp):\n if use_temp:\n if self.dp.is_x_correct (self.matrix_gas_temp, x) and self.dp.is_x_correct (self.matrix_gas_temp, y) and self.matrix_gas_temp [x][y] > 0:\n return True\n return False\n else:\n if self.dp.is_x_correct (self.matrix_gas, x) and self.dp.is_x_correct (self.matrix_gas, y) and self.matrix_gas [x][y] > 0:\n return True\n return False\n\n def is_gas_candidate (self, x, y):\n if self.dp.is_x_correct (self.matrix_gas, x) and self.dp.is_x_correct (self.matrix_gas, y) and self.matrix_gas [x][y] == GAS_CANDIDATE:\n return True\n return False\n\n def is_house (self, x, y, use_temp):\n if use_temp:\n if self.dp.is_x_correct (self.matrix_house_temp, x) and self.dp.is_x_correct (self.matrix_house_temp, y) and self.matrix_house_temp [x][y] > 0:\n return True\n return False\n else:\n if self.dp.is_x_correct (self.matrix_house, x) and self.dp.is_x_correct (self.matrix_house, y) and self.matrix_house [x][y] > 0:\n return True\n return False\n\n def is_house_without_gas (self, x, y, use_temp):\n if not self.is_house (x, y, False):\n return False\n if self.is_gas(x-1, y, GAS_HOUSE_SOUTH, False):\n return False\n if self.is_gas(x+1, y, GAS_HOUSE_NORTH, False):\n return False\n if self.is_gas(x, y-1, GAS_HOUSE_EAST, False):\n return False\n if self.is_gas(x, y+1, GAS_HOUSE_WEST, False):\n return False\n return True\n\n def is_empty (self, x, y, use_temp):\n if not self.is_gas_any (x, y, use_temp) and not self.is_house (x, y, use_temp) and not self.is_excluded(x, y, use_temp):\n return True\n else:\n return False\n\n def is_house_with_gas (self, x, y, use_temp):\n if self.is_house (x, y, False):\n if self.is_gas(x-1, y, GAS_HOUSE_SOUTH, use_temp):\n return True\n if self.is_gas(x+1, y, GAS_HOUSE_NORTH, use_temp):\n return True\n if self.is_gas(x, y-1, GAS_HOUSE_EAST, use_temp):\n return True\n if self.is_gas(x, y+1, GAS_HOUSE_WEST, use_temp):\n return True\n return False\n\n def is_excluded (self, x, y, use_temp):\n if use_temp:\n if self.dp.is_x_correct (self.matrix_excluded_temp, x) and self.dp.is_x_correct (self.matrix_excluded_temp, y) and self.matrix_excluded_temp [x][y] > 0:\n return True\n return False\n else:\n if self.dp.is_x_correct (self.matrix_excluded, x) and self.dp.is_x_correct (self.matrix_excluded, y) and self.matrix_excluded [x][y] == 1:\n return True\n return False\n\n def set_excluded (self, x, y):\n if self.dp.is_x_correct(self.matrix_excluded, x) and self.dp.is_y_correct(self.matrix_excluded, y):\n self.matrix_excluded [x][y] = 1\n\n def set_gas_candidate (self, x, y):\n if self.dp.is_x_correct(self.matrix_gas, x) and self.dp.is_y_correct(self.matrix_gas, y):\n self.matrix_gas [x][y] = GAS_CANDIDATE\n\n def set_gas_with_house (self, x, y, value):\n if self.dp.is_x_correct(self.matrix_gas, x) and self.dp.is_y_correct(self.matrix_gas, y):\n self.matrix_gas [x][y] = value\n\n def unset_gas (self, x, y):\n if self.dp.is_x_correct(self.matrix_gas, x) and self.dp.is_y_correct(self.matrix_gas, y):\n self.matrix_gas [x][y] = 0\n\n def set_house_with_gas (self, x, y, value):\n if 
self.dp.is_x_correct(self.matrix_house, x) and self.dp.is_y_correct(self.matrix_house, y):\n self.matrix_house [x][y] = value\n\n def cleanup_of_gas_candidates (self):\n for i in range(self.h):\n for j in range(self.w):\n if self.is_gas_candidate (i, j):\n self.unset_gas (i, j)\n\n def get_all_not_excluded (self):\n results = []\n for i in range(self.h):\n for j in range(self.w):\n if not self.is_excluded (i, j, False):\n results.append ((i,j))\n return results\n\n def is_solved (self):\n\n # check if wages match number of gas containers - horizontally\n for i in range(self.h):\n temp_sum = 0\n for j in range(self.w):\n if self.is_gas_any (i, j, False):\n temp_sum += 1\n if temp_sum != self.wages[0][i]:\n return False\n\n # check if wages match number of gas containers - vertically\n for j in range(self.h):\n temp_sum = 0\n for i in range(self.w):\n if self.is_gas_any (i, j, False):\n temp_sum += 1\n if temp_sum != self.wages[1][j]:\n return False\n\n # check if houses and gas containers do not overlap\n temp_sum_gas = 0\n temp_sum_houses = 0\n for i in range(self.h):\n for j in range(self.w):\n if self.is_gas_any (i, j, False) and self.is_house (i, j, False):\n return False\n if self.is_gas_any (i, j, False):\n temp_sum_gas += 1\n if self.is_house (i, j, False):\n temp_sum_houses += 1\n\n # number of houses must match number of gas containers\n if temp_sum_gas != temp_sum_houses:\n return False\n\n # check if gas container are not direct neighbours\n for i in range(self.h):\n for j in range(self.w):\n if self.is_gas_any (i, j, False):\n temp_target_3 = numpy.zeros ((3, 3))\n temp_target_3[1][1] = 1\n temp_current_3 = self.dp.change_matrix_nonzero_to_value (self.dp.get_submatrix_3 (self.matrix_gas, i, j), 1)\n if not self.dp.are_matrices_equal (temp_current_3, temp_target_3):\n return False\n\n # TBD - number of houses with gas matches number of gas containers\n \n return True\n\n def get_number_of_houses (self, with_gas, use_temp):\n output_sum = 0\n for i in range(self.h):\n for j in range(self.w):\n if use_temp:\n #print (self.matrix_house_temp)\n if with_gas:\n if self.matrix_house_temp [i][j] == 2:\n output_sum += 1\n else:\n if self.matrix_house_temp [i][j] == 1:\n output_sum += 1\n else:\n if with_gas:\n if self.matrix_house [i][j] == 2:\n output_sum += 1\n else:\n if self.matrix_house [i][j] == 1:\n output_sum += 1\n return output_sum\n\n def get_number_of_gases (self, use_temp):\n output_sum = 0\n for i in range(self.h):\n for j in range(self.w):\n if use_temp:\n if self.matrix_gas_temp [i][j] > GAS_CANDIDATE:\n output_sum += 1\n else:\n if self.matrix_gas [i][j] > GAS_CANDIDATE:\n output_sum += 1\n \n return output_sum\n\n # Returns True if method was able to update any location with gas\n # Otherwise returns False\n def update_gas_in_all_certain_places (self, use_temp):\n\n # Approach #1 - locate free places (by checking all fields one-by-one)\n # that has no other options - gas must be here\n for i in range(self.h):\n for j in range(self.w):\n c = 0\n x = 0\n y = 0\n value = 0\n if self.is_house (i, j, use_temp):\n if self.dp.is_x_correct (self.matrix_gas, i-1) and self.is_empty (i-1, j, use_temp):\n c += 1\n x = i-1\n y = j\n value = GAS_HOUSE_EAST\n if self.dp.is_x_correct (self.matrix_gas, i+1) and self.is_empty (i+1, j, use_temp):\n c += 1\n x = i+1\n y = j\n value = GAS_HOUSE_WEST\n if self.dp.is_y_correct (self.matrix_gas, j-1) and self.is_empty (i, j-1, use_temp):\n c += 1\n x = i\n y = j-1\n value = GAS_HOUSE_NORTH\n if self.dp.is_y_correct (self.matrix_gas, j+1) and 
self.is_empty (i, j+1, use_temp):\n c += 1\n x = i\n y = j+1\n value = GAS_HOUSE_SOUTH\n if c == 1 and not use_temp:\n self.set_gas_with_house (x, y, value)\n return True\n\n # Approach #2 - locate free places in lines (by checking both vertical and horizontal lines)\n # If there is one free place left and there is still one more gas to place, gas must in this location\n\n # horizontally\n for i in range(self.h):\n temp_sum_gas = 0\n temp_sum_not_excluded = 0\n list_xy = []\n for j in range(self.w):\n if self.is_gas_any (i, j, False):\n temp_sum_gas += 1\n if not self.is_excluded (i, j, False):\n temp_sum_not_excluded += 1\n list_xy.append((i, j))\n\n if self.wages[0][i] - temp_sum_gas == temp_sum_not_excluded and temp_sum_not_excluded > 0:\n for (x, y) in list_xy:\n if self.dp.is_x_correct (self.matrix_house, x-1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_WEST\n elif self.dp.is_x_correct (self.matrix_house, x+1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_EAST\n elif self.dp.is_y_correct (self.matrix_house, y-1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_SOUTH\n elif self.dp.is_y_correct (self.matrix_house, y+1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_NORTH\n self.set_gas_with_house (x, y, value)\n return True\n\n # vertically\n for j in range(self.h):\n temp_sum_gas = 0\n temp_sum_not_excluded = 0\n list_xy = []\n for i in range(self.w):\n if self.is_gas_any (i, j, False):\n temp_sum_gas += 1\n if not self.is_excluded (i, j, False):\n temp_sum_not_excluded += 1\n list_xy.append((i, j))\n\n if self.wages[0][i] - temp_sum_gas == temp_sum_not_excluded == 1 and temp_sum_not_excluded > 0:\n for (x, y) in list_xy:\n if self.dp.is_x_correct (self.matrix_house, x-1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_WEST\n elif self.dp.is_x_correct (self.matrix_house, x+1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_EAST\n elif self.dp.is_y_correct (self.matrix_house, y-1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_SOUTH\n elif self.dp.is_y_correct (self.matrix_house, y+1) and self.is_house_without_gas (x-1, y, False):\n value = GAS_HOUSE_NORTH\n self.set_gas_with_house (x, y, value)\n return True\n\n return False\n \n def update_excluded (self):\n\n # case 1: exlude fields where is either house or gas\n for i in range(self.h):\n for j in range(self.w):\n if self.is_house(i, j, False):\n self.set_excluded (i, j)\n if self.is_gas_any(i, j, False):\n self.set_excluded (i, j)\n\n # case 2: if wages are 0 - horizontally and vertically\n for i in range(self.h):\n if self.wages[0][i] == 0:\n for j in range(self.w):\n self.set_excluded (i, j)\n for j in range(self.w):\n if self.wages[1][j] == 0:\n for i in range(self.w):\n self.set_excluded (i, j)\n\n # case 3: each gas container must have space around\n for i in range(self.h):\n for j in range(self.w):\n if self.is_gas_any (i, j, False):\n if self.dp.is_x_correct (self.matrix_gas, i-1):\n self.set_excluded (i-1, j)\n if self.dp.is_x_correct (self.matrix_gas, i+1):\n self.set_excluded (i+1, j)\n if self.dp.is_y_correct (self.matrix_gas, j-1):\n self.set_excluded (i, j-1)\n if self.dp.is_y_correct (self.matrix_gas, j+1):\n self.set_excluded (i, j+1)\n if self.dp.is_x_correct (self.matrix_gas, i-1) and self.dp.is_y_correct (self.matrix_gas, j-1):\n self.set_excluded (i-1, j-1)\n if self.dp.is_x_correct (self.matrix_gas, i+1) and self.dp.is_y_correct (self.matrix_gas, j+1):\n self.set_excluded 
(i+1, j+1)\n if self.dp.is_x_correct (self.matrix_gas, i-1) and self.dp.is_y_correct (self.matrix_gas, j+1):\n self.set_excluded (i-1, j+1)\n if self.dp.is_x_correct (self.matrix_gas, i+1) and self.dp.is_y_correct (self.matrix_gas, j-1):\n self.set_excluded (i+1, j-1)\n\n # case 4: exclude fields that are too far from houses\n for i in range(self.h):\n for j in range(self.w):\n if not self.is_excluded (i, j, False):\n if self.dp.is_matrix_zeroed (self.dp.get_submatrix_3 (self.matrix_house, i, j)):\n self.set_excluded (i, j)\n \n # case 5: exclude fields that are close to houses which are already connected to gas\n # and there is no other option for gas available\n for i in range(self.h):\n for j in range(self.w):\n if not self.is_excluded (i, j, False):\n sum_available_houses = 0\n if self.dp.is_x_correct (self.matrix_house, i-1) and self.is_house (i-1, j, False) and not self.is_house_with_gas (i-1, j, False):\n sum_available_houses += 1\n if self.dp.is_x_correct (self.matrix_house, i+1) and self.is_house (i+1, j, False) and not self.is_house_with_gas (i+1, j, False):\n sum_available_houses += 1\n if self.dp.is_y_correct (self.matrix_house, j-1) and self.is_house (i, j-1, False) and not self.is_house_with_gas (i, j-1, False):\n sum_available_houses += 1\n if self.dp.is_y_correct (self.matrix_house, j+1) and self.is_house (i, j+1, False) and not self.is_house_with_gas (i, j+1, False):\n sum_available_houses += 1\n if sum_available_houses == 0:\n self.set_excluded (i, j)\n\n def update_best_score (self):\n score = self.get_number_of_houses (True, True)\n # print (score)\n if score > self.score:\n self.matrix_excluded_temp = self.matrix_excluded\n self.matrix_gas_temp = self.matrix_gas\n self.matrix_house_temp = self.matrix_house\n self.score = score\n return True\n return False\n\n def solve (self):\n all_combinations_checked = False\n max_combinations = 20000000\n start_from_combination = 0\n \n self.update_excluded ()\n self.print (False)\n \n gas_updated = True\n while gas_updated:\n gas_updated = self.update_gas_in_all_certain_places (False)\n if self.verbose and gas_updated:\n print (\"\\n---> Found new certain place for gas !!!\\n\")\n self.print (False)\n\n self.print (False)\n solved = False\n\n self.update_excluded ()\n empty_fields = self.get_all_not_excluded ()\n houses_to_be_fixed = self.get_number_of_houses (False, False) - self.get_number_of_gases (False)\n\n self.print (False)\n\n print (\"DEBUG: Houses to be fixed: \", houses_to_be_fixed)\n print (\"DEBUG: Empty fields: \", empty_fields)\n\n while not solved or not all_combinations_checked:\n\n combinations_to_check = self.dp.get_combinations (empty_fields, houses_to_be_fixed, max_combinations, start_from_combination)\n for combination in combinations_to_check:\n for field in combination:\n (x, y) = field\n self.set_gas_candidate (x, y)\n \n self.update_excluded ()\n solved = self.is_solved ()\n if solved:\n if self.verbose:\n print (\"\\n---> Found solution !!!\\n\")\n break\n else:\n score_improved = self.update_best_score ()\n if self.verbose and score_improved:\n self.print (True)\n self.cleanup_of_gas_candidates()\n\n start_from_combination += max_combinations\n \n if not combinations_to_check:\n all_combinations_checked = True\n if not solved:\n if self.verbose:\n print (\"\\n--> Solution not found !!!\\n\")\n break\n\n self.print (False)\n" ]
[ [ "numpy.zeros" ] ]
omer11a/digital-gimbal
[ "3d193e8fc9548bab816ddb5f83fc1f1093e46e4c", "3d193e8fc9548bab816ddb5f83fc1f1093e46e4c" ]
[ "utils.py", "estimators.py" ]
[ "import numpy as np\nimport torch\nimport tensorboardX\nimport torchvision\n\nimport itertools\nimport collections\nimport os\nimport datetime\nimport shutil\nimport time\n\ndef forward_in_patches(model, x, patch_size, stride=1, *args, **kwargs):\n original_shape = x.shape\n x = x.reshape(-1, *original_shape[-3:])\n ones = torch.ones(1, *original_shape[-3:], device=x.device)\n unfolded_ones = torch.nn.functional.unfold(ones, patch_size, stride=stride)\n _, patch_dim, num_patches = unfolded_ones.shape\n normalizer = torch.nn.functional.fold(\n unfolded_ones, original_shape[-2:], patch_size, stride=stride)\n normalizer[normalizer < 1] = 1\n\n patches = torch.nn.functional.unfold(x, patch_size, stride=stride)\n results_per_patch = []\n for i in range(num_patches):\n patch = patches[:, :, i].reshape(*original_shape[:-2], *patch_size)\n results_per_patch.append(model(patch, *args, **kwargs))\n\n results = []\n for i in range(len(results_per_patch[0])):\n result_patches = [\n patch_results[i].reshape(-1, patch_dim)\n for patch_results in results_per_patch\n ]\n\n result_patches = torch.stack(result_patches, dim=-1)\n result = torch.nn.functional.fold(\n result_patches, original_shape[-2:], patch_size, stride=stride)\n result = torch.reshape(result / normalizer, (original_shape[0], -1, *original_shape[-3:]))\n results.append(result.squeeze(1))\n\n return results\n\ndef adaptive_conv(images, kernels, padding=None):\n h, w = images.shape[-2:]\n kernel_size = int(np.sqrt(kernels.size(-3)))\n kernel_radius = kernel_size // 2\n padding = 'constant' if padding is None else padding\n images = torch.nn.functional.pad(\n images,\n (kernel_radius, kernel_radius, kernel_radius, kernel_radius),\n mode=padding\n )\n\n slices = [\n images[..., i:i + h, j:j + w]\n for i, j in itertools.product(range(kernel_size), range(kernel_size))\n ]\n\n stacked_slices = torch.stack(slices, dim=-3)\n return stacked_slices.mul(kernels).sum(dim=-3)\n\nclass CNN(torch.nn.Module):\n @staticmethod\n def _build_conv_layers(in_channel_num, out_channel_num, kernel_size, padding):\n return [\n torch.nn.Conv2d(in_channel_num, out_channel_num, kernel_size, padding=padding),\n torch.nn.ReLU(),\n ]\n\n def __init__(self, channel_nums, kernel_size, end_with_relu=True):\n super().__init__()\n\n padding = (kernel_size - 1) // 2\n layers = [\n type(self)._build_conv_layers(in_channel_num, out_channel_num, kernel_size, padding)\n for in_channel_num, out_channel_num in zip(channel_nums[:-1], channel_nums[1:])\n ]\n\n if not end_with_relu:\n layers[-1] = layers[-1][:1]\n\n self._model = torch.nn.Sequential(*sum(layers, []))\n\n def forward(self, x):\n return self._model(x)\n\nclass RepeatedConv(CNN):\n DEFAULT_KERNEL_SIZE = 3\n DEFAULT_NUM_REPEATS = 3\n\n def __init__(\n self,\n in_channel_num,\n out_channel_num,\n kernel_size=DEFAULT_KERNEL_SIZE,\n num_repeats=DEFAULT_NUM_REPEATS\n ):\n channel_nums = (in_channel_num, ) + (out_channel_num, ) * num_repeats\n super().__init__(channel_nums, kernel_size)\n\nclass MovingAverage(object):\n def __init__(self, max_length):\n self._cache = collections.deque(maxlen=max_length)\n\n def update(self, value):\n self._cache.append(value)\n\n def get(self):\n return sum(self._cache) / len(self._cache)\n\nclass Model(object):\n DEFAULT_BACKEND = 'nccl'\n\n @staticmethod\n def detach(tensor, pil=False):\n tensor = tensor.detach().cpu()\n if pil:\n return torchvision.transforms.functional.to_pil_image(tensor)\n\n return tensor.numpy()\n\n def _set_device(self, backend=None):\n self._world_size = 
torch.cuda.device_count()\n backend = type(self).DEFAULT_BACKEND if backend is None else backend\n torch.distributed.init_process_group(\n backend,\n world_size=self._world_size,\n rank=self._gpu\n )\n\n torch.cuda.set_device(self._gpu)\n\n def _prepare_checkpoint_dir(self):\n train_config = self._config['train']\n self._checkpoint_dir = train_config['checkpoint_dir']\n os.makedirs(self._checkpoint_dir, exist_ok=True)\n self._max_num_checkpoints = train_config.getint('max_num_checkpoints')\n\n def _prepare_output_dir(self):\n self._output_dir = self._config['eval']['output_dir']\n os.makedirs(self._output_dir, exist_ok=True)\n\n def _prepare_log_writer(self):\n if self._gpu != 0 or not self._should_log:\n return\n\n main_dir = self._config['train']['log_dir']\n sub_dir = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n log_dir = os.path.join(main_dir, sub_dir)\n os.makedirs(log_dir, exist_ok=True)\n self._log_writer = tensorboardX.SummaryWriter(log_dir)\n\n def _get_checkpoint_filename(self, is_best=False):\n basename = 'best' if is_best else f'{self._t:06d}'\n return os.path.join(self._checkpoint_dir, f'{basename}.pth.tar')\n\n def _load_checkpoint(self):\n filename = self._get_checkpoint_filename(is_best=True)\n checkpoint = torch.load(filename, map_location='cpu')\n self._epoch = checkpoint['epoch']\n self._t = checkpoint['t']\n self._best_loss = checkpoint['best_loss']\n self._model.load_state_dict(checkpoint['state_dict'])\n self._optimizer.load_state_dict(checkpoint['optimizer'])\n\n def _remove_extra_checkpoints(self):\n filenames = sorted(os.listdir(self._checkpoint_dir))\n num_files_to_remove = max(0, len(filenames) - self._max_num_checkpoints)\n for filename in filenames[:num_files_to_remove]:\n os.remove(os.path.join(self._checkpoint_dir, filename))\n\n def _save_checkpoint(self):\n if self._gpu != 0:\n return\n\n state = {\n 'epoch': self._epoch,\n 't': self._t,\n 'best_loss': self._best_loss,\n 'state_dict': self._model.state_dict(),\n 'optimizer': self._optimizer.state_dict(),\n }\n\n filename = self._get_checkpoint_filename()\n torch.save(state, filename)\n\n current_average_loss = self._average_loss.get()\n if current_average_loss < self._best_loss:\n self._best_loss = current_average_loss\n best_filename = self._get_checkpoint_filename(is_best=True)\n shutil.copyfile(filename, best_filename)\n\n self._remove_extra_checkpoints()\n\n def _write_log(self, scalars, scalar_dicts, images, image_lists):\n if self._gpu != 0 or not self._should_log:\n return\n\n for name, scalar in scalars.items():\n self._log_writer.add_scalar(name, scalar, global_step=self._t)\n\n for name, scalar_dict in scalar_dicts.items():\n self._log_writer.add_scalars(name, scalar_dict, global_step=self._t)\n\n for name, image in images.items():\n self._log_writer.add_image(name, image, global_step=self._t)\n\n for name, image_list in image_lists.items():\n self._log_writer.add_images(name, image_list, global_step=self._t)\n\n def _print(self, to_print):\n if self._gpu == 0:\n print(to_print)\n\n def _print_iteration(self, step, to_print, elapsed_time):\n prefix = (\n f'{self._t:4d}',\n f'epoch {self._epoch:2d}',\n f'step {step:4d}',\n )\n\n postfix = (\n f'time:{elapsed_time:.2f} seconds.',\n )\n\n self._print('\\t| '.join(prefix + to_print + postfix))\n\n def _prepare_data(self, num_workers):\n raise NotImplementedError()\n\n def _prepare_model(self):\n raise NotImplementedError()\n\n def _prepare_loss(self):\n raise NotImplementedError()\n\n def _prepare_optimizer(self):\n raise 
NotImplementedError()\n\n def _convert_to_cuda(self, data):\n raise NotImplementedError()\n\n def _train_step(self, x, y):\n raise NotImplementedError()\n\n def _eval_step(self, i, x, y):\n raise NotImplementedError()\n\n def _train(self):\n reached_max_steps = False\n start_epoch = self._epoch\n for self._epoch in range(start_epoch, self._num_epochs):\n epoch_start_time = time.time()\n for step, data in enumerate(self._data_loader):\n step_start_time = time.time()\n data = self._convert_to_cuda(data)\n loss, scalars, scalar_dicts, images, image_lists, to_print = self._train_step(*data)\n elapsed_time = time.time() - step_start_time\n self._average_loss.update(loss)\n self._write_log(scalars, scalar_dicts, images, image_lists)\n self._print_iteration(step, to_print, elapsed_time)\n\n self._t += 1\n if self._t % self._save_freq == 0:\n self._save_checkpoint()\n\n if self._num_steps is not None and self._t == self._num_steps:\n self._print(f'Reached maximum number of steps.')\n reached_max_steps = True\n break\n\n elapsed_time = time.time() - epoch_start_time\n self._print(f'Epoch {self._epoch} is finished, time elapsed {elapsed_time:.2f} seconds.')\n if reached_max_steps:\n break\n\n self._save_checkpoint()\n\n def _metrics_to_str(self, metrics):\n raise NotImplementedError()\n\n def _eval(self):\n total_metrics = 0\n for i, data in enumerate(self._data_loader):\n data = self._convert_to_cuda(data)\n\n metrics, images = self._eval_step(i, *data)\n total_metrics += metrics\n str_metrics = self._metrics_to_str(metrics)\n self._print(f'For {i}-th example got ' + ', '.join(str_metrics))\n for name, image in images.items():\n name = '.'.join((name, self._config['eval']['output_suffix'], ))\n path = os.path.join(self._config['eval']['output_dir'], name)\n image.save(path)\n\n average_metrics = total_metrics / (i + 1)\n str_metrics = self._metrics_to_str(average_metrics)\n self._print('Average metrics: ' + ', '.join(str_metrics))\n\n def __init__(\n self,\n config,\n gpu,\n eval_mode=False,\n backend=None,\n num_workers=0,\n restart=False,\n should_log=False,\n verbose=False\n ):\n self._config = config\n self._gpu = gpu\n self._eval_mode = eval_mode\n self._should_log = should_log\n self._verbose = verbose\n self._set_device(backend=backend)\n self._data_loader = self._prepare_data(num_workers)\n self._model = self._prepare_model()\n self._loss = self._prepare_loss()\n self._optimizer = self._prepare_optimizer()\n\n self._prepare_checkpoint_dir()\n if self._eval_mode:\n self._prepare_output_dir()\n should_load = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n self._print('=> eval')\n else:\n self._prepare_log_writer()\n train_config = config['train']\n self._num_epochs = train_config.getint('num_epochs')\n self._save_freq = train_config.getint('save_freq')\n self._average_loss = MovingAverage(train_config.getint('save_freq'))\n if train_config.getboolean('force_num_steps'):\n self._num_steps = self._num_epochs * len(self._data_loader)\n else:\n self._num_steps = None\n\n should_load = not restart\n self._print('=> train')\n\n if should_load:\n checkpoint = self._load_checkpoint()\n self._print(f'=> loaded checkpoint (epoch {self._epoch}, t {self._t})')\n else:\n self._epoch = 0\n self._t = 0\n self._best_loss = np.inf\n\n def run(self):\n if self._eval_mode:\n self._eval()\n else:\n self._train()\n", "import torch\nimport numpy as np\n\nEPS = 1e-8\n\nclass GenGS(torch.nn.Module):\n DEFAULT_N = 1500\n DEFAULT_TAU = 1\n\n def __init__(self, 
n=DEFAULT_N):\n super().__init__()\n\n k = torch.arange(n, requires_grad=False, dtype=torch.float)\n self.register_buffer('_k', k)\n self.register_buffer('_lgamma', torch.lgamma(k[:-1] + 1))\n\n def forward(self, x, tau=DEFAULT_TAU):\n x = x.unsqueeze(-1)\n logits = torch.log(x + EPS) * self._k[:-1] - x - self._lgamma\n pi = torch.exp(logits)\n pi_remainder = torch.nn.functional.relu(1 - pi.sum(-1, keepdim=True))\n logit_remainder = torch.log(pi_remainder + EPS)\n logits = torch.cat((logits, logit_remainder, ), dim=-1)\n w = torch.nn.functional.gumbel_softmax(logits, tau)\n return w.matmul(self._k)\n\nclass ReparameterizedPoisson(torch.nn.Module):\n DEFAULT_THRESHOLD = 1000\n DEFAULT_MIN_TAU = 0.1\n DEFAULT_R = 1e-5\n\n def __init__(\n self,\n n=GenGS.DEFAULT_N,\n threshold=DEFAULT_THRESHOLD,\n min_tau=DEFAULT_MIN_TAU,\n r=DEFAULT_R\n ):\n super().__init__()\n self._gen_gs = GenGS(n=n)\n self._threshold = threshold\n self._min_tau = min_tau\n self._r = r\n\n def forward(self, x, t=None):\n x = torch.nn.functional.relu(x)\n if (t is None) or (not self.training):\n return torch.poisson(x)\n\n tau = max(self._min_tau, np.exp(-self._r * t))\n gen_gs = self._gen_gs(x, tau=tau)\n normal = x + torch.normal(0, 1, x.shape, device=x.device) * torch.sqrt(x + EPS)\n return torch.where(x <= self._threshold, gen_gs, normal)\n\nclass RoundSTE(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x):\n return torch.round(x)\n\n @staticmethod\n def backward(ctx, grad):\n return grad\n\nround_ste = RoundSTE.apply\n" ]
[ [ "torch.nn.functional.fold", "torch.ones", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.load", "torch.reshape", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.stack", "torch.nn.functional.unfold", "torch.cuda.device_count", "torch.nn.functional.pad", "torch.save" ], [ "torch.normal", "torch.nn.functional.gumbel_softmax", "torch.poisson", "torch.cat", "torch.sqrt", "torch.round", "torch.exp", "torch.nn.functional.relu", "torch.log", "torch.where", "torch.arange", "torch.lgamma", "numpy.exp" ] ]
NREL/VirtualEngineering
[ "f23f409132bc7965334db1e29d83502001ec4e09" ]
[ "EH_OpenFOAM/tests/RushtonNonReact/get_solids.py" ]
[ "# trace generated using paraview version 5.8.1\n#\n# To ensure correct image size when batch processing, please search \n# for and uncomment the line `# renderView*.ViewSize = [*,*]`\n\n#### import the simple module from the paraview\nimport numpy as np\nfrom paraview import simple as pv\nimport vtk.numpy_interface.dataset_adapter as dsa \nimport sys\nfrom sys import argv\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.collections import LineCollection\n\nsolfoam = pv.OpenFOAMReader(FileName = './soln.foam') # just need to provide folder\nsolfoam.CaseType = 'Reconstructed Case'\nsolfoam.MeshRegions = ['internalMesh']\n#solfoam.CellArrays = ['C']\nsolfoam.PointArrays = ['phis','phifs']\nt = np.array(solfoam.TimestepValues)\nN=t.size\n\nofvtkdata = pv.servermanager.Fetch(solfoam)\nofdata = dsa.WrapDataObject( ofvtkdata)\nofpts = np.array(ofdata.Points.Arrays[0])\nptsmin = ofpts.min(axis=0)\nptsmax = ofpts.max(axis=0)\nprint(ptsmin)\nprint(ptsmax)\n\nprint(\"doing time:\",t[-1])\n\npv.UpdatePipeline(time=t[-1], proxy=solfoam)\npltline1 = pv.PlotOverLine(Input=solfoam,\nSource='High Resolution Line Source')\n\npltline1.Source.Point1 = [0.25*ptsmin[0]+0.75*ptsmax[0], 0.5*(ptsmin[1]+ptsmax[1]),ptsmin[2]]\npltline1.Source.Point2 = [0.25*ptsmin[0]+0.75*ptsmax[0], 0.5*(ptsmin[1]+ptsmax[1]),ptsmax[2]]\n\nidat1 = dsa.WrapDataObject(pv.servermanager.Fetch(pltline1))\n\nphis = abs(idat1.PointData['phis'])\nphifs = abs(idat1.PointData['phifs'])\noutarr=np.array([idat1.Points[:,2],phis,phifs])\nnp.savetxt(\"solids_along_line.dat\",np.transpose(outarr),delimiter=\" \")\n" ]
[ [ "numpy.array", "numpy.transpose" ] ]
rubenvillegas/icml2017hierchvid
[ "9584bd3c97ed3e5869cf79e906c850deed098349", "9584bd3c97ed3e5869cf79e906c850deed098349" ]
[ "imggen_src/model_analogy.py", "imggen_src/alexnet.py" ]
[ "import os\nimport time\nfrom glob import glob\nimport tensorflow as tf\n\nfrom alexnet import alexnet\nfrom hg_stacked import hg_forward\nfrom ops import *\nfrom utils import *\n\n\nclass IMGGEN(object):\n def __init__(self,\n image_size=128,\n batch_size=32,\n c_dim=3,\n layer=0,\n alpha=1.,\n beta=1.,\n n_joints=48,\n is_train=True,\n checkpoint_dir=None):\n\n self.batch_size = batch_size\n self.image_size = image_size\n self.c_dim = c_dim\n self.n_joints = n_joints\n\n self.layer = layer\n self.alpha = alpha\n self.beta = beta\n self.gf_dim = 64\n self.df_dim = 64\n self.is_train = is_train\n\n self.img_shape = [image_size, image_size, self.c_dim]\n self.pose_shape = [image_size, image_size, n_joints]\n self.checkpoint_dir = checkpoint_dir\n\n self.build_model()\n\n def build_model(self):\n self.xt_ = tf.placeholder(\n tf.float32, [self.batch_size] + self.img_shape, name='xt')\n self.xtpn_ = tf.placeholder(\n tf.float32, [self.batch_size] + self.img_shape, name='xtpn')\n self.pt_ = tf.placeholder(\n tf.float32, [self.batch_size] + self.pose_shape, name='pt')\n self.ptpn_ = tf.placeholder(\n tf.float32, [self.batch_size] + self.pose_shape, name='ptpn')\n\n with tf.variable_scope('GEN'):\n xtpn = self.generator(self.xt_, self.pt_, self.ptpn_)\n self.G = tf.reshape(\n xtpn,\n [self.batch_size, self.image_size, self.image_size, 1, self.c_dim])\n\n if self.is_train:\n true_sim = inverse_transform(self.xtpn_)\n gen_sim = inverse_transform(xtpn)\n h_sim1_t = alexnet(true_sim * 255. - tf.reduce_mean(true_sim * 255.))\n h_sim1_g = alexnet(gen_sim * 255. - tf.reduce_mean(gen_sim * 255.))\n h_sim2_t = hg_forward(true_sim)\n h_sim2_g = hg_forward(gen_sim)\n\n good_data = tf.concat(axis=3, values=[self.xtpn_, self.ptpn_])\n bad_data = tf.concat(axis=3, values=[self.xt_, self.ptpn_])\n gen_data = tf.concat(axis=3, values=[xtpn, self.ptpn_])\n\n with tf.variable_scope('DIS'):\n self.D, self.D_logits = self.discriminator(good_data)\n with tf.variable_scope('DIS', reuse=True):\n self.DB, self.DB_logits = self.discriminator(bad_data)\n self.D_, self.D_logits_ = self.discriminator(gen_data)\n\n self.feat_loss1 = 0.5 * tf.reduce_mean(tf.square(h_sim1_t - h_sim1_g))\n self.feat_loss2 = 0.5 * tf.reduce_mean(\n tf.square(h_sim2_t[self.layer] - h_sim2_g[self.layer]))\n self.L_feat = self.alpha * self.feat_loss1 + self.beta * self.feat_loss2\n self.L_img = 0.5 * tf.reduce_mean(tf.square(xtpn - self.xtpn_))\n self.L = self.L_img + self.L_feat\n\n self.d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.D_logits, labels=tf.ones_like(self.D)))\n\n self.d_loss_fake1 = 0.5 * tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.D_logits_, labels=tf.zeros_like(self.D_)))\n\n self.d_loss_fake2 = 0.5 * tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.DB_logits, labels=tf.zeros_like(self.DB)))\n self.d_loss_fake = self.d_loss_fake1 + self.d_loss_fake2\n self.d_loss = self.d_loss_real + self.d_loss_fake\n self.g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.D_logits_, labels=tf.ones_like(self.D_)))\n\n self.L_sum = tf.summary.scalar('L', self.L)\n self.L_img_sum = tf.summary.scalar('L_img', self.L_img)\n self.L_feat_sum = tf.summary.scalar('L_feat', self.L_feat)\n self.g_loss_sum = tf.summary.scalar('g_loss', self.g_loss)\n self.d_loss_sum = tf.summary.scalar('d_loss', self.d_loss)\n self.d_loss_real_sum = tf.summary.scalar('d_loss_real', self.d_loss_real)\n self.d_loss_fake_sum = 
tf.summary.scalar('d_loss_fake', self.d_loss_fake)\n\n self.t_vars = tf.trainable_variables()\n self.g_vars = [var for var in self.t_vars if 'GEN' in var.name]\n self.dis_vars = [var for var in self.t_vars if 'DIS' in var.name]\n num_param = 0.0\n for var in self.g_vars:\n num_param += int(np.prod(var.get_shape()))\n print('NUMBER OF PARAMETERS: ' + str(num_param))\n self.saver = tf.train.Saver()\n\n def generator(self, xt, pt, ptpn, reuse=False):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n h_img = self.img_encoder(xt)\n h_pose = self.pose_encoder(tf.concat(axis=0, values=[pt, ptpn]))\n h_pose_t = h_pose[:self.batch_size, :, :, :]\n h_pose_tpn = h_pose[self.batch_size:, :, :, :]\n h_diff = h_pose_tpn - h_pose_t\n xtp1 = self.decoder(h_img + h_diff)\n\n return xtp1\n\n def img_encoder(self, xt):\n conv1_1 = relu(\n conv2d(\n xt,\n output_dim=self.gf_dim,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv1_1'))\n conv1_2 = relu(\n conv2d(\n conv1_1,\n output_dim=self.gf_dim,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv1_2'))\n pool1 = MaxPooling(conv1_2, [2, 2])\n\n conv2_1 = relu(\n conv2d(\n pool1,\n output_dim=self.gf_dim * 2,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv2_1'))\n conv2_2 = relu(\n conv2d(\n conv2_1,\n output_dim=self.gf_dim * 2,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv2_2'))\n pool2 = MaxPooling(conv2_2, [2, 2])\n\n conv3_1 = relu(\n conv2d(\n pool2,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv3_1'))\n conv3_2 = relu(\n conv2d(\n conv3_1,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv3_2'))\n conv3_3 = relu(\n conv2d(\n conv3_2,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_iconv3_3'))\n pool3 = MaxPooling(conv3_3, [2, 2])\n\n return pool3\n\n def pose_encoder(self, pt):\n conv1_1 = relu(\n conv2d(\n pt,\n output_dim=self.gf_dim,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv1_1'))\n conv1_2 = relu(\n conv2d(\n conv1_1,\n output_dim=self.gf_dim,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv1_2'))\n pool1 = MaxPooling(conv1_2, [2, 2])\n\n conv2_1 = relu(\n conv2d(\n pool1,\n output_dim=self.gf_dim * 2,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv2_1'))\n conv2_2 = relu(\n conv2d(\n conv2_1,\n output_dim=self.gf_dim * 2,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv2_2'))\n pool2 = MaxPooling(conv2_2, [2, 2])\n\n conv3_1 = relu(\n conv2d(\n pool2,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv3_1'))\n conv3_2 = relu(\n conv2d(\n conv3_1,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv3_2'))\n conv3_3 = relu(\n conv2d(\n conv3_2,\n output_dim=self.gf_dim * 4,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='enc_pconv3_3'))\n pool3 = MaxPooling(conv3_3, [2, 2])\n\n return pool3\n\n def decoder(self, h_comb):\n shapel3 = [\n self.batch_size, self.image_size / 4, self.image_size / 4,\n self.gf_dim * 4\n ]\n shapeout3 = [\n self.batch_size, self.image_size / 4, self.image_size / 4,\n self.gf_dim * 2\n ]\n depool3 = FixedUnPooling(h_comb, [2, 2])\n deconv3_3 = relu(\n deconv2d(\n depool3,\n output_shape=shapel3,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv3_3'))\n deconv3_2 = relu(\n deconv2d(\n deconv3_3,\n output_shape=shapel3,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv3_2'))\n deconv3_1 = relu(\n deconv2d(\n deconv3_2,\n output_shape=shapeout3,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n 
name='dec_deconv3_1'))\n\n shapel2 = [\n self.batch_size, self.image_size / 2, self.image_size / 2,\n self.gf_dim * 2\n ]\n shapeout3 = [\n self.batch_size, self.image_size / 2, self.image_size / 2, self.gf_dim\n ]\n depool2 = FixedUnPooling(deconv3_1, [2, 2])\n deconv2_2 = relu(\n deconv2d(\n depool2,\n output_shape=shapel2,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv2_2'))\n deconv2_1 = relu(\n deconv2d(\n deconv2_2,\n output_shape=shapeout3,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv2_1'))\n\n shapel1 = [self.batch_size, self.image_size, self.image_size, self.gf_dim]\n shapeout1 = [self.batch_size, self.image_size, self.image_size, self.c_dim]\n depool1 = FixedUnPooling(deconv2_1, [2, 2])\n deconv1_2 = relu(\n deconv2d(\n depool1,\n output_shape=shapel1,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv1_2'))\n xtp1 = tanh(\n deconv2d(\n deconv1_2,\n output_shape=shapeout1,\n k_h=3,\n k_w=3,\n d_h=1,\n d_w=1,\n name='dec_deconv1_1'))\n return xtp1\n\n def discriminator(self, image):\n h0 = lrelu(conv2d(image, self.df_dim, name='dis_h0_conv'))\n h1 = lrelu(\n batch_norm(conv2d(h0, self.df_dim * 2, name='dis_h1_conv'), 'bn1'))\n h2 = lrelu(\n batch_norm(conv2d(h1, self.df_dim * 4, name='dis_h2_conv'), 'bn2'))\n h3 = lrelu(\n batch_norm(conv2d(h2, self.df_dim * 8, name='dis_h3_conv'), 'bn3'))\n h = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'dis_h3_lin')\n\n return tf.nn.sigmoid(h), h\n\n def save(self, sess, checkpoint_dir, step):\n model_name = 'IMGGEN.model'\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(\n sess, os.path.join(checkpoint_dir, model_name), global_step=step)\n\n def load(self, sess, checkpoint_dir, model_name=None):\n print(\"[*] Reading checkpoints...\")\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n if model_name is None: model_name = ckpt_name\n self.saver.restore(sess, os.path.join(checkpoint_dir, model_name))\n print(\" Loaded model: \"+str(model_name))\n return True, model_name\n else:\n return False, None\n", "\"\"\"Implementation from https://www.cs.toronto.edu/~guerzhoy/tf_alexnet/\"\"\"\nimport tensorflow as tf\n\nfrom utils import *\nfrom ops import relu\n\n\ndef alexnet(image):\n net_data = np.load(\"./perceptual_models/alexnet/bvlc_alexnet.npy\").item()\n k_h = 11\n k_w = 11\n c_o = 96\n s_h = 4\n s_w = 4\n conv1W = tf.Variable(net_data[\"conv1\"][0])\n conv1b = tf.Variable(net_data[\"conv1\"][1])\n conv1 = relu(\n conv(\n image,\n conv1W,\n conv1b,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"SAME\",\n group=1))\n radius = 2\n alpha = 2e-05\n beta = 0.75\n bias = 1.0\n lrn1 = tf.nn.local_response_normalization(\n conv1, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)\n k_h = 3\n k_w = 3\n s_h = 2\n s_w = 2\n padding = 'VALID'\n maxpool1 = tf.nn.max_pool(\n lrn1, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)\n\n k_h = 5\n k_w = 5\n c_o = 256\n s_h = 1\n s_w = 1\n group = 2\n conv2W = tf.Variable(net_data[\"conv2\"][0])\n conv2b = tf.Variable(net_data[\"conv2\"][1])\n conv2 = relu(\n conv(\n maxpool1,\n conv2W,\n conv2b,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"SAME\",\n group=group))\n radius = 2\n alpha = 2e-05\n beta = 0.75\n bias = 1.0\n lrn2 = tf.nn.local_response_normalization(\n conv2, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)\n k_h = 3\n k_w = 3\n s_h = 2\n s_w = 2\n padding = 'VALID'\n maxpool2 = tf.nn.max_pool(\n 
lrn2, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding)\n\n k_h = 3\n k_w = 3\n c_o = 384\n s_h = 1\n s_w = 1\n group = 1\n conv3W = tf.Variable(net_data[\"conv3\"][0])\n conv3b = tf.Variable(net_data[\"conv3\"][1])\n conv3 = relu(\n conv(\n maxpool2,\n conv3W,\n conv3b,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"SAME\",\n group=group))\n\n k_h = 3\n k_w = 3\n c_o = 384\n s_h = 1\n s_w = 1\n group = 2\n conv4W = tf.Variable(net_data[\"conv4\"][0])\n conv4b = tf.Variable(net_data[\"conv4\"][1])\n conv4 = relu(\n conv(\n conv3,\n conv4W,\n conv4b,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"SAME\",\n group=group))\n\n k_h = 3\n k_w = 3\n c_o = 256\n s_h = 1\n s_w = 1\n group = 2\n conv5W = tf.Variable(net_data[\"conv5\"][0])\n conv5b = tf.Variable(net_data[\"conv5\"][1])\n conv5 = relu(\n conv(\n conv4,\n conv5W,\n conv5b,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"SAME\",\n group=group))\n\n return conv5\n\n\ndef conv(input_,\n kernel,\n biases,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n padding=\"VALID\",\n group=1):\n \"\"\"From https://github.com/ethereon/caffe-tensorflow\n \"\"\"\n c_i = input_.get_shape()[-1]\n assert c_i % group == 0\n assert c_o % group == 0\n convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)\n\n if group == 1:\n conv = convolve(input_, kernel)\n else:\n input_groups = tf.split(axis=3, num_or_size_splits=group, value=input_)\n kernel_groups = tf.split(axis=3, num_or_size_splits=group, value=kernel)\n output_groups = [\n convolve(i, k) for i, k in zip(input_groups, kernel_groups)\n ]\n conv = tf.concat(axis=3, values=output_groups)\n return tf.reshape(\n tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.concat", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.placeholder", "tensorflow.trainable_variables", "tensorflow.zeros_like", "tensorflow.variable_scope", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.get_variable_scope", "tensorflow.summary.scalar" ], [ "tensorflow.nn.bias_add", "tensorflow.concat", "tensorflow.Variable", "tensorflow.nn.max_pool", "tensorflow.split", "tensorflow.nn.local_response_normalization", "tensorflow.nn.conv2d" ] ]
fuzzy-string-matching/JaroWinkler
[ "736981e52c05b0815c5cb0de889f5cf51981f006" ]
[ "bench/benchmark_visualize.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv(\"results/jaro_winkler.csv\")\n\ndf *= 1000 * 1000\ndf[\"length\"] /= 1000 * 1000\n\n\nax=df.plot(x=\"length\")\n\nplt.xticks(list(range(0, 513, 64)))\n\nplt.title(\"Performance comparision of the \\nJaro-Winkler similarity in different libraries\")\nplt.xlabel(\"string length [in characters]\")\nplt.ylabel(\"runtime [μs]\")\nax.set_xlim(xmin=0)\nax.set_ylim(bottom=0)\nplt.grid()\nplt.show()\n\n\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Jacobvs/ML-Music-Analyzer
[ "00b694e26cee6ccddb9b727deca6288fda37f9b6" ]
[ "test.py" ]
[ "import os\nimport time\nimport librosa\nimport vampyhost\nimport numpy as np\nimport collections\nimport vamp.frames\nimport scipy.signal\nimport youtube_dl\nimport pygame, pygame.sndarray\nfrom vampyhost import load_plugin\nfrom keras.models import load_model\n#from matplotlib import pyplot as plt\nfrom music21 import chord\nfrom pygame.time import delay\nfrom vamp.collect import get_feature_step_time, fill_timestamps\n\n\n\n############# SONG INPUT ##############\nsong_name = \"aint no rest for the wicked instrumental\"\nsong_type = \".mp3\"\nuse_youtube = True\nyoutube_url = \"https://www.youtube.com/watch?v=9l5L34VqzlU\"\n#######################################\n\n\nif use_youtube:\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': '%(title)s.%(etx)s',\n 'quiet': False\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(youtube_url)\n info = ydl.extract_info(youtube_url, download=True)\n song_name = info.get('title', None)\n song_type = '.mp3'\n os.rename(song_name+song_type, 'testing/'+song_name+song_type)\n\n\ndef deduce_shape(output_desc):\n if output_desc[\"hasDuration\"]:\n return \"list\"\n if output_desc[\"sampleType\"] == vampyhost.VARIABLE_SAMPLE_RATE:\n return \"list\"\n if not output_desc[\"hasFixedBinCount\"]:\n return \"list\"\n if output_desc[\"binCount\"] == 0:\n return \"list\"\n if output_desc[\"binCount\"] == 1:\n return \"vector\"\n return \"matrix\"\n\n\ndef reshape(results, sample_rate, step_size, output_desc, shape):\n output = output_desc[\"identifier\"]\n out_step = get_feature_step_time(sample_rate, step_size, output_desc)\n\n if shape == \"vector\":\n rv = ( out_step,\n np.array([r[output][\"values\"][0] for r in results], np.float32) )\n elif shape == \"matrix\":\n outseq = [r[output][\"values\"] for r in results]\n rv = ( out_step, np.array(outseq, np.float32) )\n else:\n rv = list(fill_timestamps(results, sample_rate, step_size, output_desc))\n\n return rv\n\n\ndef slice_vals(chroma_vals, slice_size):\n blank = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n num_slices = int(len(chroma_vals)/slice_size)\n sliced_chroma = []\n for i in range(num_slices):\n sliced_chroma.append(chroma_vals[i*slice_size:(i+1)*100])\n\n remaining_chroma = list(chroma_vals[num_slices*100:])\n\n for i in range(100-len(remaining_chroma)):\n remaining_chroma.append(blank)\n\n if len(remaining_chroma) > 0:\n sliced_chroma.append(remaining_chroma)\n\n del remaining_chroma\n\n return sliced_chroma\n\n\nprint(\"Loading song: {}{}\".format(song_name,song_type))\n\nnp.set_printoptions(threshold=np.nan)\ndata, rate = librosa.load(\"testing/\" + song_name + song_type)\n\nprint(\"--LOADED SONG--\")\n\nnnls_chroma = load_plugin(\"nnls-chroma:nnls-chroma\", rate, 0x03)\nnnls_chroma.set_parameter_value(\"rollon\", 1)\n\nstepsize = nnls_chroma.get_preferred_step_size()\nblocksize = nnls_chroma.get_preferred_block_size()\nchannels = 1\nif data.ndim > 1:\n channels = data.shape[0]\n\nnnls_chroma.initialise(channels, stepsize, blocksize)\nframes = vamp.frames.frames_from_array(data, stepsize, blocksize)\nresults = vamp.process.process_with_initialised_plugin(frames, rate, stepsize, nnls_chroma, [nnls_chroma.get_output(5)[\"identifier\"]])\nshape = deduce_shape(nnls_chroma.get_output(5))\nrv = reshape(results, rate, stepsize, nnls_chroma.get_output(5), shape)\n\nnnls_chroma.unload()\nchroma = {shape : rv}\n\n\nstepsize, chroma_data = 
chroma['matrix']\n\nstructured_chroma = collections.OrderedDict()\ntimestamp = 0.0\nstepsize = stepsize.to_float()\n# chroma_data = chroma_data.tolist()\n\nprint(\"Stepsize = {}\".format(timestamp+stepsize))\nprint(\"Length of song: {}\".format(len(chroma_data)))\n\n\nfor index, data in enumerate(chroma_data):\n timestamp = (timestamp + stepsize)\n structured_chroma[timestamp] = chroma_data[index]\n\nsliced_chroma = np.expand_dims(slice_vals(chroma_data, 100), axis=3)\nprint(\"Shape: {}\".format(sliced_chroma.shape))\n\nmodel = load_model(\"trained_music_model.hdf5\")\n\npredicted_probability = model.predict(sliced_chroma, batch_size=32, verbose=1)\npredicted_probability = predicted_probability.reshape(-1, 100)\npredicted_classes = (predicted_probability >= 0.5).astype(np.int32)\n\npredicted_probability_arr = predicted_probability.reshape(-1, 12)\npredicted_classes_arr = predicted_classes.reshape(-1, 12)\n\n\n\n# plot, (subplot1, subplot2) = plt.subplots(2, 1)\n# # plot.suptitle('Music Model Test')\n# plot.set_size_inches((6, 10))\n#\n# subplot1.imshow(predicted_classes_arr, vmin=0, vmax=1)\n# subplot1.set_aspect('auto')\n# subplot1.set_ylabel('Pitch Class')\n# subplot1.set_title('Predicted Chords')\n#\n# subplot2.imshow(predicted_probability_arr, vmin=0, vmax=1)\n# subplot2.set_aspect('auto')\n# subplot2.set_ylabel('Pitch Class')\n# subplot2.set_title('Predicted Chords (Probability)')\n#\n#plt.show()\n\n\ntimestamps = list(structured_chroma.keys())\npredicted_frequencies = collections.OrderedDict()\npredicted_chords = []\n\nwith open(\"testing/predictions/Predicted Chords -- {}.txt\".format(song_name), \"w\") as file:\n #progress_bar = ProgressBar(widgets=['PROCESSED: ', collections.Counter(), '/{} '.format(len(predicted_classes)), Bar('>'), Percentage(),' --- ', AdaptiveETA()], maxval=len(predicted_classes) + 1)\n #progress_bar.start()\n for i, arr in enumerate(predicted_classes_arr):\n pitch_class = [i for i in range(len(arr)) if arr[i] == 1]\n try:\n if pitch_class:\n c = chord.Chord(pitch_class)\n predicted_frequencies[timestamps[i]] = list([x.frequency for x in c.pitches])\n predicted_chords.append(c.pitchedCommonName)\n file.write(\"{} {} {}\\n\".format(timestamps[i], pitch_class, c.pitchedCommonName))\n else:\n predicted_frequencies[timestamps[i]] = [0.0]\n predicted_chords.append(\"No_Chord\")\n file.write(\"{} {} No_Chord\\n\".format(timestamps[i], [0,0,0]))\n except IndexError:\n pass\n\n\nprint(\"Done Writing Predictions\")\n\n\nprint(\"PLAYING PREDICTED AUDIO:\")\n\ndef play_for(sample_wave, ms):\n \"\"\"Play the given NumPy array, as a sound, for ms milliseconds.\"\"\"\n sound = pygame.sndarray.make_sound(sample_wave)\n sound.play(-1)\n pygame.time.delay(ms)\n sound.stop()\n\nsample_rate = 44100\n\ndef sine_wave(hz, peak, n_samples=sample_rate):\n \"\"\"Compute N samples of a sine wave with given frequency and peak amplitude.\n Defaults to one second.\n \"\"\"\n length = sample_rate / float(hz)\n omega = np.pi * 2 / length\n xvalues = np.arange(int(length)) * omega\n onecycle = peak * np.sin(xvalues)\n return np.resize(onecycle, (n_samples,)).astype(np.int16)\n\n\nlast_frequency = []\nlast_changed_index = -1\ntime_distributed_sounds = []\ntime_distributed_chords = []\n\nfor i, frequencies in enumerate(predicted_frequencies.values()):\n if frequencies == last_frequency:\n #print(\"i: {}, last changed index: {}, len: {}\".format(i, last_changed_index, len(time_distributed_sounds)))\n time_distributed_sounds[last_changed_index] = 
(time_distributed_sounds[last_changed_index][0]+stepsize, frequencies)\n else:\n time_distributed_sounds.append((stepsize, frequencies))\n time_distributed_chords.append(predicted_chords[i])\n last_changed_index += 1\n last_frequency = frequencies\n\n#print(time_distributed_sounds)\n\nstart_time = time.time()\npygame.mixer.pre_init(sample_rate, -16, 1)\npygame.init()\ntime_running = 0\n\nfor i, sound_tuple in enumerate(time_distributed_sounds):\n time_running += sound_tuple[0]\n print(\"Time: {} -- Chord: {}\".format(time_running, time_distributed_chords[i]))\n if sound_tuple[1] != [0.0]:\n play_for(sum([sine_wave(x, 2048) for x in sound_tuple[1]]), int(sound_tuple[0] * 1000))\n else:\n delay(int(sound_tuple[0] * 1000))\n\n\n# for timestamp, frequencies in predicted_frequencies.items():\n# while start_time + timestamp > time.time():\n# pass\n#\n# if frequencies != [0.0]:\n# print(frequencies)\n# print(sum([sine_wave(x, 4096) for x in frequencies]))\n# play_for(sum([sine_wave(x, 4096) for x in frequencies]), int(stepsize*1000))\n# # play pitch class through speakers\n# print(\"Time: {} -- Chord: {}\".format(timestamp, frequencies))\n" ]
[ [ "numpy.set_printoptions", "numpy.array", "numpy.resize", "numpy.sin" ] ]
mbercx/cage
[ "90f34135c251f438c8709fdd9e814a47f7aa12e1" ]
[ "cage/utils.py" ]
[ "# Encoding = utf-8\n\nimport numpy as np\nimport pymatgen.io.nwchem as nw\n\n\"\"\"\nA collection of utility methods for other modules.\n\n\"\"\"\n\n\ndef distance(coord1, coord2):\n \"\"\"\n Calculate the distance between two coordinates, defined by arrays.\n :param coord1:\n :param coord2:\n :return:\n \"\"\"\n return np.linalg.norm(coord1 - coord2)\n\n\ndef unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. \"\"\"\n return vector / np.linalg.norm(vector)\n\n\ndef angle_between(v1, v2):\n \"\"\"\n Returns the angle in radians between vectors 'v1' and 'v2'::\n \"\"\"\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n\ndef site_center(sites):\n \"\"\"\n Find the geometric center of a collection of sites.\n :param sites: Tuple of Site objects\n :return: Array of the cartesian coordinates of the center of the sites\n \"\"\"\n return sum([site.coords for site in sites]) / len(sites)\n\n\ndef schoenflies_to_hm():\n \"\"\"\n Function for converting the Schoenflies point group symbol to the Hermann\n Manguin one.\n :return:\n \"\"\"\n pass # TODO\n" ]
[ [ "numpy.dot", "numpy.linalg.norm" ] ]
dringeis/VP_rheologies
[ "5c4835eeb4bb3d15b481825ccc609580d67abf14" ]
[ "VP_rheology.py" ]
[ "#!/usr/bin python\nimport numpy as np\nfrom matplotlib.patches import Ellipse\nimport pylab as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport copy\nfrom matplotlib.colors import SymLogNorm\nfrom math import copysign\n\n#import functions defining the VP rheologies\nfrom VP_rheology_functions import *\n\n# import settings\nfrom VP_rheology_settings import *\n\n# Using LaTeX in figures\nplt.rc('text', usetex=True)\nplt.rc('font', family='sans')\n\n# create fake deformation data\ne11, e22, e12, e21 = create_data(random=True,i=1e-3,j=0,plot=False,sym=False,s=21)\n\n# compute simpler and additionnal variables\nep, em, e1, e2, eI, eII, e12s, e1s, e2s, eIs, eIIs = comp_sim_sr(e11,e22,e12,e21)\n\n\n####################\n# CHOICE OF RHEOLOGY\n####################\n\nell= True\nell_rot= True\n\n#######################\n# COMPUTING VISCOSITIES\n#######################\n\nif ell :\n zeta_ell, eta_ell, press_ell = ellip(ep,em,e12s,e12s,e=e)\n\nif ell_rot:\n zeta_ell_rot, eta_ell_rot, press_ell_rot = ellip(ep,em,e12,e21,e=e)\n\n####################\n# COMPUTING STRESSES\n####################\n\nif ell :\n\n s11_ell, s12_ell, s21_ell, s22_ell = compu_sigma(e11,e22,e12s,e12s,zeta_ell,eta_ell,press_ell,plot=False)\n sI_ell, sII_ell = comp_str_inva(s11_ell, s12_ell, s22_ell, s21_ell, plot=False) \n\nif ell_rot :\n\n s11_ell_rot, s12_ell_rot, s21_ell_rot, s22_ell_rot = compu_sigma(e11,e22,e12,e21,zeta_ell_rot,eta_ell_rot,press_ell_rot,plot=False)\n sI_ell_rot, sII_ell_rot = comp_str_inva(s11_ell_rot, s12_ell_rot, s22_ell_rot, s21_ell_rot, plot=False)\n\n\n###################\n# PLOTTING STRESSES\n###################\n\nfig1=plt.figure('stress states')\nax = fig1.gca()\nplt.grid()\nplt.axis('equal')\n\nif ell:\n plot_inv(sI_ell,sII_ell,eIs,eIIs,opt='ellipse',ax=ax,carg='b.')\n\nif ell_rot :\n plot_inv(sI_ell_rot,sII_ell_rot,eI,eII,opt='ellipse',ax=ax,carg='r.')\n\n#################################\n# TEST FOR THE EFFECT OF ROTATION\n#################################\n\nif ell and ell_rot :\n\n sI_diff=sI_ell_rot-sI_ell\n sII_diff=sII_ell_rot-sII_ell\n\n print('sI_diff mean', np.nanmean(sI_diff))\n print('sII_diff mean', np.nanmean(sII_diff))\n\n print('sI_diff median', np.nanmedian(sI_diff))\n print('sII_diff median', np.nanmedian(sII_diff))\n\n print('sI std diff ', np.nanstd(sI_ell_rot)-np.nanstd(sI_ell))\n print('sII std diff ', np.nanstd(sII_ell_rot)-np.nanstd(sII_ell))\n\n print('sI_diff mean sI>-0.5', np.nanmean(sI_diff[sI_ell>-0.5]))\n print('sII_diff mean sI<-0.5', np.nanmean(sII_diff[sI_ell<-0.5]))\n\n plt.figure('diff sI')\n plt.pcolormesh(sI_diff)\n plt.axis('equal')\n plt.colorbar()\n\n plt.figure('diff sII')\n plt.pcolormesh(sII_diff)\n plt.axis('equal')\n plt.colorbar()\n\n A1=[sI_ell.flatten(),sI_ell_rot.flatten()]\n B1=[sII_ell.flatten(),sII_ell_rot.flatten()]\n\n plt.figure('diff yield curve')\n plt.plot(A1,B1)\n plt.axis('equal')\n\n A2=[sI_ell.flatten(),sII_ell.flatten()]\n B2=[sI_ell_rot.flatten(),sII_ell_rot.flatten()]\n\n plt.figure('arrows diff yield curve')\n for c in range(len(A2[0])):\n # print(c)\n # print(A2[0][c])\n plt.arrow(A2[0][c], A2[1][c], B2[0][c] - A2[0][c], B2[1][c] - A2[1][c],head_width=0.01, length_includes_head=True,ec='k',fc='r')\n plt.axis('equal')\n\n###########\n# PLOT SHOW\n###########\n\nplt.show()" ]
[ [ "numpy.nanstd", "numpy.nanmedian", "numpy.nanmean" ] ]
Tbabm/compare-mt
[ "e68b4b0f4d8682cc61558f8fbca2d380b534a107" ]
[ "compare_mt/bucketers.py" ]
[ "import sys\nimport itertools\nimport numpy as np\nfrom collections import defaultdict\n\nfrom compare_mt import corpus_utils\nfrom compare_mt import scorers\nfrom compare_mt import arg_utils\n\nclass Bucketer:\n\n def set_bucket_cutoffs(self, bucket_cutoffs, num_type='int'):\n self.bucket_cutoffs = bucket_cutoffs\n self.bucket_strs = []\n for i, x in enumerate(bucket_cutoffs):\n if i == 0:\n self.bucket_strs.append(f'<{x}')\n elif num_type == 'int' and x-1 == bucket_cutoffs[i-1]:\n self.bucket_strs.append(f'{x-1}')\n else:\n self.bucket_strs.append(f'[{bucket_cutoffs[i-1]},{x})')\n self.bucket_strs.append(f'>={x}')\n\n def cutoff_into_bucket(self, value):\n for i, v in enumerate(self.bucket_cutoffs):\n if value < v:\n return i\n return len(self.bucket_cutoffs)\n\nclass WordBucketer(Bucketer):\n\n def calc_bucket(self, val, label=None):\n \"\"\"\n Calculate the bucket for a particular word\n\n Args:\n val: The word to calculate the bucket for\n label: If there's a label on the target word, add it\n\n Returns:\n An integer ID of the bucket\n \"\"\"\n raise NotImplementedError('calc_bucket must be implemented in subclasses of WordBucketer')\n\n def _calc_trg_matches(self, ref_sent, out_sents):\n ref_pos = defaultdict(lambda: [])\n out_matches = [[-1 for _ in s] for s in out_sents]\n ref_matches = [[-1 for _ in ref_sent] for _ in out_sents]\n for ri, ref_word in enumerate(ref_sent):\n ref_pos[ref_word].append(ri)\n for oai, out_sent in enumerate(out_sents):\n out_word_cnts = {}\n for oi, out_word in enumerate(out_sent):\n ref_poss = ref_pos.get(out_word, None)\n if ref_poss:\n out_word_cnt = out_word_cnts.get(out_word, 0)\n if out_word_cnt < len(ref_poss):\n out_matches[oai][oi] = ref_poss[out_word_cnt]\n ref_matches[oai][ref_poss[out_word_cnt]] = oi\n out_word_cnts[out_word] = out_word_cnt + 1\n return out_matches, ref_matches\n\n def _calc_trg_buckets_and_matches(self, ref_sent, ref_label, out_sents, out_labels):\n # Initial setup for special cases\n if self.case_insensitive:\n ref_sent = [corpus_utils.lower(w) for w in ref_sent]\n out_sents = [[corpus_utils.lower(w) for w in out_sent] for out_sent in out_sents]\n if not ref_label:\n ref_label = []\n out_labels = [[] for _ in out_sents]\n # Get matches\n out_matches, _ = self._calc_trg_matches(ref_sent, out_sents)\n # Process the reference, getting the bucket\n ref_buckets = [self.calc_bucket(w, label=l) for (w,l) in itertools.zip_longest(ref_sent, ref_label)]\n # Process each of the outputs, finding matches\n out_buckets = [[] for _ in out_sents]\n for oai, (out_sent, out_label, match, out_buck) in \\\n enumerate(itertools.zip_longest(out_sents, out_labels, out_matches, out_buckets)):\n for oi, (w, l, m) in enumerate(itertools.zip_longest(out_sent, out_label, match)):\n out_buck.append(self.calc_bucket(w, label=l) if m < 0 else ref_buckets[m])\n # Calculate totals for each sentence\n num_buckets = len(self.bucket_strs)\n num_outs = len(out_sents)\n my_ref_total = np.zeros(num_buckets ,dtype=int)\n my_out_totals = np.zeros( (num_outs, num_buckets) ,dtype=int)\n my_out_matches = np.zeros( (num_outs, num_buckets) ,dtype=int)\n for b in ref_buckets:\n my_ref_total[b] += 1\n for oi, (obs, ms) in enumerate(zip(out_buckets, out_matches)):\n for b, m in zip(obs, ms):\n my_out_totals[oi,b] += 1\n if m >= 0:\n my_out_matches[oi,b] += 1\n return my_ref_total, my_out_totals, my_out_matches, ref_buckets, out_buckets, out_matches\n\n def _calc_src_buckets_and_matches(self, src_sent, src_label, ref_sent, ref_aligns, out_sents):\n # Initial setup for 
special cases\n if self.case_insensitive:\n src_sent = [corpus_utils.lower(w) for w in src_sent]\n ref_sent = [corpus_utils.lower(w) for w in ref_sent]\n out_sents = [[corpus_utils.lower(w) for w in out_sent] for out_sent in out_sents]\n if not src_label:\n src_label = []\n # Get matches\n _, ref_matches = self._calc_trg_matches(ref_sent, out_sents)\n # Process the source, getting the bucket\n src_buckets = [self.calc_bucket(w, label=l) for (w,l) in itertools.zip_longest(src_sent, src_label)]\n # For each source word, find the reference words that need to be correct\n src_aligns = [[] for _ in src_sent]\n for src, trg in ref_aligns:\n src_aligns[src].append(trg)\n # Calculate totals for each sentence\n num_buckets = len(self.bucket_strs)\n num_outs = len(out_sents)\n my_ref_total = np.zeros(num_buckets ,dtype=int)\n my_out_matches = np.zeros( (num_outs, num_buckets) ,dtype=int)\n for src_bucket in src_buckets:\n my_ref_total[src_bucket] += 1\n my_out_totals = np.broadcast_to(np.reshape(my_ref_total, (1, num_buckets)), (num_outs, num_buckets))\n for oai, (out_sent, ref_match) in enumerate(zip(out_sents, ref_matches)):\n for src_bucket, src_align in zip(src_buckets, src_aligns):\n if len(src_align) != 0:\n if all([ref_match[x] >= 0 for x in src_align]):\n my_out_matches[oai,src_bucket] += 1\n return my_ref_total, my_out_totals, my_out_matches, src_buckets, src_aligns, ref_matches\n\n def calc_statistics(self, ref, outs,\n src=None,\n ref_labels=None, out_labels=None,\n ref_aligns=None, src_labels=None):\n \"\"\"\n Calculate match statistics, bucketed by the type of word we have, and IDs of example sentences to show.\n This must be used with a subclass that has self.bucket_strs defined, and self.calc_bucket(word) implemented.\n\n Args:\n ref: The reference corpus\n outs: A list of output corpora\n src: Source sentences.\n If src is set, it will use ref_aligns, out_aligns, and src_labels.\n Otherwise, it will use ref_labels and out_labels.\n ref_labels: Labels of the reference corpus (optional)\n out_labels: Labels of the output corpora (should be specified iff ref_labels is)\n\n Returns:\n statistics: containing a list of equal length to out, containing for each system\n both_tot: the frequency of a particular bucket appearing in both output and reference\n ref_tot: the frequency of a particular bucket appearing in just reference\n out_tot: the frequency of a particular bucket appearing in just output\n rec: recall of the bucket\n prec: precision of the bucket\n fmeas: f1-measure of the bucket\n my_ref_total_list: containing a list of statistics of the reference\n my_out_matches_list: containing a list of statistics of the outputs\n \"\"\"\n if not hasattr(self, 'case_insensitive'):\n self.case_insensitive = False\n\n # Dimensions\n num_buckets = len(self.bucket_strs)\n num_outs = len(outs)\n\n # Initialize the sufficient statistics for prec/rec/fmeas\n ref_total = np.zeros(num_buckets, dtype=int)\n out_totals = np.zeros( (num_outs, num_buckets) ,dtype=int)\n out_matches = np.zeros( ( num_outs, num_buckets) ,dtype=int)\n\n my_ref_total_list = []\n my_out_totals_list = []\n my_out_matches_list = []\n\n # Step through the sentences\n for rsi, (ref_sent, ref_label) in enumerate(itertools.zip_longest(ref, ref_labels if ref_labels else [])):\n if src:\n my_ref_total, my_out_totals, my_out_matches, _, _, _ = \\\n self._calc_src_buckets_and_matches(src[rsi],\n src_labels[rsi] if src_labels else None,\n ref_sent,\n ref_aligns[rsi],\n [x[rsi] for x in outs])\n else:\n my_ref_total, my_out_totals, 
my_out_matches, _, _, _ = \\\n self._calc_trg_buckets_and_matches(ref_sent,\n ref_label,\n [x[rsi] for x in outs],\n [x[rsi] for x in out_labels] if out_labels else None)\n ref_total += my_ref_total\n out_totals += my_out_totals\n out_matches += my_out_matches\n\n my_ref_total_list.append(my_ref_total)\n my_out_totals_list.append(my_out_totals)\n my_out_matches_list.append(my_out_matches)\n\n # Calculate statistics\n statistics = [[] for _ in range(num_outs)]\n for oi, ostatistics in enumerate(statistics):\n for bi in range(num_buckets):\n mcnt, ocnt, rcnt = out_matches[oi,bi], out_totals[oi,bi], ref_total[bi]\n if mcnt == 0:\n rec, prec, fmeas = 0.0, 0.0, 0.0\n else:\n rec = mcnt / float(rcnt)\n prec = mcnt / float(ocnt)\n fmeas = 2 * prec * rec / (prec + rec)\n ostatistics.append( (mcnt, rcnt, ocnt, rec, prec, fmeas) )\n\n return statistics, my_ref_total_list, my_out_totals_list, my_out_matches_list\n\n def calc_bucket_details(self, my_ref_total_list, my_out_totals_list, my_out_matches_list, num_samples=1000, sample_ratio=0.5):\n \n ref_total = np.array(my_ref_total_list).sum(0)\n\n num_outs, num_buckets = my_out_totals_list[0].shape\n n = len(my_ref_total_list)\n ids = list(range(n))\n sample_size = int(np.ceil(n*sample_ratio))\n rt_arr = np.array(my_ref_total_list)\n ot_arr = np.array(my_out_totals_list)\n om_arr = np.array(my_out_matches_list)\n statistics = [[ [] for __ in range(num_buckets) ] for _ in range(num_outs)]\n for _ in range(num_samples):\n reduced_ids = np.random.choice(ids, size=sample_size, replace=True)\n reduced_ref_total, reduced_out_totals, reduced_out_matches= rt_arr[reduced_ids].sum(0), ot_arr[reduced_ids].sum(0), om_arr[reduced_ids].sum(0)\n # Calculate accuracy on the reduced sample and save stats\n for oi in range(num_outs):\n for bi in range(num_buckets):\n mcnt, ocnt, rcnt = reduced_out_matches[oi,bi], reduced_out_totals[oi,bi], reduced_ref_total[bi]\n if mcnt == 0:\n rec, prec, fmeas = 0.0, 0.0, 0.0\n else:\n rec = mcnt / float(rcnt)\n prec = mcnt / float(ocnt)\n fmeas = 2 * prec * rec / (prec + rec)\n statistics[oi][bi].append( (mcnt, rcnt, ocnt, rec, prec, fmeas) )\n\n intervals = [[] for _ in range(num_outs)]\n for oi in range(num_outs):\n for bi in range(num_buckets):\n if len(statistics[oi][bi]) > 0: \n _, _, _, recs, precs, fmeas = zip(*statistics[oi][bi])\n else:\n recs, precs, fmeas = [0.0], [0.0], [0.0]\n # The first three elements (intervals of mcnt, ocnt and rcnt) are None\n bounds = [None, None, None]\n for x in [recs, precs, fmeas]:\n x = list(x)\n x.sort()\n lower_bound = x[int(num_samples * 0.025)]\n upper_bound = x[int(num_samples * 0.975)]\n bounds.append( (lower_bound, upper_bound) )\n intervals[oi].append(bounds)\n \n return ref_total, intervals\n\n def calc_examples(self, num_sents, num_outs,\n statistics,\n my_ref_total_list, my_out_matches_list,\n num_examples=5):\n \"\"\"\n Calculate examples based the computed statistics.\n\n Args:\n num_sents: number of sentences\n num_outs: number of outputs\n statistics: containing a list of equal length to out, containing for each system\n both_tot: the frequency of a particular bucket appearing in both output and reference\n ref_tot: the frequency of a particular bucket appearing in just reference\n out_tot: the frequency of a particular bucket appearing in just output\n rec: recall of the bucket\n prec: precision of the bucket\n fmeas: f1-measure of the bucket\n my_ref_total_list: containing a list of statistics of the reference\n my_out_matches_list: containing a list of statistics of the 
outputs\n num_examples: number of examples to print\n\n Returns:\n example: containing a list of examples to print\n \"\"\"\n num_buckets = len(self.bucket_strs)\n num_examp_feats = 3\n example_scores = np.zeros( (num_sents, num_examp_feats, num_buckets) )\n\n # Step through the sentences\n for rsi, (my_ref_total, my_out_matches) in enumerate(zip(my_ref_total_list, my_out_matches_list)):\n\n # Scoring of examples across different dimensions:\n # 0: overall variance of matches\n example_scores[rsi,0] = (my_out_matches / (my_ref_total+1e-10).reshape( (1, num_buckets) )).std(axis=0)\n # 1: overall percentage of matches\n example_scores[rsi,1] = my_out_matches.sum(axis=0) / (my_ref_total*num_outs+1e-10)\n # 2: overall percentage of misses\n example_scores[rsi,2] = (my_ref_total*num_outs-my_out_matches.sum(axis=0)) / (my_ref_total*num_outs+1e-10)\n\n # Calculate statistics\n # Find top-5 examples of each class\n examples = [[('Examples where some systems were good, some were bad', []),\n ('Examples where all systems were good', []),\n ('Examples where all systems were bad', [])] for _ in range(num_buckets)]\n # NOTE: This could be made faster with argpartition, but the complexity is probably not worth it\n topn = np.argsort(-example_scores, axis=0)\n for bi, bexamples in enumerate(examples):\n for fi, (_, fexamples) in enumerate(bexamples):\n for si in topn[:num_examples,fi,bi]:\n if example_scores[si,fi,bi] > 0:\n fexamples.append(si)\n\n return examples\n\n def calc_source_bucketed_matches(self, src, ref, out, ref_aligns, out_aligns, src_labels=None):\n \"\"\"\n Calculate the number of matches, bucketed by the type of word we have\n This must be used with a subclass that has self.bucket_strs defined, and self.calc_bucket(word) implemented.\n\n Args:\n src: The source corpus\n ref: The reference corpus\n out: The output corpus\n ref_aligns: Alignments of the reference corpus\n out_aligns: Alignments of the output corpus\n src_labels: Labels of the source corpus (optional)\n\n Returns:\n A tuple containing:\n both_tot: the frequency of a particular bucket appearing in both output and reference\n ref_tot: the frequency of a particular bucket appearing in just reference\n out_tot: the frequency of a particular bucket appearing in just output\n rec: recall of the bucket\n prec: precision of the bucket\n fmeas: f1-measure of the bucket\n \"\"\"\n if not hasattr(self, 'case_insensitive'):\n self.case_insensitive = False\n\n src_labels = src_labels if src_labels else []\n matches = [[0, 0, 0] for x in self.bucket_strs]\n for src_sent, ref_sent, out_sent, ref_align, out_align, src_lab in itertools.zip_longest(src, ref, out, ref_aligns, out_aligns, src_labels):\n ref_cnt = defaultdict(lambda: 0)\n for i, word in enumerate(ref_sent):\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n ref_cnt[word] += 1\n for i, (src_index, trg_index) in enumerate(out_align):\n src_word = src_sent[src_index]\n word = out_sent[trg_index]\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n bucket = self.calc_bucket(src_word,\n label=src_lab[src_index] if src_lab else None)\n if ref_cnt[word] > 0:\n ref_cnt[word] -= 1\n matches[bucket][0] += 1\n matches[bucket][2] += 1\n for i, (src_index, trg_index) in enumerate(ref_align):\n src_word = src_sent[src_index]\n bucket = self.calc_bucket(src_word,\n label=src_lab[src_index] if src_lab else None)\n matches[bucket][1] += 1\n\n for both_tot, ref_tot, out_tot in matches:\n if both_tot == 0:\n rec, prec, fmeas = 0.0, 0.0, 0.0\n else:\n rec = both_tot / 
float(ref_tot)\n prec = both_tot / float(out_tot)\n fmeas = 2 * prec * rec / (prec + rec)\n yield both_tot, ref_tot, out_tot, rec, prec, fmeas\n\n def calc_bucketed_likelihoods(self, corpus, likelihoods):\n \"\"\"\n Calculate the average of log likelihoods, bucketed by the type of word/label we have\n This must be used with a subclass that has self.bucket_strs defined, and self.calc_bucket(word) implemented.\n\n Args:\n corpus: The text/label corpus over which we compute the likelihoods\n likelihoods: The log-likelihoods corresponding to each word/label in the corpus\n\n Returns:\n the average log-likelihood bucketed by the type of word/label we have\n \"\"\"\n if not hasattr(self, 'case_insensitive'):\n self.case_insensitive = False\n\n if type(corpus) == str:\n corpus = corpus_utils.load_tokens(corpus)\n bucketed_likelihoods = [[0.0, 0] for _ in self.bucket_strs]\n if len(corpus) != len(likelihoods):\n raise ValueError(\"Corpus and likelihoods should have the same size.\")\n for sent, list_of_likelihoods in zip(corpus, likelihoods):\n if len(sent) != len(list_of_likelihoods):\n raise ValueError(\"Each sentence of the corpus should have likelihood value for each word\")\n\n for word, ll in zip(sent, list_of_likelihoods):\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n bucket = self.calc_bucket(word, label=word)\n bucketed_likelihoods[bucket][0] += ll\n bucketed_likelihoods[bucket][1] += 1\n\n for ll, count in bucketed_likelihoods:\n if count != 0:\n yield ll/float(count)\n else:\n yield \"NA\" # not applicable\n\n\nclass FreqWordBucketer(WordBucketer):\n\n def __init__(self,\n freq_counts=None, freq_count_file=None, freq_corpus_file=None, freq_data=None,\n bucket_cutoffs=None,\n case_insensitive=False):\n \"\"\"\n A bucketer that buckets words by their frequency.\n\n Args:\n freq_counts: A dictionary containing word/count data.\n freq_count_file: A file containing counts for each word in tab-separated word, count format.\n Ignored if freq_counts exists.\n freq_corpus_file: A file with a corpus used for collecting counts. Ignored if freq_count_file exists.\n freq_data: A tokenized corpus from which counts can be calculated. 
Ignored if freq_corpus_file exists.\n bucket_cutoffs: Cutoffs for each bucket.\n The first bucket will be range(0,bucket_cutoffs[0]).\n Middle buckets will be range(bucket_cutoffs[i],bucket_cutoffs[i-1].\n Final bucket will be everything greater than bucket_cutoffs[-1].\n case_insensitive: A boolean specifying whether to turn on the case insensitive option.\n \"\"\"\n self.case_insensitive = case_insensitive\n if not freq_counts:\n freq_counts = defaultdict(lambda: 0)\n if freq_count_file != None:\n print(f'Reading frequency from \"{freq_count_file}\"')\n with open(freq_count_file, \"r\") as f:\n for line in f:\n cols = line.strip().split('\\t')\n if len(cols) != 2:\n print(f'Bad line in counts file {freq_count_file}, ignoring:\\n{line}')\n else:\n word, freq = cols\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n freq_counts[word] = int(freq)\n elif freq_corpus_file:\n print(f'Reading frequency from \"{freq_corpus_file}\"')\n for words in corpus_utils.iterate_tokens(freq_corpus_file):\n for word in words:\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n freq_counts[word] += 1\n elif freq_data:\n print('Reading frequency from the reference')\n for words in freq_data:\n for word in words:\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n freq_counts[word] += 1\n else:\n raise ValueError('Must have at least one source of frequency counts for FreqWordBucketer')\n self.freq_counts = freq_counts\n\n if bucket_cutoffs is None:\n bucket_cutoffs = [1, 2, 3, 4, 5, 10, 100, 1000]\n self.set_bucket_cutoffs(bucket_cutoffs)\n\n def calc_bucket(self, word, label=None):\n if self.case_insensitive:\n word = corpus_utils.lower(word)\n return self.cutoff_into_bucket(self.freq_counts.get(word, 0))\n\n def name(self):\n return \"frequency\"\n\n def idstr(self):\n return \"freq\"\n\nclass CaseWordBucketer(WordBucketer):\n\n def __init__(self):\n \"\"\"\n A bucketer that buckets words by whether they're all all lower-case (lower), all upper-case (upper),\n title case (title), or other.\n \"\"\"\n self.bucket_strs = ['lower', 'upper', 'title', 'other']\n\n def calc_bucket(self, word, label=None):\n if word.islower():\n return 0\n elif word.isupper():\n return 1\n elif word.istitle():\n return 2\n else:\n return 3\n\n def name(self):\n return \"case\"\n\n def idstr(self):\n return \"case\"\n\nclass LabelWordBucketer(WordBucketer):\n\n def __init__(self,\n label_set=None):\n \"\"\"\n A bucketer that buckets words by their labels.\n\n Args:\n label_set: The set of labels to use as buckets. 
This can be a list, or a string separated by '+'s.\n \"\"\"\n if type(label_set) == str:\n label_set = label_set.split('+')\n self.bucket_strs = label_set + ['other']\n label_set_len = len(label_set)\n self.bucket_map = defaultdict(lambda: label_set_len)\n for i, l in enumerate(label_set):\n self.bucket_map[l] = i\n\n def calc_bucket(self, word, label=None):\n if not label:\n raise ValueError('When calculating buckets by label, label must be non-zero')\n return self.bucket_map[label]\n\n def name(self):\n return \"labels\"\n\n def idstr(self):\n return \"labels\"\n\nclass NumericalLabelWordBucketer(WordBucketer):\n\n def __init__(self,\n bucket_cutoffs=None):\n \"\"\"\n A bucketer that buckets words by labels that are numerical values.\n\n Args:\n bucket_cutoffs: Cutoffs for each bucket.\n The first bucket will be range(0,bucket_cutoffs[0]).\n Middle buckets will be range(bucket_cutoffs[i],bucket_cutoffs[i-1].\n Final bucket will be everything greater than bucket_cutoffs[-1].\n \"\"\"\n if bucket_cutoffs is None:\n bucket_cutoffs = [0.25, 0.5, 0.75]\n self.set_bucket_cutoffs(bucket_cutoffs)\n\n def calc_bucket(self, word, label=None):\n if label:\n return self.cutoff_into_bucket(float(label))\n else:\n raise ValueError('When calculating buckets by label must be non-zero')\n\n def name(self):\n return \"numerical labels\"\n\n def idstr(self):\n return \"numlabels\"\n\nclass SentenceBucketer(Bucketer):\n\n def calc_bucket(self, val, ref=None, src=None, out_label=None, ref_label=None):\n \"\"\"\n Calculate the bucket for a particular sentence\n\n Args:\n val: The sentence to calculate the bucket for\n ref: The reference sentence, if it exists\n src: The source sentence, if it exists\n ref_labels: The label of the reference sentence, if it exists\n out_labels: The label of the output sentence, if it exists\n\n Returns:\n An integer ID of the bucket\n \"\"\"\n raise NotImplementedError('calc_bucket must be implemented in subclasses of SentenceBucketer')\n\n def create_bucketed_corpus(self, out, ref=None, src=None, ref_labels=None, out_labels=None):\n bucketed_corpus = [([],[] if ref else None, []) for _ in self.bucket_strs]\n if ref is None:\n ref = out\n\n if ref_labels is None:\n ref_labels = out_labels\n\n src = [None for _ in out] if src is None else src\n\n for i, (out_words, ref_words, src_words) in enumerate(zip(out, ref, src)):\n bucket = self.calc_bucket(out_words, ref_words, src_words, label=(ref_labels[i][0] if ref_labels else None))\n\n bucketed_corpus[bucket][0].append(out_words)\n bucketed_corpus[bucket][1].append(ref_words)\n bucketed_corpus[bucket][2].append(src_words)\n \n return bucketed_corpus\n\n\nclass ScoreSentenceBucketer(SentenceBucketer):\n \"\"\"\n Bucket sentences by some score (e.g. 
BLEU)\n \"\"\"\n\n def __init__(self, score_type, bucket_cutoffs=None, case_insensitive=False):\n self.score_type = score_type\n self.scorer = scorers.create_scorer_from_profile(score_type)\n if bucket_cutoffs is None:\n bucket_cutoffs = [x * self.scorer.scale / 10.0 for x in range(1,10)]\n self.set_bucket_cutoffs(bucket_cutoffs, num_type='float')\n self.case_insensitive = case_insensitive\n\n def calc_bucket(self, val, ref=None, src=None, label=None):\n if self.case_insensitive:\n return self.cutoff_into_bucket(self.scorer.score_sentence(corpus_utils.lower(ref), corpus_utils.lower(val))[0])\n else:\n return self.cutoff_into_bucket(self.scorer.score_sentence(ref, val, src)[0])\n\n def name(self):\n return self.scorer.name()\n\n def idstr(self):\n return self.scorer.idstr()\n\nclass LengthSentenceBucketer(SentenceBucketer):\n \"\"\"\n Bucket sentences by length\n \"\"\"\n\n def __init__(self, bucket_cutoffs=None):\n if bucket_cutoffs is None:\n bucket_cutoffs = [10, 20, 30, 40, 50, 60]\n self.set_bucket_cutoffs(bucket_cutoffs, num_type='int')\n\n def calc_bucket(self, val, ref=None, src=None, label=None):\n return self.cutoff_into_bucket(len(ref))\n\n def name(self):\n return \"length\"\n\n def idstr(self):\n return \"length\"\n\nclass LengthDiffSentenceBucketer(SentenceBucketer):\n \"\"\"\n Bucket sentences by length\n \"\"\"\n\n def __init__(self, bucket_cutoffs=None):\n if bucket_cutoffs is None:\n bucket_cutoffs = [-20, -10, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 11, 21]\n self.set_bucket_cutoffs(bucket_cutoffs, num_type='int')\n\n def calc_bucket(self, val, ref=None, src=None, label=None):\n return self.cutoff_into_bucket(len(val) - len(ref))\n\n def name(self):\n return \"len(output)-len(reference)\"\n\n def idstr(self):\n return \"lengthdiff\"\n\nclass LabelSentenceBucketer(SentenceBucketer):\n\n def __init__(self, label_set=None):\n \"\"\"\n A bucketer that buckets sentences by their labels.\n\n Args:\n label_set: The set of labels to use as buckets. 
This can be a list, or a string separated by '+'s.\n \"\"\"\n if type(label_set) == str:\n label_set = label_set.split('+')\n self.bucket_strs = label_set + ['other']\n label_set_len = len(label_set)\n self.bucket_map = defaultdict(lambda: label_set_len)\n for i, l in enumerate(label_set):\n self.bucket_map[l] = i\n\n def calc_bucket(self, val, ref=None, src=None, label=None):\n return self.bucket_map[label]\n\n def name(self):\n return \"labels\"\n\n def idstr(self):\n return \"labels\"\n\nclass NumericalLabelSentenceBucketer(SentenceBucketer):\n\n def __init__(self, bucket_cutoffs=None):\n \"\"\"\n A bucketer that buckets sentences by labels that are numerical values.\n\n Args:\n bucket_cutoffs: Cutoffs for each bucket.\n The first bucket will be range(0,bucket_cutoffs[0]).\n Middle buckets will be range(bucket_cutoffs[i],bucket_cutoffs[i-1].\n Final bucket will be everything greater than bucket_cutoffs[-1].\n \"\"\"\n if bucket_cutoffs is None:\n bucket_cutoffs = [0.25, 0.5, 0.75]\n self.set_bucket_cutoffs(bucket_cutoffs)\n\n def calc_bucket(self, val, ref=None, src=None, label=None):\n return self.cutoff_into_bucket(float(label))\n\n def name(self):\n return \"numerical labels\"\n\n def idstr(self):\n return \"numlabels\"\n\ndef create_word_bucketer_from_profile(bucket_type,\n freq_counts=None, freq_count_file=None, freq_corpus_file=None, freq_data=None,\n label_set=None,\n bucket_cutoffs=None,\n case_insensitive=False):\n if type(bucket_cutoffs) == str:\n bucket_cutoffs = [arg_utils.parse_intfloat(x) for x in bucket_cutoffs.split(':')]\n if bucket_type == 'freq':\n return FreqWordBucketer(\n freq_counts=freq_counts,\n freq_count_file=freq_count_file,\n freq_corpus_file=freq_corpus_file,\n freq_data=freq_data,\n bucket_cutoffs=bucket_cutoffs,\n case_insensitive=case_insensitive)\n if bucket_type == 'case':\n return CaseWordBucketer()\n elif bucket_type == 'label':\n return LabelWordBucketer(\n label_set=label_set)\n elif bucket_type == 'numlabel':\n return NumericalLabelWordBucketer(\n bucket_cutoffs=bucket_cutoffs)\n else:\n raise ValueError(f'Illegal bucket type {bucket_type}')\n\ndef create_sentence_bucketer_from_profile(bucket_type,\n score_type=None,\n bucket_cutoffs=None,\n label_set=None,\n case_insensitive=False):\n if type(bucket_cutoffs) == str:\n bucket_cutoffs = [arg_utils.parse_intfloat(x) for x in bucket_cutoffs.split(':')]\n if bucket_type == 'score':\n return ScoreSentenceBucketer(score_type, bucket_cutoffs=bucket_cutoffs, case_insensitive=case_insensitive)\n elif bucket_type == 'length':\n return LengthSentenceBucketer(bucket_cutoffs=bucket_cutoffs)\n elif bucket_type == 'lengthdiff':\n return LengthDiffSentenceBucketer(bucket_cutoffs=bucket_cutoffs)\n elif bucket_type == 'label':\n return LabelSentenceBucketer(label_set=label_set)\n elif bucket_type == 'numlabel':\n return NumericalLabelSentenceBucketer(bucket_cutoffs=bucket_cutoffs)\n else:\n raise NotImplementedError(f'Illegal bucket type {bucket_type}')\n" ]
[ [ "numpy.random.choice", "numpy.reshape", "numpy.ceil", "numpy.argsort", "numpy.array", "numpy.zeros" ] ]
MartinKlevs/PyDMD
[ "2c50b775d00bf16b0f41d248040d884ee22e72c0" ]
[ "pydmd/mosesdmd.py" ]
[ "\"\"\"\r\nDerived module from dmdbase.py for higher order dmd.\r\n\r\nReference:\r\n- S. L Clainche, J. M. Vega, Higher Order Dynamic Mode Decomposition.\r\nJournal on Applied Dynamical Systems, 16(2), 882-925, 2017.\r\n\"\"\"\r\nfrom past.utils import old_div\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom scipy.linalg import pinv2\r\nfrom mosessvd import MOSESSVD\r\nfrom numba import jit\r\n\r\nfrom .mosesdmdbase import MOSESDMDBase\r\n\r\n\r\ndef pinv(x): return pinv2(x, rcond=10 * np.finfo(float).eps)\r\n\r\n\r\nclass MOSESDMD(MOSESDMDBase):\r\n \"\"\"\r\n MOSES SVD based Higher Order Dynamic Mode Decomposition\r\n\r\n :param int svd_rank: rank truncation in SVD. If 0, the method computes the\r\n optimal rank and uses it for truncation; if positive number, the method\r\n uses the argument for the truncation; if -1, the method does not\r\n compute truncation.\r\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\r\n is 0, that means TLSQ is not applied.\r\n :param bool exact: flag to compute either exact DMD or projected DMD.\r\n Default is False.\r\n :param bool opt: flag to compute optimal amplitudes. See :class:`DMDBase`.\r\n Default is False.\r\n :param int d: the new order for spatial dimension of the input snapshots.\r\n Default is 1.\r\n :param int chunk_size: the horizontal size for the chunks given to MOSES SVD.\r\n :param numpy.dtype dtype: The desired datatype used for calculations.\r\n (might be removed in the future)\r\n :param boolean projection: Whether to use V or the projection of U for\r\n DMD. The second option is better, but requires more computations.\r\n Default is True.\r\n :param int or tring sqrt_K: Choose the method to calculate K. Default is True.\r\n \"\"\"\r\n\r\n def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False, d=1,\r\n chunk_size=None, dtype=np.complex64, projection=True,\r\n sqrt_K=True):\r\n super(MOSESDMD, self).__init__(svd_rank, tlsq_rank, exact, opt)\r\n self.d = d\r\n self.chunk_size = chunk_size\r\n self.U = None\r\n self.s = None\r\n self.V = None\r\n self.K_list = None\r\n self.M = None\r\n self.dtype = dtype\r\n self.projection = projection\r\n self.sqrt_K = sqrt_K\r\n self.K_eigvec = None\r\n\r\n def linsolve(self, A, B):\r\n return np.matmul(B, np.linalg.inv(A))\r\n\r\n # @profile\r\n def fit(self, X):\r\n \"\"\"\r\n Compute the Dynamic Modes Decomposition to the input data.\r\n\r\n :param X: the input snapshots.\r\n :type X: numpy.ndarray or iterable\r\n \"\"\"\r\n if X.dtype != self.dtype:\r\n X = X.astype(self.dtype)\r\n\r\n self._snapshots = X\r\n\r\n n_samples = self._snapshots.shape[1]\r\n\r\n # X, Y = self._compute_tlsq(X, Y, self.tlsq_rank) not implemented\r\n\r\n msvd = MOSESSVD(rank=self.svd_rank)\r\n\r\n # MOSES SVD iteration loop\r\n i = -1\r\n for i in range(self.d-1, self._snapshots.shape[1] - self.chunk_size, self.chunk_size):\r\n chunk = [self._snapshots[:, i-j:i-j+self.chunk_size] for j in range(self.d)]\r\n chunk = np.vstack(chunk)\r\n msvd.update(chunk)\r\n\r\n # final chunk that contains the remaining snapshots\r\n chunk = [self._snapshots[:, i+1-j+self.chunk_size: self._snapshots.shape[1]-j] for j in range(self.d)]\r\n chunk = np.vstack(chunk)\r\n msvd.update(chunk)\r\n\r\n # get the SVD matrices\r\n U, s, V = msvd.S.astype(self.dtype), msvd.Gamma.astype(self.dtype), msvd.Q.astype(self.dtype)\r\n self.U, self.s, self.V = U, s, V\r\n\r\n M = np.zeros((self.svd_rank, self._snapshots.shape[1] - self.d)).astype(self.dtype)\r\n U_conj = np.ascontiguousarray(U.conj().T)\r\n\r\n 
# calculate M\r\n if self.projection:\r\n for i in range(self.svd_rank):\r\n M[i, :] = self.M_projection_value(self._snapshots, U_conj, i, self.d, self._snapshots.shape[1] - self.d,\r\n self.dtype)\r\n else:\r\n M = s.dot(V.conj().T)\r\n\r\n self.M = M\r\n\r\n # get the time shifted MX and MY\r\n MX = M[:, :-1]\r\n MY = M[:, 1:]\r\n\r\n # calculate the forward and backward operators\r\n Kf = MY.dot(pinv(MX))\r\n Kb = MX.dot(pinv(MY))\r\n Kbinv = pinv(Kb)\r\n if self.sqrt_K == \"mean\":\r\n K = (Kf + Kbinv) / 2\r\n elif self.sqrt_K:\r\n K = sp.linalg.sqrtm(Kf.dot(Kbinv))\r\n else:\r\n K = Kf\r\n self.Atilde = K\r\n K_eigval, K_eigvec = np.linalg.eig(K)\r\n self._eigs = K_eigval\r\n self.K_eigvec = K_eigvec\r\n\r\n # calculate the modes truncated to the original size\r\n self._modes = U[:self._snapshots.shape[0]].dot(K_eigvec.astype(self.dtype))\r\n\r\n # Default timesteps\r\n self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\r\n self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\r\n\r\n self._b = self._compute_amplitudes(self._modes, self._snapshots,\r\n self._eigs, self.opt)\r\n\r\n return self\r\n\r\n def _compute_amplitudes(self, modes, snapshots, eigs, opt):\r\n \"\"\"\r\n Compute the amplitude coefficients. If `opt` is False the amplitudes\r\n are computed by minimizing the error between the modes and the first\r\n snapshot; if `opt` is True the amplitudes are computed by minimizing\r\n the error between the modes and all the snapshots, at the expense of\r\n bigger computational cost.\r\n :param numpy.ndarray modes: 2D matrix that contains the modes, stored\r\n by column.\r\n :param numpy.ndarray snapshots: 2D matrix that contains the original\r\n snapshots, stored by column.\r\n :param numpy.ndarray eigs: array that contains the eigenvalues of the\r\n linear operator.\r\n :param bool opt: flag for computing the optimal amplitudes of the DMD\r\n modes, minimizing the error between the time evolution and all\r\n the original snapshots. If false the amplitudes are computed\r\n using only the initial condition, that is snapshots[0].\r\n :return: the amplitudes array\r\n :rtype: numpy.ndarray\r\n References for optimal amplitudes:\r\n Jovanovic et al. 
2014, Sparsity-promoting dynamic mode decomposition,\r\n https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document\r\n \"\"\"\r\n if opt:\r\n # compute the vandermonde matrix\r\n omega = old_div(np.log(eigs), self.original_time['dt'])\r\n vander = np.exp(\r\n np.multiply(*np.meshgrid(omega, self.dmd_timesteps))).T\r\n\r\n # perform svd on all the snapshots\r\n # msvd = MOSESSVD(rank=self.svd_rank)\r\n # U, s, V = msvd.iterated_svd(snapshots, b=self.svd_rank+1)\r\n # V = V.conj().T\r\n # U, s, V = np.linalg.svd(self._snapshots, full_matrices=False)\r\n U, s, M = self.U, self.s, self.M\r\n K_eigvec = self.K_eigvec\r\n sinv = np.diag(np.reciprocal(np.diag(s)))\r\n V = np.dot(sinv, M).conj().T\r\n\r\n vander = vander[:,vander.shape[1] - V.shape[0]:]\r\n\r\n P = np.multiply(\r\n np.dot(K_eigvec.conj().T, K_eigvec),\r\n np.conj(np.dot(vander, vander.conj().T)))\r\n tmp = np.dot(V, s.conj().T)\r\n q = np.conj(np.diag(np.dot(np.dot(vander, tmp), K_eigvec)))\r\n\r\n # b optimal\r\n a = np.linalg.solve(P, q)\r\n else:\r\n a = np.linalg.lstsq(modes, snapshots.T[0], rcond=None)[0]\r\n\r\n return a\r\n\r\n @staticmethod\r\n @jit(nopython=True)\r\n def M_projection_value(snapshots, S_conj, index_i, d, length_j, dtype):\r\n \"\"\"\r\n Generates the i-th row from the matrix product of U and the stacked snapshots.\r\n This projects the stacked snapshots to the subspace of U\r\n Parameters\r\n ----------\r\n snapshots : numpy.ndarray\r\n Snapshot matrix\r\n U_conj : numpy.ndarray\r\n Complex conjugate of U matrix. It is more efficient to do the\r\n conjugate transpose outside this method\r\n index_i : int\r\n Index i for the M matrix\r\n d : int\r\n stacking depth of the snapshots\r\n dtype : numpy.dtype\r\n Target datatype.\r\n\r\n Returns\r\n -------\r\n value_row : The i-th row of M\r\n\r\n \"\"\"\r\n S_row = S_conj[index_i]\r\n snapshot_length = snapshots.shape[0]\r\n value_row = np.zeros(length_j).astype(dtype)\r\n for index_j in range(length_j):\r\n value = dtype(0)\r\n for m_slice_nr in range(d):\r\n m_slice = snapshots[:, index_j+d-1 - m_slice_nr]\r\n s_slice = S_row[m_slice_nr * snapshot_length : (m_slice_nr+1) * snapshot_length]\r\n value += s_slice.dot(m_slice)\r\n value_row[index_j] = value\r\n return value_row\r\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.log", "numpy.linalg.solve", "numpy.linalg.inv", "numpy.linalg.eig", "numpy.finfo", "numpy.linalg.lstsq", "numpy.meshgrid", "numpy.zeros", "numpy.vstack" ] ]
giadarol/hellofrom
[ "18f4030dc35bd4a7f61f4417f6fdc649f6ed0211" ]
[ "tests/test_sqrts.py" ]
[ "import pypkgexample as pe\nimport numpy as np\n\ndef test_sqrt_python():\n assert np.max(\n pe.sqrt_array_python([1., 4., 9.])-\n - np.array([1., 2., 3.])) < 1e30\n\ndef test_sqrt_fortran():\n assert np.max(\n pe.sqrt_array_fortran([1., 4., 9.])-\n - np.array([1., 2., 3.])) < 1e30\n\ndef test_sqrt_c_ctypes():\n assert np.max(\n pe.sqrt_array_c_ctypes([1., 4., 9.])-\n - np.array([1., 2., 3.])) < 1e30\n\ndef test_sqrt_c_cython():\n assert np.max(\n pe.sqrt_array_c_cython([1., 4., 9.])-\n - np.array([1., 2., 3.])) < 1e30\n" ]
[ [ "numpy.array" ] ]
juliette-r/WarpX
[ "4974f07209ebc5e0578fc383057b4be383cdf318" ]
[ "Examples/Physics_applications/laser_acceleration/PICMI_inputs_laser_acceleration.py" ]
[ "#!/usr/bin/env python3\n#\nimport numpy as np\nfrom pywarpx import picmi\n#from warp import picmi\n\nconstants = picmi.constants\n\n##########################\n# physics parameters\n##########################\n\n# --- laser\n\nlaser_a0 = 4. # Normalized potential vector\nlaser_wavelength = 8e-07 # Wavelength of the laser (in meters)\nlaser_waist = 5e-06 # Waist of the laser (in meters)\nlaser_duration = 15e-15 # Duration of the laser (in seconds)\nlaser_polarization = 0. # Polarization angle (in rad)\nlaser_injection_loc = 9.e-6 # Position of injection (in meters, along z)\nlaser_focal_distance = 100.e-6 # Focal distance from the injection (in meters)\nlaser_t_peak = 30.e-15 # The time at which the laser reaches its peak\n # at the antenna injection location (in seconds)\n# --- plasma\n\nplasma_density = 1.e24\nplasma_min = [-20.e-6, -20.e-6, 0.0e-6]\nplasma_max = [ 20.e-6, 20.e-6, 1.e-3]\n\n\n##########################\n# numerics parameters\n##########################\n\n# --- Nb time steps\n\nmax_steps = 10\n\n# --- grid\n\nnx = 64\nny = 64\nnz = 480\n\nxmin = 1.5*plasma_min[0]\nxmax = 1.5*plasma_max[0]\nymin = 1.5*plasma_min[1]\nymax = 1.5*plasma_max[1]\nzmin = -56.e-6\nzmax = 12.e-6\n\nmoving_window_velocity = [0., 0., constants.c]\n\nnumber_per_cell_each_dim = [2, 2, 1]\n\n##########################\n# physics components\n##########################\n\n# --- laser\n\nlaser = picmi.GaussianLaser(wavelength = laser_wavelength,\n waist = laser_waist,\n duration = laser_duration,\n focal_position = [0., 0., laser_focal_distance + laser_injection_loc],\n centroid_position = [0., 0., laser_injection_loc - constants.c*laser_t_peak],\n polarization_direction = [np.cos(laser_polarization), np.sin(laser_polarization), 0.],\n propagation_direction = [0,0,1],\n E0 = laser_a0*2.*np.pi*constants.m_e*constants.c**2/(constants.q_e*laser_wavelength)) # Maximum amplitude of the laser field (in V/m)\n\nlaser_antenna = picmi.LaserAntenna(position = [0., 0., laser_injection_loc], # This point is on the laser plane\n normal_vector = [0., 0., 1.]) # The plane normal direction\n\n# --- plasma\n\nuniform_plasma = picmi.UniformDistribution(density = plasma_density,\n lower_bound = plasma_min,\n upper_bound = plasma_max,\n fill_in = True)\n\nelectrons = picmi.Species(particle_type = 'electron',\n name = 'electrons',\n initial_distribution = uniform_plasma)\n\n\n##########################\n# numerics components\n##########################\n\ngrid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz],\n lower_bound = [xmin, ymin, zmin],\n upper_bound = [xmax, ymax, zmax],\n lower_boundary_conditions = ['periodic', 'periodic', 'open'],\n upper_boundary_conditions = ['periodic', 'periodic', 'open'],\n lower_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],\n upper_boundary_conditions_particles = ['periodic', 'periodic', 'absorbing'],\n moving_window_velocity = moving_window_velocity,\n warpx_max_grid_size=32)\n\nsolver = picmi.ElectromagneticSolver(grid=grid, method='CKC', cfl=1.)\n\n\n##########################\n# diagnostics\n##########################\n\ndiag_field_list = [\"rho\", \"E\", \"B\", \"J\"]\nfield_diag1 = picmi.FieldDiagnostic(name = 'diag1',\n grid = grid,\n period = 10,\n write_dir = '.',\n warpx_file_prefix = 'Python_LaserAccelerationMR_plt',\n data_list = diag_field_list)\n\npart_diag1 = picmi.ParticleDiagnostic(name = 'diag1',\n period = 10,\n species = [electrons])\n\n##########################\n# simulation setup\n##########################\n\nsim = 
picmi.Simulation(solver = solver,\n max_steps = max_steps,\n verbose = 1,\n warpx_current_deposition_algo = 'esirkepov')\n\nsim.add_species(electrons, layout=picmi.GriddedLayout(grid=grid, n_macroparticle_per_cell=number_per_cell_each_dim))\n\nsim.add_laser(laser, injection_method=laser_antenna)\n\nsim.add_diagnostic(field_diag1)\nsim.add_diagnostic(part_diag1)\n\n##########################\n# simulation run\n##########################\n\n# write_inputs will create an inputs file that can be used to run\n# with the compiled version.\n#sim.write_input_file(file_name = 'inputs_from_PICMI')\n\n# Alternatively, sim.step will run WarpX, controlling it from Python\nsim.step(max_steps)\n\n" ]
[ [ "numpy.cos", "numpy.sin" ] ]
rohanharode/DRAW-Drug-Review-Analysis-Work
[ "89d8df82e1f0b67129727f16c32c038d64af35e2" ]
[ "ETL/Data_Aggregation/postgresql_db_creation.py" ]
[ "import psycopg2\nimport sqlalchemy\nimport pandas as pd\n\n\ndef postgres_table():\n engine = sqlalchemy.create_engine('postgresql://Shubham:@localhost:5432/draw')\n\n side_effect_df = pd.read_csv('../../side_effects.csv')\n\n side_effect_df.to_sql(\n name='drug_side_effects',\n con=engine,\n index=False,\n if_exists='replace'\n )\n print('side effect table created')\n full_merge_model_predictions_df = pd.read_csv('../../full_merge_model_predictions.csv')\n\n full_merge_model_predictions_df.to_sql(\n name='full_merge',\n con=engine,\n index=False,\n if_exists='replace'\n )\n print('final merge table created')\n print('end')\n\npostgres_table()\n" ]
[ [ "pandas.read_csv" ] ]
xrcui/pytorch-CycleGAN-and-pix2pix
[ "07821024f73ab1eb4cb2d9866f55deb0910b6c7e" ]
[ "models/pix2pix_model.py" ]
[ "import torch\nfrom .base_model import BaseModel\nfrom . import networks\nimport copy\n\n\nclass Pix2PixModel(BaseModel):\n \"\"\" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.\n\n The model training requires '--dataset_mode aligned' dataset.\n By default, it uses a '--netG unet256' U-Net generator,\n a '--netD basic' discriminator (PatchGAN),\n and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).\n\n pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For pix2pix, we do not use image buffer\n The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1\n By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.\n \"\"\"\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the pix2pix class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n if self.netG.module.__class__.__name__ == 'UnetGenerator':\n self.fake_B = self.netG(self.real_A)\n elif self.netG.module.__class__.__name__ == 'UnetGenerator_AddLayer':\n self.fake_B = self.netG(self.real_A,self.real_input) # G(A)\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake; stop backprop to the generator by detaching fake_B\n fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n pred_fake = self.netD(fake_AB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n # Real\n real_AB = torch.cat((self.real_A, self.real_B), 1)\n pred_real = self.netD(real_AB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n # combine loss and calculate gradients\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n\n def backward_G(self):\n \"\"\"Calculate GAN and L1 loss for the generator\"\"\"\n # First, G(A) should fake the discriminator\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n # Second, G(A) = B\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n self.loss_G.backward()\n\n def optimize_parameters(self):\n self.real_input = copy.deepcopy(self.real_A)\n self.forward() # compute fake images: G(A)\n # update D\n self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n 
self.backward_D() # calculate gradients for D\n self.optimizer_D.step() # update D's weights\n # update G\n self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n self.optimizer_G.zero_grad() # set G's gradients to zero\n self.backward_G() # calculate graidents for G\n self.optimizer_G.step() # udpate G's weights\n" ]
[ [ "torch.nn.L1Loss", "torch.cat" ] ]
nikolasmorshuis/advchain
[ "f24eaca30d78677c8a8c3eb08b28e767b6c08435" ]
[ "advchain/models/unet.py" ]
[ "# Created by cc215 at 17/03/19\n# Enter feature description here\n# Enter scenario name here\n# Enter steps here\n\nimport math\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\n# noqa\nfrom advchain.models.custom_layers import BatchInstanceNorm2d\nfrom advchain.models.custom_layers import Self_Attn\nfrom advchain.models.unet_parts import *\nfrom advchain.common.utils import check_dir\n\n\ndef get_unet_model(model_path, num_classes=2, device=None, model_arch='UNet_16'):\n '''\n init model and load the trained parameters from the disk.\n model path: string. path to the model checkpoint\n device: torch device\n return pytorch nn.module model\n '''\n assert check_dir(model_path) == 1, model_path+' does not exists'\n if device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if model_arch == 'UNet_16':\n model = UNet(input_channel=1, num_classes=num_classes, feature_scale=4)\n elif model_arch == 'UNet_64':\n model = UNet(input_channel=1, num_classes=num_classes, feature_scale=1)\n else:\n raise NotImplementedError\n model.load_state_dict(torch.load(model_path))\n model = model.to(device)\n return model\n\n\nclass UNet(nn.Module):\n def __init__(self, input_channel, num_classes, feature_scale=1, encoder_dropout=None, decoder_dropout=None, norm=nn.BatchNorm2d, self_attention=False, if_SN=False, last_layer_act=None):\n super(UNet, self).__init__()\n self.inc = inconv(input_channel, 64//feature_scale,\n norm=norm, dropout=encoder_dropout)\n self.down1 = down(64//feature_scale, 128//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down2 = down(128//feature_scale, 256//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down3 = down(256//feature_scale, 512//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down4 = down(512//feature_scale, 512//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.up1 = up(512//feature_scale, 512//feature_scale, 256 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up2 = up(256//feature_scale, 256//feature_scale, 128 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up3 = up(128//feature_scale, 128//feature_scale, 64 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up4 = up(64//feature_scale, 64//feature_scale, 64 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n if self_attention:\n self.self_atn = Self_Attn(512//feature_scale, 'relu', if_SN=False)\n self.self_attention = self_attention\n self.outc = outconv(64//feature_scale, num_classes)\n self.n_classes = num_classes\n self.attention_map = None\n self.last_act = last_layer_act\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init_weights(m, init_type='kaiming')\n elif isinstance(m, nn.BatchNorm2d):\n init_weights(m, init_type='kaiming')\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n self.hidden_feature = x5\n if self.self_attention:\n x5, w_out, attention = self.self_atn(x5)\n self.attention_map = attention\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n\n if not self.last_act is None:\n x = self.last_act(x)\n\n return x\n\n def predict(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n if self.self_attention:\n x5, w_out, 
attention = self.self_atn(x5)\n self.attention_map = attention\n x = self.up1(x5, x4)\n x = self.up2(x, x3,)\n x = self.up3(x, x2,)\n x = self.up4(x, x1,)\n x = self.outc(x)\n if self.self_attention:\n return x, w_out, attention\n\n return x\n\n def get_net_name(self):\n return 'unet'\n\n def adaptive_bn(self, if_enable=False):\n if if_enable:\n for name, module in self.named_modules():\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n module.train()\n module.track_running_stats = True\n\n def init_bn(self):\n for name, module in self.named_modules():\n # print(name, module)\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n # print (module)\n module.running_mean.zero_()\n module.running_var.fill_(1)\n\n def fix_conv_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Conv2d):\n # print(name)\n for k in module.parameters(): # fix all conv layers\n k.requires_grad = False\n\n else:\n for k in module.parameters():\n k.requires_grad = True\n\n def activate_conv_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Conv2d):\n # print(name)\n for k in module.parameters():\n k.requires_grad = True\n\n def print_bn(self):\n for name, module in self.named_modules():\n # print(name, module)\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n print(module.running_mean)\n print(module.running_var)\n\n def fix_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.BatchNorm2d):\n for k in module.parameters(): # fix all conv layers\n k.requires_grad = False\n elif 'outc' in name:\n if isinstance(module, nn.Conv2d):\n for k in module.parameters(): # except last layers\n k.requires_grad = True\n else:\n for k in module.parameters(): # fix all conv layers\n k.requires_grad = False\n\n def get_adapted_params(self):\n for name, module in self.named_modules():\n # if isinstance(module,nn.BatchNorm2d):\n # for p in module.parameters():\n # yield p\n # if 'outc' in name:\n # if isinstance(module,nn.Conv2d):\n # for p in module.parameters(): ##fix all conv layers\n # yield p\n for k in module.parameters(): # fix all conv layers\n if k.requires_grad:\n yield k\n\n def get_1x_lr_params_NOscale(self):\n \"\"\"\n This generator returns all the parameters of the net except for\n the last classification layer. 
Note that for each batchnorm layer,\n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return\n any batchnorm parameter\n \"\"\"\n b = []\n\n b.append(self.inc)\n b.append(self.down1)\n b.append(self.down2)\n b.append(self.down3)\n b.append(self.down4)\n b.append(self.up1)\n b.append(self.up2)\n b.append(self.up3)\n b.append(self.up4)\n for i in range(len(b)):\n for j in b[i].modules():\n jj = 0\n for k in j.parameters():\n jj += 1\n if k.requires_grad:\n yield k\n\n def get_10x_lr_params(self):\n \"\"\"\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n \"\"\"\n b = []\n b.append(self.outc.parameters())\n for j in range(len(b)):\n for i in b[j]:\n yield i\n\n def optim_parameters(self, args):\n return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate},\n {'params': self.get_10x_lr_params(), 'lr': 10 * args.learning_rate}]\n\n def cal_num_conv_parameters(self):\n cnt = 0\n\n for module_name, module in self.named_modules():\n if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Conv2d):\n # print(module_name)\n for name, param in module.named_parameters():\n if param.requires_grad:\n if 'weight' in name:\n print(name, param.data)\n param = param.view(-1, 1)\n param.squeeze()\n cnt += len(param.data)\n return cnt\n\n\nclass DeeplySupervisedUNet(nn.Module):\n def __init__(self, input_channel, num_classes, base_n_filters=64, dropout=None, activation=nn.ReLU):\n super(DeeplySupervisedUNet, self).__init__()\n self.inc = inconv(input_channel, base_n_filters, activation=activation)\n self.down1 = down(base_n_filters, base_n_filters *\n 2, activation=activation)\n self.down2 = down(base_n_filters*2, base_n_filters *\n 4, activation=activation)\n self.down3 = down(base_n_filters*4, base_n_filters *\n 8, activation=activation)\n self.down4 = down(base_n_filters*8, base_n_filters *\n 8, activation=activation)\n self.up1 = up(base_n_filters*8, base_n_filters*8,\n base_n_filters*4, activation=activation, dropout=dropout)\n self.up2 = up(base_n_filters*4, base_n_filters*4,\n base_n_filters*2, activation=activation, dropout=dropout)\n self.up3 = up(base_n_filters*2, base_n_filters*2,\n base_n_filters, activation=activation, dropout=dropout)\n self.up4 = up(base_n_filters, base_n_filters,\n base_n_filters, activation=activation)\n self.up2_conv1 = outconv_relu(\n base_n_filters*2, num_classes, activation=None)\n self.up2_up = nn.Upsample(\n scale_factor=2, mode='bilinear', align_corners=True)\n self.up3_conv1 = outconv_relu(\n base_n_filters, num_classes, activation=None)\n self.up3_up = nn.Upsample(\n scale_factor=2, mode='bilinear', align_corners=True)\n\n self.outc = outconv(base_n_filters, num_classes)\n self.n_classes = num_classes\n self.dropout = dropout\n if dropout is not None:\n self.dropoutlayer = nn.Dropout2d(p=dropout)\n else:\n self.dropoutlayer = nn.Dropout2d(p=0)\n\n def forward(self, x, multi_out=False):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.dropoutlayer(self.down2(x2)) # tail it after pooling\n x4 = self.dropoutlayer(self.down3(x3))\n x5 = self.dropoutlayer(self.down4(x4))\n\n x = self.up1(x5, x4)\n x_2 = self.up2(x, x3) # insert dropout after concat\n dsv_x_2 = self.up2_conv1(x_2)\n dsv_x_2_up = self.up2_up(dsv_x_2)\n\n x_3 = self.up3(x_2, x2)\n dsv_x_3 = self.up3_conv1(x_3)\n dsv_mixed = dsv_x_2_up+dsv_x_3\n dsv_mixed_up = self.up3_up(dsv_mixed)\n\n x_4 = self.up4(x_3, x1)\n out = self.outc(x_4)\n final_output = 
torch.add(out, dsv_mixed_up)\n if multi_out:\n return out, dsv_mixed_up, final_output\n\n return final_output\n\n def predict(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x = self.up1(x5, x4)\n x_2 = self.up2(x, x3)\n dsv_x_2 = self.up2_conv1(x_2)\n dsv_x_2_up = self.up2_up(dsv_x_2)\n\n x_3 = self.up3(x_2, x2)\n dsv_x_3 = self.up3_conv1(x_3)\n dsv_mixed = dsv_x_2_up + dsv_x_3\n dsv_mixed_up = self.up3_up(dsv_mixed)\n\n x_4 = self.up4(x_3, x1)\n out = self.outc(x_4)\n final_output = torch.add(out, dsv_mixed_up)\n\n return final_output\n\n def get_net_name(self):\n return 'dsv_unet'\n\n def adaptive_bn(self, if_enable=False):\n if if_enable:\n for name, module in self.named_modules():\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n # if 'down' in name or 'up' in name or 'inc' in name:\n # print (module.name)\n module.train()\n module.track_running_stats = True\n\n def init_bn(self):\n for name, module in self.named_modules():\n # print(name, module)\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n # print(module)\n module.running_mean.zero_()\n module.running_var.fill_(1)\n\n def fix_params(self):\n for name, param in self.named_parameters():\n if 'outc' in name:\n # initialize\n if 'conv' in name and 'weight' in name:\n n = param.size(0) * param.size(2) * param.size(3)\n param.data.normal_().mul_(math.sqrt(2. / n))\n else:\n param.requires_grad = False\n\n def cal_num_conv_parameters(self):\n cnt = 0\n\n for module_name, module in self.named_modules():\n if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Conv2d):\n # print (module_name)\n for name, param in module.named_parameters():\n if param.requires_grad:\n if 'weight' in name:\n print(name, param.data)\n param = param.view(-1, 1)\n param.squeeze()\n cnt += len(param.data)\n return cnt\n\n\nclass UNetv2(nn.Module):\n def __init__(self, input_channel, num_classes, feature_scale=1, encoder_dropout=None, decoder_dropout=None, norm=nn.BatchNorm2d, self_attention=False, if_SN=False, last_layer_act=None):\n super(UNetv2, self).__init__()\n self.inc = inconv(input_channel, 64//feature_scale,\n norm=norm, dropout=encoder_dropout)\n self.down1 = down(64//feature_scale, 128//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down2 = down(128//feature_scale, 256//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down3 = down(256//feature_scale, 512//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.down4 = down(512//feature_scale, 1024//feature_scale,\n norm=norm, if_SN=if_SN, dropout=encoder_dropout)\n self.up1 = up(1024//feature_scale, 512//feature_scale, 256 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up2 = up(256//feature_scale, 256//feature_scale, 128 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up3 = up(128//feature_scale, 128//feature_scale, 64 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n self.up4 = up(64//feature_scale, 64//feature_scale, 64 //\n feature_scale, norm=norm, dropout=decoder_dropout, if_SN=if_SN)\n if self_attention:\n self.self_atn = Self_Attn(512//feature_scale, 'relu', if_SN=False)\n self.self_attention = self_attention\n self.outc = outconv(64//feature_scale, num_classes)\n self.n_classes = num_classes\n self.attention_map = None\n self.last_act = last_layer_act\n\n for m in self.modules():\n if 
isinstance(m, nn.Conv2d):\n init_weights(m, init_type='kaiming')\n elif isinstance(m, nn.BatchNorm2d):\n init_weights(m, init_type='kaiming')\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n self.hidden_feature = x5\n if self.self_attention:\n x5, w_out, attention = self.self_atn(x5)\n self.attention_map = attention\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n\n if not self.last_act is None:\n x = self.last_act(x)\n\n return x\n\n def predict(self, x):\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n if self.self_attention:\n x5, w_out, attention = self.self_atn(x5)\n self.attention_map = attention\n x = self.up1(x5, x4)\n x = self.up2(x, x3,)\n x = self.up3(x, x2,)\n x = self.up4(x, x1,)\n x = self.outc(x)\n if self.self_attention:\n return x, w_out, attention\n\n return x\n\n def get_net_name(self):\n return 'unet'\n\n def adaptive_bn(self, if_enable=False):\n if if_enable:\n for name, module in self.named_modules():\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n module.train()\n module.track_running_stats = True\n\n def init_bn(self):\n for name, module in self.named_modules():\n # print(name, module)\n if isinstance(module, nn.BatchNorm2d) or isinstance(module, BatchInstanceNorm2d):\n # print(module)\n module.running_mean.zero_()\n module.running_var.fill_(1)\n\n\nif __name__ == '__main__':\n model = UNet(input_channel=1, feature_scale=1,\n num_classes=4, encoder_dropout=0.3)\n model.train()\n image = torch.autograd.Variable(torch.randn(2, 1, 224, 224), volatile=True)\n result = model(image)\n print(model.hidden_feature.size())\n print(result.size())\n" ]
[ [ "torch.nn.Upsample", "torch.nn.Dropout2d" ] ]
yf225/functorch
[ "4f0603569827ff0249c5f58f36d39b3fc6ee7103" ]
[ "test/discover_coverage.py" ]
[ "import torch\nimport copy\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom functorch_additional_op_db import additional_op_db\nfrom enum import Enum\nimport functorch._src.top_operators_github_usage as top_ops\nimport pprint\nimport unittest\nimport enum\nfrom functorch_lagging_op_db import in_functorch_lagging_op_db\nfrom torch.testing._internal.common_device_type import toleranceOverride\n\n# Importing these files make modifications to the op_db that we need\nimport test_ops # noqa: F401\nimport test_vmap # noqa: F401\n\nall_overridable = list(torch.overrides.get_testing_overrides().keys())\n\npublic_docs = [\n (torch.nn.functional, 'torch.nn.functional', 'docs/source/nn.functional.rst'),\n (torch.fft, 'torch.fft', 'docs/source/fft.rst'),\n (torch.special, 'torch.special', 'docs/source/special.rst'),\n (torch.linalg, 'torch.linalg', 'docs/source/linalg.rst'),\n (torch, 'torch', 'docs/source/torch.rst'),\n (torch.Tensor, 'torch.Tensor', 'docs/source/tensors.rst'),\n]\n\n# torch.abs, Tensor.abs, Tensor.abs_ are all considered to be different\n\n\ndef get_public_overridable_apis(pytorch_root='/raid/rzou/pt/debug-cpu'):\n results = {}\n all_overridable_apis = set(torch.overrides.get_testing_overrides().keys())\n for module, module_name, src in public_docs:\n with open(f'{pytorch_root}/{src}') as f:\n lines = f.readlines()\n # APIs eitehr begin with 4 spaces or \".. autofunction::\"\n api_lines1 = [line.strip() for line in lines if line.startswith(' ' * 4)]\n api_lines2 = [line.strip()[len('.. autofunction:: '):]\n for line in lines if line.startswith('.. autofunction::')]\n lines = api_lines1 + api_lines2\n lines = [line[7:] if line.startswith('Tensor.') else line for line in lines]\n lines = [line for line in lines if hasattr(module, line)]\n for line in lines:\n api = getattr(module, line)\n if api in all_overridable_apis:\n results[f'{module_name}.{line}'] = api\n return results\n\n\ndenylist = {\n 'torch.Tensor.data_ptr',\n 'torch.Tensor.dim',\n 'torch.Tensor.element_size',\n 'torch.Tensor.backward',\n 'torch.Tensor.as_strided',\n 'torch.Tensor.register_hook',\n 'torch.Tensor.record_stream',\n 'torch.Tensor.qscheme',\n 'torch.Tensor.ndimension',\n 'torch.Tensor.smm',\n 'torch.Tensor.sspaddmm',\n 'torch.Tensor.retain_grad',\n 'torch.Tensor.sparse_mask',\n 'torch.Tensor.sparse_dim',\n 'torch.Tensor.dense_dim',\n 'torch.Tensor.values',\n 'torch.Tensor.indices',\n 'torch.Tensor.numel',\n 'torch.Tensor.size',\n 'torch.Tensor.nelement',\n 'torch.Tensor.q_scale',\n 'torch.Tensor.q_zero_point',\n 'torch.Tensor.q_per_channel_scales',\n 'torch.Tensor.q_per_channel_zero_points',\n 'torch.Tensor.q_per_channel_axis',\n 'torch.Tensor.int_repr',\n 'torch.Tensor.to_sparse',\n 'torch.Tensor.is_inference',\n 'torch.Tensor.storage',\n 'torch.Tensor.storage_type',\n}\n\n\ndef get_method_only_ops_we_care_about():\n apis = get_public_overridable_apis()\n result = []\n for key, _ in apis.items():\n if not key.startswith('torch.Tensor'):\n continue\n if key in denylist:\n continue\n api = key.split('.')[2]\n # filter out in-place\n if api.endswith('_'):\n continue\n if f'torch.{api}' not in apis.keys():\n result.append(api)\n return result\n\n# Deduplicates torch.abs and Tensor.abs\n\n\ndef get_public_overridable_ops():\n results = get_public_overridable_apis()\n cpy = copy.deepcopy(results)\n for key, _ in cpy.items():\n if not key.startswith('torch.Tensor'):\n continue\n api = key.split('.')[2]\n if f'torch.{api}' in results.keys():\n del results[key]\n return results\n\n\ndef 
get_public_overridable_outplace_ops():\n results = get_public_overridable_ops()\n cpy = copy.deepcopy(results)\n for key, _ in cpy.items():\n # NB: there are no dunder methods bcs we don't document those\n if key.endswith('_'):\n del results[key]\n return results\n\n\ndef get_public_overridable_outplace_we_care_about():\n results = get_public_overridable_outplace_ops()\n cpy = copy.deepcopy(results)\n for key, _ in cpy.items():\n # quantization\n if 'quant' in key or '.q_' in key:\n del results[key]\n\n # is_cpu, etc. It doesn't make sense to have OpInfos for these\n if '.is_' in key:\n del results[key]\n\n if key in denylist and key in results:\n del results[key]\n return results\n\n# e.g. nn.functional.softmax\n\n\ndef get_op(dotted_name):\n names = dotted_name.split('.')\n mod = torch\n for name in names:\n if not hasattr(mod, name):\n return None\n mod = getattr(mod, name)\n return mod\n\n# Maps function -> [OpInfo]\n\n\ndef get_ops_covered_by_opinfos():\n ops = {}\n\n def safe_append(dct, key, val):\n if key in dct:\n dct[key].append(val)\n else:\n dct[key] = [val]\n\n for opinfo in op_db:\n func_op = get_op(opinfo.name)\n if func_op:\n safe_append(ops, func_op, opinfo)\n if opinfo.method_variant:\n safe_append(ops, opinfo.method_variant, opinfo)\n if opinfo.inplace_variant:\n safe_append(ops, opinfo.inplace_variant, opinfo)\n for alias in opinfo.aliases:\n safe_append(ops, alias.op, opinfo)\n return ops\n\n\nfactory_fns = {\n 'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'randperm',\n 'linspace', 'logspace', 'hann_window', 'full', 'eye', 'blackman_window',\n 'barlett_window', 'randint', 'range', 'arange',\n}\n\n\ndef get_top_ops(torch_threshold, nn_fn_threshold):\n denylist = set({\n # These are either not real \"operators\", factory functions\n # that trivially work, or not-documented ops.\n 'load', 'no_grad', 'save', 'from_numpy',\n 'manual_seed', 'set_grad_enabled',\n 'set_default_tensor_type', 'set_num_threads',\n 'set_printoptions', 'numel',\n 'set_default_dtype', 'sparse_coo_tensor', 'set_rng_state',\n 'get_rng_state', 'get_default_dtype', 'initial_seed',\n 'get_num_threads', 'quantize_per_tensor',\n 'hann_window', 'is_tensor', 'as_tensor',\n 'equal', 'enable_grad', 'seed', 'is_storage',\n 'is_floating_point', 'nn.functional.torch',\n 'set_flush_denormal', 'set_num_interop_threads', 'dequantize',\n 'get_num_interop_threads', 'nn.functional.math',\n 'nn.functional.threshold_',\n 'nn.functional.selu_',\n 'nn.functional.elu_',\n 'nn.functional.rrelu_',\n 'nn.functional.leaky_relu_',\n 'nn.functional.hardtanh_',\n 'nn.functional.has_torch_function',\n 'nn.functional.has_torch_function_unary',\n 'nn.functional.has_torch_function_variadic',\n 'nn.functional.handle_torch_function',\n 'nn.functional.adaptive_max_pool1d_with_indices',\n 'nn.functional.adaptive_max_pool2d_with_indices',\n 'nn.functional.adaptive_max_pool3d_with_indices',\n 'nn.functional.fractional_max_pool2d_with_indices',\n 'nn.functional.fractional_max_pool3d_with_indices',\n 'is_complex',\n 'grad',\n 'quantize_per_channel',\n 'nn.functional.max_pool2d_with_indices',\n 'nn.functional.max_pool3d_with_indices',\n 'nn.functional.max_pool1d_with_indices',\n 'nn.functional.celu_',\n 'nn.functional.grad',\n 'nn.functional.relu_',\n 'nn.functional.boolean_dispatch',\n 'nn.functional.assert_int_or_pair',\n 'fft', # is namespace\n })\n\n torch_ops = [op[0] for op in top_ops.top_torch]\n nn_fn_ops = [op[0] for op in top_ops.get_nn_functional_top_list()]\n torch_ops = [op for op in torch_ops if op not in 
denylist]\n nn_fn_ops = [op for op in nn_fn_ops if op not in denylist]\n\n ops = torch_ops[:torch_threshold] + nn_fn_ops[:nn_fn_threshold]\n return ops\n\n\ndef get_top_ops_not_covered_by_opinfo(torch_threshold=0, nn_fn_threshold=0):\n ops = get_top_ops(torch_threshold, nn_fn_threshold)\n\n ops_with_opinfo = []\n for op in op_db:\n ops_with_opinfo.append(op.name)\n ops_with_opinfo.extend([op.name for op in op.aliases])\n ops_with_opinfo = set(ops_with_opinfo)\n\n result = [op for op in ops if op not in ops_with_opinfo]\n result = [op for op in result if op not in denylist]\n result = [op for op in result if op not in factory_fns]\n return result\n\n\ndef get_covered_ops(ops_list, invert=False):\n ops_covered_by_opinfo = get_ops_covered_by_opinfos()\n overridable_outplace_ops = ops_list\n results = {}\n for key, op in overridable_outplace_ops.items():\n cond = op in ops_covered_by_opinfo\n if invert:\n cond = not cond\n if cond:\n results[key] = op\n return results\n\n\nclass Status(Enum):\n Correct = 0\n Fast = 1\n\n\ntests = {\n 'test_vmap_exhaustive',\n 'test_op_has_batch_rule',\n 'test_vjp',\n 'test_vmapvjp',\n 'test_vmapvjp_has_batch_rule',\n 'test_jvp',\n 'test_vmapjvp',\n}\n\n\ndef is_decorateinfo_skip_or_xfail(decorateinfo):\n assert len(decorateinfo.decorators) == 1\n actual_decorator = decorateinfo.decorators[0]\n if isinstance(actual_decorator, toleranceOverride):\n return False\n if actual_decorator == unittest.expectedFailure:\n return True\n # Assume the rest are skips\n return True\n\n\ndef get_all_tested_ops():\n overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()\n op_to_opinfo = get_ops_covered_by_opinfos()\n result = set({})\n for name, op in get_covered_ops(overridable_outplace_we_care_about).items():\n opinfos = op_to_opinfo[op]\n for opinfo in opinfos:\n result.add(opinfo.name)\n return result\n\n\ndef get_skipped_or_xfailed_ops_for(test_name):\n overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()\n op_to_opinfo = get_ops_covered_by_opinfos()\n result = set({})\n for name, op in get_covered_ops(overridable_outplace_we_care_about).items():\n opinfos = op_to_opinfo[op]\n for opinfo in opinfos:\n for decorator in opinfo.decorators:\n if not hasattr(decorator, 'test_name'):\n continue\n if decorator.test_name != test_name:\n continue\n if is_decorateinfo_skip_or_xfail(decorator):\n result.add(opinfo.name)\n return result\n\n\n# import pdb; pdb.set_trace()\n\ndef get_statuses(for_subset=None, invert=False):\n overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()\n if for_subset is not None:\n overridable_outplace_we_care_about = {\n k: v\n for k, v in overridable_outplace_we_care_about.items()\n # Removes \"torch.\"\n if k[6:] in for_subset\n }\n op_to_opinfo = get_ops_covered_by_opinfos()\n result = {}\n _ = get_covered_ops(overridable_outplace_we_care_about)\n\n def get_covered_tests(op):\n opinfos = op_to_opinfo[op]\n result = copy.deepcopy(tests)\n for opinfo in opinfos:\n for decorator in opinfo.decorators:\n if not hasattr(decorator, 'test_name'):\n continue\n if decorator.test_name in tests and decorator.test_name in result:\n result.remove(decorator.test_name)\n return result\n\n def get_all_aliases(op):\n opinfos = op_to_opinfo[op]\n result = []\n for opinfo in opinfos:\n result.append(opinfo.name)\n result.extend(opinfo.aliases)\n return set(result)\n\n for name, op in get_covered_ops(overridable_outplace_we_care_about).items():\n successful_tests = 
get_covered_tests(op)\n failed_tests = tests - successful_tests\n result[name] = failed_tests if invert else successful_tests\n return result\n\n\ndef transpose_statuses(for_subset=None, invert=False):\n statuses = get_statuses(for_subset, invert=invert)\n result = {}\n for test in tests:\n result[test] = set({})\n for op, supported in statuses.items():\n for test in supported:\n result[test].add(op)\n return result\n\n\noverridable_apis = get_public_overridable_apis()\n\noverridable_ops = get_public_overridable_ops()\n\noverridable_outplace_ops = get_public_overridable_outplace_ops()\n\noverridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()\n\ntested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about)\nuntested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about, invert=True)\n\n# print(\"List of OpInfos we need:\")\n# for key in untested_overridable_outplace_ops.keys():\n# print(key)\n# print(\"-\" * 80)\n# print(\"\")\n\nprint(f'Overridable public APIs: {len(overridable_apis)}')\nprint(f'Overridable public ops: {len(overridable_ops)}')\nprint(f'Overridable public outplace ops: {len(overridable_outplace_ops)}')\nprint(f'Overridable public outplace ops we care about: {len(overridable_outplace_we_care_about)}')\nprint(f'OpInfo-tested overridable public outplace ops: {len(tested_overridable_outplace_ops)}')\n\n\ndef remove_torch(name):\n assert name[:6] == 'torch.'\n return name[6:]\n\n\ndef get_list_of_all_tests():\n all_tests = list(tested_overridable_outplace_ops.keys())\n return set([remove_torch(test) for test in all_tests])\n\n\nmytest = {\n 'test_vmap_exhaustive',\n 'test_op_has_batch_rule',\n 'test_vjp',\n 'test_vmapvjp',\n 'test_vmapvjp_has_batch_rule',\n}\n\nprint('*' * 80)\nall_tests = get_list_of_all_tests()\nfor test in mytest:\n result = get_skipped_or_xfailed_ops_for(test)\n diff = len(all_tests - result)\n print(f'{test}: {diff}')\n\n\ndef get_jvp_coverage(subset=None):\n # - number that support autograd\n # - number that support forward_ad (in pytorch core)\n # - number that support functorch.jvp\n op_to_opinfo = get_ops_covered_by_opinfos()\n ops_dct = tested_overridable_outplace_ops\n if subset is not None:\n ops_dct = {name: op for name, op in ops_dct.items()\n if remove_torch(name) in subset}\n supports_autograd_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()\n if op_to_opinfo[fn][0].supports_autograd}\n supports_forwardad_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()\n if op_to_opinfo[fn][0].supports_forward_ad}\n\n ops = set([remove_torch(test) for test in list(ops_dct.keys())])\n supports_autograd = set([remove_torch(test)\n for test in list(supports_autograd_ops_dct.keys())])\n supports_forward_ad = set([remove_torch(test)\n for test in list(supports_forwardad_ops_dct.keys())])\n assert supports_forward_ad.issubset(supports_autograd)\n assert supports_autograd.issubset(ops)\n\n failed_ops = get_skipped_or_xfailed_ops_for('test_jvp')\n\n coverage = len(supports_forward_ad - failed_ops)\n no_forward_ad = len(supports_autograd) - len(supports_forward_ad)\n print(f'test_jvp, {coverage}, {no_forward_ad}, {len(ops)}')\n\n\nget_jvp_coverage()\nget_jvp_coverage(get_top_ops(100, 25))\nfor op in get_top_ops(100, 25):\n print(op)\nprint('*' * 80)\n\n# result = get_skipped_or_xfailed_ops_for('test_vmap_exhaustive')\n# result = get_skipped_or_xfailed_ops_for('test_op_has_batch_rule')\n# result = get_skipped_or_xfailed_ops_for('test_vjp')\n# result = 
get_skipped_or_xfailed_ops_for('test_vmapvjp')\n# result = get_skipped_or_xfailed_ops_for('test_vmapvjp_has_batch_rule')\n# import pdb; pdb.set_trace()\n\nstatuses = transpose_statuses()\nfor test in tests:\n print(f'{test} coverage {len(statuses[test])}')\n\nmethod_only_ops = get_method_only_ops_we_care_about()\n# for op in method_only_ops:\n# print(f' {op},')\n\ntop_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(100, 25)\nprint('=' * 80)\nfor op in top_ops_not_covered_by_opinfo:\n print(f'{op}, {top_ops.usage_count[op]}')\n\n# print(\"top ops not covered by opinfo: \")\n# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(200, 50)\n# for op in top_ops_not_covered_by_opinfo:\n# print(f'{op}, {top_ops.usage_count[op]}')\n\n# print(\"top ops not covered by opinfo: \")\n# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(220, 92)\n# for op in top_ops_not_covered_by_opinfo:\n# print(f'{op}, {top_ops.usage_count[op]}')\n\n# print(\"top ops not covered by opinfo: \")\n# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(999, 999)\n# for op in top_ops_not_covered_by_opinfo:\n# print(f'{op}, {top_ops.usage_count[op]}')\n\n\ndef remove_from_set(parent, to_remove):\n for to_remove_elt in to_remove:\n if to_remove_elt in parent:\n parent.remove(to_remove_elt)\n\n\ndef print_coverage_info(th=100, nn=25):\n print('=' * 80)\n print(f\"top {th}, {nn} coverage\")\n statuses = transpose_statuses(get_top_ops(th, nn), invert=True)\n top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(th, nn)\n\n # testing problems\n exemptions = {\n 'torch.nn.functional.dropout', # randomness\n }\n\n # Allowed exemptions\n vmap_exemptions = {\n 'torch.randn_like', # randomness\n 'torch.rand_like', # randomness\n 'torch.allclose', # number output\n 'torch.unique', # dynamic\n 'torch.nonzero', # dynamic\n 'torch.masked_select', # dynamic\n 'torch.prod', # dynamic (backward)\n 'torch.norm', # norm with nuc is not commonly used; we support the other cases.\n 'torch.svd', # There isn't a bug, it is just nondeterministic so we can't test it.\n 'torch.nn.functional.embedding', # We support everything except the sparse option.\n }\n remove_from_set(statuses['test_vmap_exhaustive'], vmap_exemptions)\n remove_from_set(statuses['test_vmapvjp'], vmap_exemptions)\n remove_from_set(statuses['test_vmapvjp_has_batch_rule'], vmap_exemptions)\n remove_from_set(statuses['test_op_has_batch_rule'], vmap_exemptions)\n remove_from_set(statuses['test_vmapjvp'], vmap_exemptions)\n for test in tests:\n remove_from_set(statuses[test], exemptions)\n\n print(f\"total ops in set: {th + nn}\")\n print(f\"tested by OpInfo: {th + nn - len(top_ops_not_covered_by_opinfo)}\")\n for test in tests:\n if test in {'test_jvp', 'test_vmapjvp'}:\n continue\n print(f'{test} failing coverage {len(statuses[test])}')\n\n # We don't care about these yet\n del statuses['test_jvp']\n del statuses['test_vmapjvp']\n\n pprint.pprint(statuses)\n\n\ndef get_name_to_opinfo_map():\n dct = {}\n for op in (op_db + additional_op_db):\n def add(name, op):\n if name not in dct:\n dct[name] = []\n dct[name].append(op)\n add(op.name, op)\n for alias in op.aliases:\n add(alias.name, op)\n return dct\n\n\nNAME_TO_OPINFO = get_name_to_opinfo_map()\n\n\nclass Support(enum.Enum):\n NO = 0\n YES = 1\n UNKNOWN = 2\n\n\nFACTORY_FNS = {\n 'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'range',\n 'full', 'randperm', 'eye', 'randint', 'linspace', 'logspace',\n}\n\nVJP_EXEMPTIONS = {\n 
'nn.functional.dropout', # not actually problem, randomness testing artifact\n 'nn.functional.dropout2d', # not actually problem, randomness testing artifact\n 'nn.functional.rrelu', # not actually problem, randomness testing artifact\n}\n\nVMAP_EXEMPTIONS = {\n 'randn_like', # randomness\n 'rand_like', # randomness\n 'allclose', # number output\n 'unique', # dynamic\n 'nonzero', # dynamic\n 'masked_select', # dynamic\n 'prod', # dynamic (backward)\n 'norm', # norm with nuc is not commonly used; we support the other cases.\n 'svd', # There isn't a bug, it is just nondeterministic so we can't test it.\n 'nn.functional.embedding', # We support everything except the sparse option.\n 'nn.functional.dropout', # randomness\n 'nn.functional.dropout2d', # randomness\n 'bernoulli', # randomness\n 'multinomial', # randomness\n 'normal', # randomness\n}\n\nJVP_EXEMPTIONS = {\n 'nn.functional.dropout', # not actually problem, randomness testing artifact\n 'nn.functional.dropout2d', # not actually problem, randomness testing artifact\n 'nn.functional.rrelu', # not actually problem, randomness testing artifact\n 'normal', # not actually problem, randomness testing artifact\n 'bernoulli', # not actually problem, randomness testing artifact\n # 'multinomial',\n}\n\n\nclass Operator:\n def __init__(self, name):\n self.name = name\n self.opinfos = NAME_TO_OPINFO.get(name, None)\n assert self.opinfos is None or len(self.opinfos) > 0\n\n def has_opinfo(self):\n return self.opinfos is not None\n\n def __repr__(self):\n return f'Operator(\"{self.name}\")'\n\n def __hash__(self):\n return hash(self.name)\n\n def no_opinfos_skip_test(self, test_name):\n \"\"\"Returns NO if any opinfos have a skip or xfail for the test\"\"\"\n if not self.has_opinfo():\n return Support.UNKNOWN\n if not any([o in additional_op_db for o in self.opinfos]):\n if not any([in_functorch_lagging_op_db(o) for o in self.opinfos]):\n return Support.UNKNOWN\n for opinfo in self.opinfos:\n for decorator in opinfo.decorators:\n if not hasattr(decorator, 'test_name'):\n continue\n if decorator.test_name != test_name:\n continue\n if is_decorateinfo_skip_or_xfail(decorator):\n return Support.NO\n return Support.YES\n\n def any_opinfo_attr(self, attr):\n if not self.has_opinfo():\n raise RuntimeError()\n return any([getattr(opinfo, attr) for opinfo in self.opinfos])\n\n def all_opinfo_attr(self, attr):\n if not self.has_opinfo():\n raise RuntimeError()\n return all([getattr(opinfo, attr) for opinfo in self.opinfos])\n\n def supports_vjp(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in VJP_EXEMPTIONS:\n return Support.YES\n return self.no_opinfos_skip_test('test_vjp')\n\n def supports_vmap(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in VMAP_EXEMPTIONS:\n return Support.YES\n return self.no_opinfos_skip_test('test_vmap_exhaustive')\n\n def supports_fast_vmap(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in VMAP_EXEMPTIONS:\n return Support.YES\n return self.no_opinfos_skip_test('test_op_has_batch_rule')\n\n def supports_vmapvjp(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in VMAP_EXEMPTIONS:\n return Support.YES\n return self.no_opinfos_skip_test('test_vmapvjp')\n\n def supports_fast_vmapvjp(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in VMAP_EXEMPTIONS:\n return Support.YES\n return self.no_opinfos_skip_test('test_vmapvjp_has_batch_rule')\n\n def supports_jvp(self):\n if self.name in FACTORY_FNS:\n return 
Support.YES\n if self.name in JVP_EXEMPTIONS:\n return Support.YES\n if not self.has_opinfo():\n return Support.UNKNOWN\n if self.any_opinfo_attr('supports_autograd') and \\\n not self.all_opinfo_attr('supports_forward_ad'):\n return Support.NO\n return self.no_opinfos_skip_test('test_jvp')\n\n def supports_jvpvjp(self):\n if self.name in FACTORY_FNS:\n return Support.YES\n exemptions = {\n # we have support (see OpInfo), testing artifact\n 'torch.nn.functional.dropout2d',\n # exception: we dont even support double backward for this\n 'torch.nn.functional.hardswish',\n 'bernoulli', # this isn't differentiable\n 'normal', # not differentiable\n }\n if self.name in exemptions:\n return Support.YES\n return self.no_opinfos_skip_test('test_jvpvjp')\n\n def _supports_vmapjvp_base(self, test):\n if self.name in FACTORY_FNS:\n return Support.YES\n if self.name in JVP_EXEMPTIONS:\n return Support.YES\n VMAPJVP_EXEMPTIONS = {\n 'prod', # dynamic (backward)\n 'nn.functional.batch_norm', # testing problem\n 'normal', # not actually problem, randomness testing artifact\n 'bernoulli', # not actually problem, randomness testing artifact\n }\n if self.name in VMAPJVP_EXEMPTIONS:\n return Support.YES\n if not self.has_opinfo():\n return Support.UNKNOWN\n if self.any_opinfo_attr('supports_autograd') and \\\n not self.all_opinfo_attr('supports_forward_ad'):\n return Support.NO\n return self.no_opinfos_skip_test(test)\n\n def supports_vmapjvp(self):\n return self._supports_vmapjvp_base('test_vmapjvpall')\n\n def supports_fast_vmapjvp(self):\n return self._supports_vmapjvp_base('test_vmapjvpall_has_batch_rule')\n\n\nclass OperatorSet:\n def __init__(self, operators):\n self.data = set(operators)\n\n @classmethod\n def from_names(cls, names):\n return OperatorSet([Operator(name) for name in names])\n\n @classmethod\n def from_top_ops_threshold(cls, torch_threshold, nn_fn_threshold):\n names = get_top_ops(torch_threshold, nn_fn_threshold)\n return cls.from_names(names)\n\n @classmethod\n def from_top125(cls):\n return cls.from_top_ops_threshold(100, 25)\n\n @classmethod\n def from_top160(cls):\n return cls.from_top_ops_threshold(107, 53)\n\n @classmethod\n def all(cls):\n dct = get_public_overridable_outplace_we_care_about()\n names = dct.keys()\n names_sanitized = []\n for n in names:\n torch_tensor = 'torch.Tensor.'\n torch_dot = 'torch.'\n if n.startswith(torch_tensor):\n names_sanitized.append(n[len(torch_tensor):])\n elif n.startswith(torch_dot):\n names_sanitized.append(n[len(torch_dot):])\n else:\n assert False\n return cls.from_names(names_sanitized)\n\n def query(self, operator_method, filter=(Support.NO, Support.YES, Support.UNKNOWN)):\n result = {}\n for key in filter:\n result[key] = set([])\n for op in self.data:\n support_status = operator_method(op)\n if support_status in filter:\n result[support_status].add(op)\n return result\n\n def summary(self):\n checks = [\n 'supports_vjp',\n 'supports_vmap',\n 'supports_fast_vmap',\n 'supports_vmapvjp',\n 'supports_fast_vmapvjp',\n 'supports_jvp',\n 'supports_vmapjvp',\n 'supports_fast_vmapjvp',\n 'supports_jvpvjp',\n ]\n result = ['test, yes, no, unknown']\n for check in checks:\n accessor = getattr(Operator, check)\n all_results = self.query(accessor)\n yes_amt = len(all_results[Support.YES])\n no_amt = len(all_results[Support.NO])\n unknown_amt = len(all_results[Support.UNKNOWN])\n result.append(f'{check}, {yes_amt}, {no_amt}, {unknown_amt}')\n return '\\n'.join(result)\n\n\nopset = OperatorSet.all()\nhas_no_opinfo = opset.query(Operator.has_opinfo, 
(False,))\n\nprint(\"=\" * 30 + \" Summary \" + \"=\" * 30)\nprint(opset.summary())\n\n# sanity checks\nresult = opset.query(Operator.supports_vjp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n\nprint(\"=\" * 30 + \" Top 60 Summary \" + \"=\" * 30)\nopset = OperatorSet.from_top_ops_threshold(35, 25)\n# result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# result = opset.query(Operator.supports_jvp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# kresult = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))\n# kpprint.pprint(result)\n# result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# result = opset.query(Operator.supports_fast_vmapjvp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# pprint.pprint(result)\nprint(opset.summary())\n\nprint(\"=\" * 30 + \" Top 125 Summary \" + \"=\" * 30)\nopset = OperatorSet.from_top125()\n# result = opset.query(Operator.supports_vmap, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# result = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\nprint(\"supports_jvp\")\nresult = opset.query(Operator.supports_jvp, (Support.NO, Support.UNKNOWN))\npprint.pprint(result)\nprint(\"supports_vmapjvp\")\nresult = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))\npprint.pprint(result)\nprint(\"supports_jvpvjp\")\nresult = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))\npprint.pprint(result)\n# result = opset.query(Operator.supports_fast_vmapjvp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# pprint.pprint(result)\nprint(opset.summary())\n\n# print(\"=\" * 30 + \" Top 160 Summary \" + \"=\" * 30)\n# opset = OperatorSet.from_top160()\n# result = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))\n# pprint.pprint(result)\n# print(opset.summary())\n" ]
[ [ "torch.overrides.get_testing_overrides" ] ]
jeetbhatt-sys/DataPreProcess
[ "b2ead76c9369ee4e18c60bf244d5c2582d6958b1" ]
[ "src/preJPProcess.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 1 14:16:15 2021\r\n\r\n@author: jbhatt\r\n\"\"\"\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nclass preJPProcess():\r\n \r\n \"\"\"\r\n To initialize:\r\n \r\n e.g p = preProcess(dataFrame)\r\n \r\n A class used to represent an Pre Processing of Data Frame.\r\n\r\n ...\r\n\r\n Attributes\r\n ----------\r\n df : DataFrame\r\n pandas.DataFrame\r\n class pandas.DataFrame(data=None, index=None, columns=None, dtype=None, copy=None)[source]\r\n Two-dimensional, size-mutable, potentially heterogeneous tabular data.\r\n\r\n Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure.\r\n\r\n Parameters\r\n datandarray (structured or homogeneous), Iterable, dict, or DataFrame\r\n Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order.\r\n\r\n Changed in version 0.25.0: If data is a list of dicts, column order follows insertion-order.\r\n\r\n indexIndex or array-like\r\n Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided.\r\n\r\n columnsIndex or array-like\r\n Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, …, n). If data contains column labels, will perform column selection instead.\r\n\r\n dtypedtype, default None\r\n Data type to force. Only a single dtype is allowed. If None, infer.\r\n\r\n copybool or None, default None\r\n Copy data from inputs. For dict data, the default of None behaves like copy=True. For DataFrame or 2d ndarray input, the default of None behaves like copy=False.\r\n \r\n Methods\r\n -------\r\n preProcessData()\r\n Removes the null values from your dataframe.\r\n \r\n scaling()\r\n Standard Scaling of dataframe\r\n \"\"\"\r\n\r\n def __init__(self,df):\r\n self.df = df\r\n \r\n def preProcessData(self):\r\n print(\"Null values present in your data : \", self.df.isna().sum())\r\n print(\"Preprocessing the data, removing Null Values....\")\r\n columns = self.df.columns\r\n for col in columns:\r\n print(\"Processing : \" , col)\r\n self.df[col].fillna(self.df[col].mean(),inplace=True)\r\n return self.df\r\n \r\n def scaling(self):\r\n print(\"Scaling you data\")\r\n columns = self.df.columns\r\n scalar = StandardScaler()\r\n for col in columns:\r\n print(\"Processing : \" , col)\r\n self.df = scalar.fit_transform(self.df)\r\n return self.df" ]
[ [ "sklearn.preprocessing.StandardScaler" ] ]
Sargunan/Sargunan
[ "baf480213c8f3888bc4b168a767db00884982b8f" ]
[ "SignatureDataGenerator.py" ]
[ "import numpy as np\nnp.random.seed(1337) # for reproducibility\nfrom keras.preprocessing import image\nfrom scipy import linalg\nimport warnings\nfrom keras import backend as K\nimport getpass as gp\nimport random\nrandom.seed(1337)\n\nclass SignatureDataGenerator(object): \n \n def __init__(self, dataset, tot_writers, num_train_writers, num_valid_writers,\n num_test_writers, nsamples, batch_sz, img_height, img_width,\n featurewise_center=False,\n featurewise_std_normalization=False,\n zca_whitening=False):\n \n # check whether the total number of writers are less than num_train_writers + num_valid_writers + num_test_writers\n assert tot_writers >= num_train_writers + num_valid_writers + num_test_writers, 'Total writers is less than train and test writers'\n \n self.featurewise_center = featurewise_center\n self.featurewise_std_normalization = featurewise_std_normalization\n self.zca_whitening = zca_whitening\n \n #usr = gp.getuser() \n \n size = 500\n \n# self.image_dir = '/home/' + usr + '/Workspace/SignatureVerification/Datasets/' + dataset + '/'\n #self.image_dir = 'D:\\\\cocoapp-master\\\\cocoapp-master\\\\model\\\\'+dataset+ \"\\\\\"\n #data_file = self.image_dir + \"Bengali_pairs\" + '.txt'\n \n self.image_dir = 'D:\\\\cocoapp-master\\\\cocoapp-master\\\\model\\\\'+dataset+ \"\\\\\"\n data_file = self.image_dir + dataset+\"_pairs\" + '.txt'\n \n idx_writers = list(range(tot_writers))\n \n idx_train_writers = sorted(np.random.choice(idx_writers, num_train_writers, replace=False))\n idx_valid_writers = sorted(np.random.choice([x for x in idx_writers if x not in idx_train_writers], num_valid_writers, replace=False))\n idx_test_writers = sorted(np.random.choice([x for x in idx_writers if x not in idx_train_writers and x not in idx_valid_writers], num_test_writers, replace=False))\n \n idx_train_lines = []\n for iw in idx_train_writers:\n idx_train_lines += list(range(iw * size, (iw + 1) * size))\n \n idx_valid_lines = []\n for iw in idx_valid_writers:\n idx_valid_lines += list(range(iw * size, (iw + 1) * size))\n \n idx_test_lines = []\n for iw in idx_test_writers:\n idx_test_lines += list(range(iw * size, (iw + 1) * size))\n \n f = open( data_file, 'r' )\n lines = f.readlines()\n f.close() \n \n \n\n \n # with open('/home/sounak/Dropbox/signature_verification/gpds_pairs_icdar_train.txt') as f:\n # train_lines = f.read().splitlines()\n\n idx_train_lines = list(range(1,6000))\n idx_valid_lines = list(range(6001,7140))\n\n train_lines = [lines[i] for i in idx_train_lines]\n valid_lines = [lines[i] for i in idx_valid_lines]\n test_lines = [lines[i] for i in idx_valid_lines]\n\n del lines\n \n # for train writers \n self.train_lines = self.arrange_lines(train_lines, nsamples, size)\n # self.train_lines = self.arrange_lines(train_lines, nsamples, 552) \n # for valid writers \n self.valid_lines = self.arrange_lines(valid_lines, nsamples, size)\n # for test writers \n # self.test_lines = self.arrange_lines(test_lines, nsamples, size)\n self.test_lines = self.arrange_lines(test_lines, nsamples, size) \n \n # Set other parameters\n self.height=img_height\n self.width=img_width\n self.input_shape=(self.height, self.width, 1)\n self.cur_train_index = 0\n self.cur_valid_index = 0\n self.cur_test_index = 0\n self.batch_sz = batch_sz\n self.samples_per_train = 2*nsamples*num_train_writers\n self.samples_per_valid = 2*nsamples*num_valid_writers\n self.samples_per_test = 2*nsamples*num_test_writers\n # Incase dim_ordering = 'tf'\n self.channel_axis = 3\n self.row_axis = 1\n self.col_axis = 2\n \n 
self.train_labels = np.array([float(line.split(' ')[2].strip('\\n')) for line in self.train_lines])\n self.valid_labels = np.array([float(line.split(' ')[2].strip('\\n')) for line in self.valid_lines])\n self.test_labels = np.array([float(line.split(' ')[2].strip('\\n')) for line in self.test_lines])\n \n print \n \n def arrange_lines(self, lines, nsamples, size):\n \n idx_lines = []\n \n lp = []\n lin = []\n \n for iline, line in enumerate(lines):\n \n file1, file2, label = line.split(' ')\n \n label = int(label)\n \n lp += [label] \n lin += [iline]\n \n if(len(lp) != 0 and len(lp) % size == 0): \n \n idx1 = [i for i, x in enumerate(lp) if x == 1]\n idx2 = [i for i, x in enumerate(lp) if x == 0]\n if(len(idx1)>0):\n idx1 = np.random.choice(idx1, nsamples)\n if(len(idx2)>0):\n idx2 = np.random.choice(idx2, nsamples)\n \n print (len (idx1))\n print (len (idx2))\n idx = [None]*(len(idx1)+len(idx2))\n \n print (len(idx))\n \n if(len(idx1)>0):\n idx[::2] = idx1\n \n if(len(idx1) == 0):\n idx = idx2\n else:\n idx[1::2] = idx2\n \n \n del idx1\n del idx2\n \n idx_lines += [lin[i] for i in idx]\n \n lp = []\n lin = [] \n \n lines = [lines[i] for i in idx_lines]\n\n just_1 = lines[0:][::2]\n just_0 = lines[1:][::2]\n random.shuffle(just_1)\n random.shuffle(just_0)\n lines= [item for sublist in zip(just_1,just_0) for item in sublist]\n \n return lines\n \n def next_train(self):\n while True:\n \n if self.cur_train_index == self.samples_per_train:\n self.cur_train_index = 0\n \n cur_train_index = self.cur_train_index + self.batch_sz\n \n if cur_train_index > self.samples_per_train:\n cur_train_index = self.samples_per_train\n\n # print 'Cur Index', self.cur_train_index,'\\n'\n # print 'End Index', cur_train_index, '\\n'\n idx = list(range(self.cur_train_index, cur_train_index))\n print (idx, '\\n')\n print (len(self.train_lines))\n \n lines = [self.train_lines[i] for i in idx]\n \n image_pairs = []\n label_pairs = []\n \n for line in lines:\n file1, file2, label = line.split(' ')\n \n img1 = image.load_img(self.image_dir + file1, grayscale = True,\n target_size=(self.height, self.width))\n \n img1 = image.img_to_array(img1)#, dim_ordering='tf')\n \n img1 = self.standardize(img1)\n \n img2 = image.load_img(self.image_dir + file2, grayscale = True,\n target_size=(self.height, self.width))\n \n img2 = image.img_to_array(img2)#, dim_ordering='tf')\n \n img2 = self.standardize(img2)\n \n image_pairs += [[img1, img2]]\n label_pairs += [int(label)]\n \n self.cur_train_index = cur_train_index\n \n images = [np.array(image_pairs)[:,0], np.array(image_pairs)[:,1]]\n labels = np.array(label_pairs)\n yield(images,labels)\n \n def next_valid(self):\n while True: \n \n if self.cur_valid_index == self.samples_per_valid:\n self.cur_valid_index = 0\n \n cur_valid_index = self.cur_valid_index + self.batch_sz\n \n if cur_valid_index > self.samples_per_valid:\n cur_valid_index = self.samples_per_valid \n \n idx = list(range(self.cur_valid_index, cur_valid_index))\n \n lines = [self.valid_lines[i] for i in idx]\n \n image_pairs = []\n label_pairs = []\n \n for line in lines:\n file1, file2, label = line.split(' ')\n \n img1 = image.load_img(self.image_dir + file1, grayscale = True,\n target_size=(self.height, self.width))\n \n img1 = image.img_to_array(img1)#, dim_ordering='tf')\n \n img1 = self.standardize(img1)\n \n img2 = image.load_img(self.image_dir + file2, grayscale = True,\n target_size=(self.height, self.width))\n \n img2 = image.img_to_array(img2)#, dim_ordering='tf')\n \n img2 = self.standardize(img2)\n \n 
image_pairs += [[img1, img2]]\n label_pairs += [int(label)]\n\n self.cur_valid_index = cur_valid_index\n \n images = [np.array(image_pairs)[:,0], np.array(image_pairs)[:,1]]\n labels = np.array(label_pairs)\n yield(images,labels)\n \n def next_test(self, file1, file2):\n #while True:\n \n if self.cur_test_index == self.samples_per_test:\n self.cur_test_index = 0\n \n cur_test_index = self.cur_test_index + self.batch_sz\n \n if cur_test_index > self.samples_per_test:\n cur_test_index = self.samples_per_test\n \n idx = list(range(self.cur_test_index, cur_test_index))\n \n lines = [self.test_lines[i] for i in idx]\n image_pairs = []\n line = lines[4]\n #print (lines) \n #file1, file2, label = line.split(' ')\n #print (line)\n #print (lines[1])\n #print (label)\n '''\n file2='001/B-S-1-F-01.tif'\n file1='001/B-S-1-G-01.tif'\n file1= self.image_dir + file1\n file2= self.image_dir + file2\n '''\n #file1 = 'D:\\\\Sargunan\\\\Signature\\\\Dataset_Signature_Final\\\\Dataset\\\\dataset3\\\\real\\\\01401014.png'\n #file2 = 'D:\\\\Sargunan\\\\Signature\\\\Dataset_Signature_Final\\\\Dataset\\dataset3\\\\forge\\\\00101019.png'\n img1 = image.load_img(file1, grayscale = True,\n target_size=(self.height, self.width))\n\n img1 = image.img_to_array(img1)#, dim_ordering='tf')\n\n img1 = self.standardize(img1)\n \n img2 = image.load_img( file2, grayscale = True,\n target_size=(self.height, self.width))\n\n img2 = image.img_to_array(img2)#, dim_ordering='tf')\n\n img2 = self.standardize(img2)\n\n image_pairs += [[img1, img2]]\n\n self.cur_test_index = cur_test_index\n \n images = [np.array(image_pairs)[:,0], np.array(image_pairs)[:,1]]\n return images\n #yield(images)\n \n def fit(self, x, augment=False, rounds=1, seed=None):\n \n x = np.asarray(x, dtype=K.floatx())\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n raise ValueError(\n 'Expected input to be images (as Numpy array) '\n 'following the dimension ordering convention \"' + self.dim_ordering + '\" '\n '(channels on axis ' + str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '\n 'However, it was passed an array with shape ' + str(x.shape) +\n ' (' + str(x.shape[self.channel_axis]) + ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if augment:\n ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + K.epsilon())\n\n if self.zca_whitening:\n flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))\n sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]\n u, s, _ = linalg.svd(sigma)\n self.principal_components = np.dot(np.dot(u, np.diag(1. 
/ np.sqrt(s + 10e-7))), u.T)\n \n def standardize(self, x):\n \n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + 1e-7)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.principal_components is not None:\n flatx = np.reshape(x, (x.size))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t'\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x" ]
[ [ "numpy.dot", "scipy.linalg.svd", "numpy.sqrt", "numpy.random.seed", "numpy.random.choice", "numpy.reshape", "numpy.copy", "numpy.std", "numpy.mean", "numpy.array" ] ]
kewlbear/TensorFlowTTS
[ "c54b7b34091e6b1dd66587a70cd11fa11bb436f9" ]
[ "examples/tacotron2/decode_tacotron2.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright 2020 Minh Nguyen (@dathudeptrai)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Decode Tacotron-2.\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nsys.path.append(\".\")\n\nimport numpy as np\nimport tensorflow as tf\nimport yaml\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom examples.tacotron2.tacotron_dataset import CharactorMelDataset\nfrom tensorflow_tts.configs import Tacotron2Config\nfrom tensorflow_tts.models import TFTacotron2\n\n\ndef main():\n \"\"\"Running decode tacotron-2 mel-spectrogram.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Decode mel-spectrogram from folder ids with trained Tacotron-2 \"\n \"(See detail in tensorflow_tts/example/tacotron2/decode_tacotron2.py).\"\n )\n parser.add_argument(\n \"--rootdir\",\n default=None,\n type=str,\n required=True,\n help=\"directory including ids/durations files.\",\n )\n parser.add_argument(\n \"--outdir\", type=str, required=True, help=\"directory to save generated speech.\"\n )\n parser.add_argument(\n \"--checkpoint\", type=str, required=True, help=\"checkpoint file to be loaded.\"\n )\n parser.add_argument(\n \"--use-norm\", default=1, type=int, help=\"usr norm-mels for train or raw.\"\n )\n parser.add_argument(\"--batch-size\", default=8, type=int, help=\"batch size.\")\n parser.add_argument(\"--win-front\", default=3, type=int, help=\"win-front.\")\n parser.add_argument(\"--win-back\", default=3, type=int, help=\"win-front.\")\n parser.add_argument(\n \"--config\",\n default=None,\n type=str,\n required=True,\n help=\"yaml format configuration file. if not explicitly provided, \"\n \"it will be searched in the checkpoint directory. (default=None)\",\n )\n parser.add_argument(\n \"--verbose\",\n type=int,\n default=1,\n help=\"logging level. higher is more logging. 
(default=1)\",\n )\n args = parser.parse_args()\n\n # set logger\n if args.verbose > 1:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n elif args.verbose > 0:\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n level=logging.WARN,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n logging.warning(\"Skip DEBUG/INFO messages\")\n\n # check directory existence\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n # load config\n with open(args.config) as f:\n config = yaml.load(f, Loader=yaml.Loader)\n config.update(vars(args))\n\n if config[\"format\"] == \"npy\":\n char_query = \"*-ids.npy\"\n mel_query = \"*-raw-feats.npy\" if args.use_norm is False else \"*-norm-feats.npy\"\n char_load_fn = np.load\n mel_load_fn = np.load\n else:\n raise ValueError(\"Only npy is supported.\")\n\n # define data-loader\n dataset = CharactorMelDataset(\n dataset=config[\"tacotron2_params\"][\"dataset\"],\n root_dir=args.rootdir,\n charactor_query=char_query,\n mel_query=mel_query,\n charactor_load_fn=char_load_fn,\n mel_load_fn=mel_load_fn,\n reduction_factor=config[\"tacotron2_params\"][\"reduction_factor\"]\n )\n dataset = dataset.create(allow_cache=True, batch_size=args.batch_size)\n\n # define model and load checkpoint\n tacotron2 = TFTacotron2(\n config=Tacotron2Config(**config[\"tacotron2_params\"]),\n name=\"tacotron2\",\n )\n tacotron2._build() # build model to be able load_weights.\n tacotron2.load_weights(args.checkpoint)\n\n # setup window\n tacotron2.setup_window(win_front=args.win_front, win_back=args.win_back)\n\n for data in tqdm(dataset, desc=\"[Decoding]\"):\n utt_ids = data[\"utt_ids\"]\n utt_ids = utt_ids.numpy()\n\n # tacotron2 inference.\n (\n mel_outputs,\n post_mel_outputs,\n stop_outputs,\n alignment_historys,\n ) = tacotron2.inference(\n input_ids=data[\"input_ids\"], \n input_lengths=data[\"input_lengths\"], \n speaker_ids=data[\"speaker_ids\"],\n )\n\n # convert to numpy\n post_mel_outputs = post_mel_outputs.numpy()\n\n for i, post_mel_output in enumerate(post_mel_outputs):\n stop_token = tf.math.round(tf.nn.sigmoid(stop_outputs[i])) # [T]\n real_length = tf.math.reduce_sum(\n tf.cast(tf.math.equal(stop_token, 0.0), tf.int32), -1\n )\n post_mel_output = post_mel_output[:real_length, :]\n\n saved_name = utt_ids[i].decode(\"utf-8\")\n\n # save D to folder.\n np.save(\n os.path.join(args.outdir, f\"{saved_name}-norm-feats.npy\"),\n post_mel_output.astype(np.float32),\n allow_pickle=False,\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.math.equal", "tensorflow.nn.sigmoid" ] ]
JiaWeiTeh/BounceGame
[ "8a081bb66b03c71d4819e650e74b7425492d6c89" ]
[ "main.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.animation as animation\nimport functions\n\n# =============================================================================\n# Initial setup\n# =============================================================================\n# Dimension of board\nboardx, boardy = [800, 800]\n# Radius and center of circle\ncent = 400\nrad = 340\n# draw boundary\nfig = plt.figure(figsize = (7,7), dpi = 100)\nax = plt.axes(xlim=(0-200, boardx+200), ylim=(0-200, boardy+200))\nax.xaxis.set_ticks([])\nax.yaxis.set_ticks([])\nfunctions.draw(ax, rad, cent)\n# plot ball\nplot_ball, = ax.plot([], [], 'o', mec = 'k', mfc = 'w', ms = 10)\n# initialize timestep\nt = 0\ndt = .6 # fast/slow motion by changing dt\n# initial velocity\nv0 = 10\n# launch angle\ntheta = np.deg2rad(0)\n# gravitational acceleration\ng = 0.5\n# coeff of restitution\ne = 0.99\n# initial position of ball\nx0, y0 = [400,400]\n\n# =============================================================================\n# Initial condition\n# =============================================================================\n \ndef init():\n plot_ball.set_data(x0, y0)\n return plot_ball,\n\n# =============================================================================\n# Animation\n# =============================================================================\n\n\n\ndef animate(i):\n global t, x0, y0, theta, v0, g, dt\n t += dt\n xpos, ypos = functions.get_pos(t, theta, x0, y0, v0, g)\n \n # if ball outisde boundary, reset equation of trajectory with following steps:\n if (np.sqrt((xpos-cent)**2 + (ypos-cent)**2)) >= (rad-20): #-10 due to ball's radius\n # 1. calculate gradient from point to mid. This will be the\n # norm of contact point\n norm = (ypos - cent)/(xpos - cent)\n # 2. instantaneous gradient of circular boundary at point of contact\n grad = -1/norm\n # 3. gradient of ball\n grad_ball = ypos/xpos\n # 4. new angle calculation\n # incident angle between ball path and circle. This will also be the \n # reflected angle between ball path and circle.\n theta_iBC = theta_fBC = abs(np.arctan(abs((grad_ball - grad)/(1 + grad_ball*grad))))\n # incident angle between ball path and x-axis.\n theta_iBX = abs(np.arctan(abs(grad_ball)))\n # angle between circle and xaxis\n theta_CX = abs(np.arctan(abs(grad)))\n # get ball velocity\n xvel, yvel = functions.get_vel(t, theta, v0, g)\n # calculate final angle\n if xvel > 0 and xpos > cent:\n theta = theta_fBC + theta_CX + 0 # 0 radians\n elif xvel < 0 and xpos > cent:\n theta = 2 * np.pi - (np.pi/2 + theta_fBC + (np.pi/2 - theta_CX))\n elif xvel > 0 and xpos < cent:\n theta = abs((np.pi/2 - theta_CX) + theta_fBC )\n if theta > np.pi:\n theta = np.pi - theta\n elif theta < np.pi:\n theta = (3/2) * np.pi + theta\n elif theta == np.pi:\n theta = 0\n elif xvel < 0 and xpos < cent:\n theta = 2 * np.pi - (np.pi + theta_CX + theta_fBC)\n # 5. reset to next(t = 0) for equation of motion. Reset also x0, y0\n t = dt\n x0, y0 = [xpos, ypos]\n # 6. account for energy loss\n v0 *= e\n # 7. 
recalculate and plot\n xpos, ypos = functions.get_pos(t, theta, x0, y0, v0, g)\n plot_ball.set_data(xpos,ypos)\n \n return plot_ball,\n \n plot_ball.set_data(xpos,ypos)\n \n return plot_ball,\n\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=500, #change length of the animation\n interval=5, # change to slow/fast motion\n blit=True,\n repeat = True\n )\n\n# =============================================================================\n# Save movie\n# =============================================================================\n# path2save = r\"/Users/jwt/Documents/Code/Bounce_Game/\"\n# anim.save(path2save+'stage5.mp4', fps=30,\n# extra_args=['-vcodec', 'libx264'])\n\n\n\n\n\n\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.axes", "numpy.deg2rad", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure" ] ]
9929105/KEEP
[ "a3e8b00f82367e13835e5137bd5c0eaa7c8d26d2" ]
[ "keep_backend/privacy/map.py" ]
[ "# -*-Python-*-\n###############################################################################\n#\n# File: map2d.py\n# RCS: $Header: $\n# Description: Transform 2d map coordinates providing Differential Privacy\n# Author: Staal Vinterbo\n# Created: Wed Mar 27 17:07:29 2013\n# Modified: Thu Mar 28 13:25:58 2013 (Staal Vinterbo) staal@mats\n# Language: Python\n# Package: N/A\n# Status: Experimental\n#\n# (c) Copyright 2013, Staal Vinterbo, all rights reserved.\n#\n###############################################################################\n\nfrom random import random\nfrom math import log\nimport numpy as np\n\n\ndef intervalq(point, bounds):\n '''find which interval a point lies in given interval bounds\n\n input: point - number to identify bucket for\n bounds - list of increasing bucket bounds including ends\n\n output: index such that bounds[index - 1] <= point < bounds[index]\n '''\n\n right = len(bounds) - 1\n left = 0\n\n assert(right > 0) # check that bounds contains at least two elements\n\n # deal with points outside bounds range\n if point >= bounds[right]:\n return right\n if point <= bounds[left]:\n return 1\n\n # binary search for interval\n while left < right - 1:\n assert(bounds[left] < bounds[right]) # check that bounds are sorted\n mid = (left + right)/2\n if point >= bounds[mid]:\n left = mid\n else:\n right = mid\n\n return right\n\n\ndef rlaplace(scale, location=0):\n '''genrate a random deviate from Laplace(location, scale)'''\n assert(scale > 0)\n r = random()\n signr = 1 if r >= 0.5 else -1\n rr = r if r < 0.5 else 1 - r\n return location - signr * scale * log(2 * rr)\n\n\ndef noisyh(h, epsilon=1.0, tau=0.5):\n '''make a histogram ina numpy array differentially private.\n\n Expected maximal noise added is O(lon(n)/epsilon) where\n n are the number of times noise is added, i.e., size of\n histogram. 
Using this, we set entries that are smaller than\n tau * log(n)/epsilon 0.'''\n hp = map(lambda x: rlaplace(scale=2/epsilon, location=x), h.flatten())\n threshold = tau * log(len(hp))/epsilon\n hpt = map(lambda y: 0 if y < threshold else y, hp)\n return np.reshape(hpt, h.shape)\n\n\ndef p2m(points, xbounds, ybounds):\n '''convert a list of points to histogram.\n\n xbounds and ybounds contain grid axis points\n into which points are discretized.'''\n xb = sorted(xbounds) # make sure boundaries are sorted\n yb = sorted(ybounds) # make sure boundaries are sorted\n\n nxb = len(xb) - 1 # number of x intervals\n nyb = len(yb) - 1 # number of y intervals\n\n h = np.zeros((nxb, nyb))\n\n for x, y in points:\n i = intervalq(x, xb) - 1\n j = intervalq(y, yb) - 1\n h[i, j] += 1\n\n return h\n\n\ndef m2p(h, xbounds, ybounds):\n '''transform histogram into points\n\n xbounds and ybounds give grid axis points,\n meaning that h[i,j] is translated into a\n point (x,y) such that x is uniformly distributed\n in [xbounds[i], xbounds[i + 1]), and similarly for y.'''\n xb = sorted(xbounds) # make sure boundaries are sorted\n yb = sorted(ybounds) # make sure boundaries are sorted\n\n nxb = len(xb) - 1 # number of x intervals\n nyb = len(yb) - 1 # number of y intervals\n\n assert(h.shape == (nxb, nyb))\n\n points = []\n\n for i in range(nxb):\n ax = xb[i]\n bx = xb[i + 1]\n xwidth = bx - ax\n for j in range(nyb):\n ay = yb[j]\n by = yb[j + 1]\n ywidth = by - ay\n\n pnts = map(lambda _: (ax + random() * xwidth,\n ay + random() * ywidth),\n range(int(h[i, j])))\n points = pnts + points\n\n return points\n\n\ndef privatize(points, xbounds, ybounds, epsilon=1.0, tau=1.5):\n '''create differentially private version of list of points using a grid\n\n the grid is defined by axis points in xbounds and ybounds.\n epsilon is the differential privacy level.\n tau is a filtering parameter, see noisyh().\n '''\n dph = np.array( noisyh( p2m(points, xbounds, ybounds), epsilon, tau).round(), int)\n return m2p(dph, xbounds, ybounds)\n" ]
[ [ "numpy.reshape", "numpy.zeros" ] ]
bjuggler/lvrobi
[ "f7831b471058414d9ca383ab3729f3f6f26990c9" ]
[ "_lib/data_preparation.py" ]
[ "import os\nimport pandas as pd\nimport numpy as np\nimport gpxpy\n\nfrom tqdm import tqdm\nfrom .helper import distance\n\n\ndef remove_substandard_trips(dataframe):\n df = dataframe.copy()\n\n tripids4removing = df[(df['latitude'] == 0.0) | (df['latitude'].isna()) \n | (df['longitude'] == 0.0) | (df['longitude'].isna()) \n | (df['timestamp'] == 0.0) | (df['timestamp'].isna())]['tripid'].unique()\n df = df[~df['tripid'].isin(tripids4removing)]\n df.reset_index(inplace=True, drop=True)\n print(f'Removed {len(tripids4removing)} substandard trips.')\n return df\n\ndef get_trip_start(df_lat_lon):\n df = df_lat_lon.copy()\n tqdm.pandas(desc='start')\n df['start'] = pd.concat([df['tripid'].shift().rename('tripid0'),\n df['tripid'].rename('tripid1')], axis=1\n ).progress_apply(lambda row: False if row[0] == row[1] else True, axis=1, raw=True)\n return df\n\ndef get_trip_end(df_lat_lon):\n df = df_lat_lon.copy()\n tqdm.pandas(desc='end')\n df['end'] = pd.concat([df['tripid'].shift(-1).rename('tripid0'),\n df['tripid'].rename('tripid1')], axis=1\n ).progress_apply(lambda row: False if row[0] == row[1] else True, axis=1, raw=True)\n df.loc[df.index[-1], 'end'] = True\n return df\n\ndef df_calc_basic(dataframe):\n df = dataframe.copy()\n\n tqdm.pandas(desc='distance')\n df['distance'] = pd.concat([df['latitude'].shift(-1).rename('x0'), \n df['latitude'].rename('x1'), \n df['longitude'].shift(-1).rename('y0'), \n df['longitude'].rename('y1'),\n df['tripid'].shift(-1),\n df['tripid']], \n axis=1).progress_apply(lambda row: 0.0 if row[4] != row[5] else distance(row[0], row[2], row[1], row[3]), axis=1, raw=True)\n\n tqdm.pandas(desc='duration')\n df['duration'] = pd.concat([df['timestamp'].shift(-1).rename('ts0'), \n df['timestamp'].rename('ts1'), \n df['tripid'].shift(-1),\n df['tripid']], \n axis=1).progress_apply(lambda row: 0.0 if row[2] != row[3] else row[0]-row[1], axis=1, raw=True)\n\n df['speed'] = df['distance'] / (df['duration'] / 60 / 60)\n df.replace([np.inf, -np.inf], np.nan, inplace=True)\n df.fillna({'speed': 0.0}, inplace=True)\n\n ''' Removing points with 0 distance passed '''\n df = df[(df['distance'] != 0)]\n\n df = get_trip_start(df)\n df = get_trip_end(df)\n\n tqdm.pandas(desc='stop')\n df['stop'] = pd.concat([df['timestamp'].shift(-1), \n df['timestamp'], \n df['start'],\n df['end'],\n df['duration']], \n axis=1).progress_apply(lambda row: 0.0 if row[2] | row[3] | (row[0]-row[1] == row[4]) else (row[0]-row[1]-row[4]), axis=1, raw=True)\n\n df.reset_index(inplace=True, drop=True)\n\n return df\n\n\ndef df_join_generic_with_gps(df_generic, df_gps):\n df_context = df_generic.copy()\n\n df_context.set_index('tripid', inplace=True)\n df_context = df_context[~df_context.index.duplicated()]\n df_context = pd.concat([df_context, calc_context(df_gps)], axis=1, join=\"inner\")\n\n df_context.drop_duplicates(subset=list(set(df_context.columns.tolist()) - set(['startts', 'endts'])), keep='first', inplace=True)\n\n df_context.reset_index(inplace=True)\n\n return df_context\n\n\ndef calc_context(df_gps):\n df = df_gps.groupby('tripid').agg({'timestamp': ['min', 'max'], 'distance': 'sum', 'speed': ['min', 'max', 'mean']})\n df.columns = [''.join(col).strip() for col in df.columns.values]\n df.rename({'timestampmin': 'startts', 'timestampmax': 'endts', 'distancesum': 'distance'}, axis=1, inplace=True)\n df['speedavg_real'] = df['distance'] / ((df['endts']-df['startts'])/60/60)\n\n return df\n\n\ndef read_gpx(path, prefix):\n fpaths = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n 
for file in filenames:\n if file.endswith(\".gpx\"):\n fpaths.append(os.path.join(dirpath, file))\n\n id_ad, name, email =[], [], []\n id, lat, lon, alt, ts = [], [], [], [], []\n for fpath in tqdm(fpaths):\n tripid = prefix + ''.join(fpath.split('/')[-1]).split('.')[0]\n\n gpx_file = open(fpath, 'r')\n gpx = gpxpy.parse(gpx_file)\n \n for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n id.append(tripid)\n lat.append(point.latitude)\n lon.append(point.longitude)\n alt.append(point.elevation)\n ts.append(point.time.timestamp())\n if segment != []:\n id_ad.append(tripid)\n name.append(gpx.author_name)\n email.append(gpx.author_email)\n\n df_context = pd.DataFrame(np.array([id_ad, name, email]).T, columns=['tripid', 'name', 'email'])\n\n df_main = pd.DataFrame(np.array([id, lat, lon, alt, ts]).T, columns=['tripid', 'latitude', 'longitude', 'altitude', 'timestamp'])\n df_main = df_main.astype({'latitude': 'float', 'longitude': 'float', 'altitude': 'float', 'timestamp': 'float'})\n\n df_main['timestamp'] = round(df_main['timestamp'])\n\n return df_main, df_context\n\n\ndef get_df_detail_final(df_detail, df_generic):\n df = df_detail.copy()\n df = df[['tripid', 'latitude', 'longitude', 'timestamp', 'stop', 'distance', 'duration']]\n if ~df_generic.empty:\n df = df[df['tripid'].isin(df_generic['tripid'].tolist())]\n return df\n\n\ndef get_df_generic_final(df_generic, columns):\n df = df_generic.copy()\n df = df[['tripid', 'speedmin', 'speedmax', 'speedmean', 'speedavg_real', 'distance', 'startts', 'endts'] + columns]\n return df" ]
[ [ "numpy.array" ] ]
carpeanon/input_convex
[ "1d3ade6b2c926fb2b1d06d57870b820da21458f1" ]
[ "RL/src/main.py" ]
[ "# Code from Repo SimonRamstedt/ddpg\n# Heavily modified\n\nimport os\nimport pprint\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nimport agent\nimport normalized_env\nimport runtime_env\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('env', '', 'gym environment')\nflags.DEFINE_string('outdir', 'output', 'output directory')\nflags.DEFINE_boolean('force', False, 'overwrite existing results')\nflags.DEFINE_integer('train', 1000, 'training timesteps between testing episodes')\nflags.DEFINE_integer('test', 1, 'testing episodes between training timesteps')\nflags.DEFINE_integer('tmax', 1000, 'maxium timesteps each episode')\nflags.DEFINE_integer('total', 100000, 'total training timesteps')\nflags.DEFINE_float('monitor', 0.01, 'probability of monitoring a test episode')\nflags.DEFINE_string('model', 'ICNN', 'reinforcement learning model[DDPG, NAF, ICNN]')\nflags.DEFINE_integer('tfseed', 0, 'random seed for tensorflow')\nflags.DEFINE_integer('gymseed', 0, 'random seed for openai gym')\nflags.DEFINE_integer('npseed', 0, 'random seed for numpy')\n\nif FLAGS.model == 'DDPG':\n import ddpg\n Agent = ddpg.Agent\nelif FLAGS.model == 'NAF':\n import naf\n Agent = naf.Agent\nelif FLAGS.model == 'ICNN':\n import icnn\n Agent = icnn.Agent\n\n\nclass Experiment(object):\n\n def run(self):\n self.train_timestep = 0\n self.test_timestep = 0\n\n # create normal\n self.env = normalized_env.make_normalized_env(gym.make(FLAGS.env))\n tf.set_random_seed(FLAGS.tfseed)\n np.random.seed(FLAGS.npseed)\n self.env.monitor.start(os.path.join(FLAGS.outdir, 'monitor'), force=FLAGS.force)\n self.env.seed(FLAGS.gymseed)\n gym.logger.setLevel(gym.logging.WARNING)\n\n dimO = self.env.observation_space.shape\n dimA = self.env.action_space.shape\n print(dimO, dimA)\n pprint.pprint(self.env.spec.__dict__)\n\n self.agent = Agent(dimO, dimA=dimA)\n simple_log_file = open(os.path.join(FLAGS.outdir, 'log.txt'), 'w')\n\n while self.train_timestep < FLAGS.total:\n\n # test\n reward_list = []\n for _ in xrange(FLAGS.test):\n reward, timestep = self.run_episode(test=True, monitor=np.random.rand() < FLAGS.monitor)\n reward_list.append(reward)\n self.test_timestep += timestep\n avg_reward = np.mean(reward_list)\n print('Average test return {} after {} timestep of training.'.format(avg_reward, self.train_timestep))\n print >> simple_log_file, \"{}\\t{}\".format(self.train_timestep, avg_reward)\n\n # train\n reward_list = []\n last_checkpoint = self.train_timestep / FLAGS.train\n while self.train_timestep / FLAGS.train == last_checkpoint:\n reward, timestep = self.run_episode(test=False, monitor=False)\n reward_list.append(reward)\n self.train_timestep += timestep\n avg_reward = np.mean(reward_list)\n print('Average train return {} after {} timestep of training.'.format(avg_reward, self.train_timestep))\n\n self.env.monitor.close()\n\n def run_episode(self, test=True, monitor=False):\n self.env.monitor.configure(lambda _: monitor)\n observation = self.env.reset()\n self.agent.reset(observation)\n sum_reward = 0\n timestep = 0\n term = False\n while not term:\n action = self.agent.act(test=test)\n\n observation, reward, term, info = self.env.step(action)\n term = (not test and timestep + 1 >= FLAGS.tmax) or term\n\n filtered_reward = self.env.filter_reward(reward)\n self.agent.observe(filtered_reward, term, observation, test=test)\n\n sum_reward += reward\n timestep += 1\n\n return sum_reward, timestep\n\n\ndef main():\n Experiment().run()\n\nif __name__ == '__main__':\n runtime_env.run(main, 
FLAGS.outdir)\n" ]
[ [ "tensorflow.set_random_seed", "numpy.mean", "numpy.random.rand", "numpy.random.seed" ] ]
aalikadic/transformer-location-prediction
[ "18a787794123a31cf99b22e80bb0ccf8c717e9ea" ]
[ "BERT_Quantized.py" ]
[ "import argparse\nimport baselineUtils\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\nimport time\nfrom transformer.batch import subsequent_mask\nfrom torch.optim import Adam,SGD,RMSprop,Adagrad\nfrom transformer.noam_opt import NoamOpt\nimport numpy as np\nimport scipy.io\nimport json\nimport pickle\nimport transformers\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef transform_batch(src,trg):\n trg_y = trg.clone()\n trg = torch.cat((trg,torch.zeros((trg.shape[0],trg.shape[1],1))),2)\n start_seq = torch.zeros((trg.shape[0],1,trg.shape[-1]))\n start_seq[:,:,-1]=1\n trg=torch.cat((start_seq,trg[:,:-1,:]),1)\n src_mask=torch.ones((src.shape[0],1,src.shape[1]))\n trg_mask=subsequent_mask(trg.shape[1]).repeat((trg.shape[0],1,1))\n\n return src,src_mask,trg,trg_mask,trg_y\n\n\ndef train_epoch(model,optimizer,dataloader,device):\n start = time.time()\n total_tokens = 0\n total_loss = 0\n tokens = 0\n mean = torch.Tensor(model.mean)\n std = torch.Tensor(model.std)\n model.train()\n\n for i,batch in enumerate(dataloader):\n\n #\n\n inp = (batch['src']-mean)/std\n trg = (batch['trg']-mean)/std\n src, src_mask, trg, trg_mask, trg_y = transform_batch(inp, trg)\n src, src_mask, trg, trg_mask, trg_y = src.to(device), src_mask.to(device), trg.to(device), trg_mask.to(\n device), trg_y.to(device)\n n_tokens = trg.shape[0] * trg.shape[1]\n\n # calculate loss\n\n optimizer.optimizer.zero_grad()\n train_pred = model(src, trg, src_mask, trg_mask)\n loss = F.pairwise_distance(train_pred[:, :].view(-1, 2), trg_y[:, :].view(-1, 2)).mean()\n loss.backward()\n optimizer.step()\n\n loss=loss*n_tokens\n\n total_loss += loss\n total_tokens += n_tokens\n tokens += n_tokens\n if i % 10 == 1:\n elapsed = time.time() - start\n print('Epoch step: %d Loss %f Tokens per Sec: %f' % (i, loss / n_tokens, tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss / total_tokens\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef main():\n parser=argparse.ArgumentParser(description='Train the individual Transformer model')\n parser.add_argument('--dataset_folder',type=str,default='datasets')\n parser.add_argument('--dataset_name',type=str,default='eth')\n parser.add_argument('--obs',type=int,default=8)\n parser.add_argument('--preds',type=int,default=12)\n parser.add_argument('--emb_size',type=int,default=1024)\n parser.add_argument('--heads',type=int, default=8)\n parser.add_argument('--layers',type=int,default=6)\n parser.add_argument('--dropout',type=float,default=0.1)\n parser.add_argument('--cpu',action='store_true')\n parser.add_argument('--output_folder',type=str,default='Output')\n parser.add_argument('--val_size',type=int, default=50)\n parser.add_argument('--gpu_device',type=str, default=\"0\")\n parser.add_argument('--verbose',action='store_true')\n parser.add_argument('--max_epoch',type=int, default=100)\n parser.add_argument('--batch_size',type=int,default=256)\n parser.add_argument('--validation_epoch_start', type=int, default=30)\n parser.add_argument('--resume_train',action='store_true')\n parser.add_argument('--delim',type=str,default='\\t')\n parser.add_argument('--name', type=str, default=\"eth_0.1\")\n parser.add_argument('--factor', type=float, default=0.1)\n parser.add_argument('--save_step', type=int, default=1)\n\n\n\n args=parser.parse_args()\n model_name=args.name\n\n try:\n os.mkdir('models')\n except:\n pass\n try:\n os.mkdir('output')\n except:\n pass\n try:\n os.mkdir('output/BERT_quantized')\n except:\n pass\n try:\n 
os.mkdir(f'models/BERT_quantized')\n except:\n pass\n\n try:\n os.mkdir(f'output/BERT_quantized/{args.name}')\n except:\n pass\n\n try:\n os.mkdir(f'models/BERT_quantized/{args.name}')\n except:\n pass\n\n log = SummaryWriter('logs/BERT_quant_%s' % model_name)\n\n log.add_scalar('eval/mad', 0, 0)\n log.add_scalar('eval/fad', 0, 0)\n\n try:\n os.mkdir(args.name)\n except:\n pass\n\n device=torch.device(\"cuda\")\n if args.cpu or not torch.cuda.is_available():\n device=torch.device(\"cpu\")\n\n args.verbose=True\n\n\n ## creation of the dataloaders for train and validation\n train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)\n val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,\n args.preds, delim=args.delim, train=False,\n verbose=args.verbose)\n test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n #model.set_output_embeddings(GeneratorTS(1024,2))\n\n tr_dl=torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)\n val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)\n test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)\n\n #optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)\n #sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)\n #optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)\n epoch=0\n mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, \"clusters.mat\"))\n clusters = mat['centroids']\n config = transformers.BertConfig(vocab_size=clusters.shape[0] + 1)\n gen = nn.Linear(config.hidden_size, clusters.shape[0]).to(device)\n model = transformers.BertModel(config).to(device)\n gen = nn.Linear(config.hidden_size, clusters.shape[0]).to(device)\n optim = NoamOpt(args.emb_size, args.factor, len(tr_dl) * 5,\n torch.optim.Adam(list(model.parameters()) + list(gen.parameters()), lr=0, betas=(0.9, 0.98),\n eps=1e-9))\n\n mean=train_dataset[:]['src'][:,:,2:4].mean((0,1))*0\n std=train_dataset[:]['src'][:,:,2:4].std((0,1))*0+1\n\n while epoch<args.max_epoch:\n epoch_loss=0\n model.train()\n\n for id_b,batch in enumerate(tr_dl):\n optim.optimizer.zero_grad()\n scale = np.random.uniform(0.5, 2)\n # rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])\n n_in_batch = batch['src'].shape[0]\n speeds_inp = batch['src'][:, 1:, 2:4] * scale\n inp = torch.tensor(\n scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,\n -1)).to(device)\n speeds_trg = batch['trg'][:, :, 2:4] * scale\n target = torch.tensor(\n scipy.spatial.distance.cdist(speeds_trg.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,\n -1)).to(\n device)\n src_att = torch.ones((inp.shape[0], 1, inp.shape[1])).to(device)\n trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)\n dec_inp = torch.tensor([clusters.shape[0]]).repeat(n_in_batch, args.preds).to(device)\n bert_inp = torch.cat((inp, dec_inp), 1)\n\n out = gen(model(bert_inp, attention_mask=torch.ones(bert_inp.shape[0], bert_inp.shape[1]).to(device))[0])\n\n loss = F.cross_entropy(out.view(-1, out.shape[-1]), 
torch.cat((inp, target), 1).view(-1), reduction='mean')\n loss.backward()\n optim.step()\n print(\"epoch %03i/%03i frame %04i / %04i loss: %7.4f\" % (\n epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))\n epoch_loss += loss.item()\n #sched.step()\n log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)\n with torch.no_grad():\n model.eval()\n gt = []\n pr = []\n for batch in val_dl:\n gt_b = batch['trg'][:, :, 0:2]\n\n optim.optimizer.zero_grad()\n # rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])\n n_in_batch = batch['src'].shape[0]\n speeds_inp = batch['src'][:, 1:, 2:4]\n inp = torch.tensor(\n scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(\n n_in_batch, -1)).to(device)\n\n dec_inp = torch.tensor([clusters.shape[0]]).repeat(n_in_batch, args.preds).to(device)\n bert_inp = torch.cat((inp, dec_inp), 1)\n\n out = gen(\n model(bert_inp, attention_mask=torch.ones(bert_inp.shape[0], bert_inp.shape[1]).to(device))[0])\n\n F.softmax(out)\n preds_tr_b = clusters[F.softmax(out, dim=-1).argmax(dim=-1).cpu().numpy()][:, -args.preds:].cumsum(\n axis=1) + batch['src'][:, -1:, 0:2].cpu().numpy()\n gt.append(gt_b)\n pr.append(preds_tr_b)\n\n gt = np.concatenate(gt, 0)\n pr = np.concatenate(pr, 0)\n mad, fad, errs = baselineUtils.distance_metrics(gt, pr)\n\n\n log.add_scalar('validation/mad', mad, epoch)\n log.add_scalar('validation/fad', fad, epoch)\n\n model.eval()\n gt = []\n pr = []\n for batch in test_dl:\n gt_b = batch['trg'][:, :, 0:2]\n\n optim.optimizer.zero_grad()\n # rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])\n n_in_batch = batch['src'].shape[0]\n speeds_inp = batch['src'][:, 1:, 2:4]\n inp = torch.tensor(\n scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(\n n_in_batch, -1)).to(device)\n\n dec_inp = torch.tensor([clusters.shape[0]]).repeat(n_in_batch, args.preds).to(device)\n bert_inp = torch.cat((inp, dec_inp), 1)\n\n out = gen(\n model(bert_inp, attention_mask=torch.ones(bert_inp.shape[0], bert_inp.shape[1]).to(device))[0])\n\n F.softmax(out)\n preds_tr_b = clusters[F.softmax(out, dim=-1).argmax(dim=-1).cpu().numpy()][:, -args.preds:].cumsum(\n axis=1) + batch['src'][:, -1:, 0:2].cpu().numpy()\n gt.append(gt_b)\n pr.append(preds_tr_b)\n\n gt = np.concatenate(gt, 0)\n pr = np.concatenate(pr, 0)\n mad, fad, errs = baselineUtils.distance_metrics(gt, pr)\n if epoch %args.save_step ==0 :\n torch.save(model.state_dict(), \"models/BERT_quantized/%s/model_%03i.pth\" % (args.name, epoch))\n torch.save(gen.state_dict(), \"models/BERT_quantized/%s/gen_%03i.pth\" % (args.name, epoch))\n\n log.add_scalar('eval/DET_mad', mad, epoch)\n log.add_scalar('eval/DET_fad', fad, epoch)\n\n\n epoch+=1\n\n ab=1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__=='__main__':\n main()\n" ]
[ [ "torch.nn.functional.softmax", "torch.ones", "torch.Tensor", "torch.cat", "torch.zeros", "torch.utils.data.DataLoader", "torch.tensor", "numpy.concatenate", "torch.nn.Linear", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.device", "numpy.random.uniform" ] ]
minhbau/PyCSP
[ "52ab6ebea39f047e9fac947d1a98b0826c52b4b4" ]
[ "PyCSP/ThermoKinetics.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Riccardo Malpica Galassi, Sapienza University, Roma, Italy\n\"\"\"\nimport numpy as np\nimport cantera as ct\n\n\nclass CanteraThermoKinetics(ct.Solution):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs) \n\n self.constP = 0.0001\n self.constRho = 0.0001\n self._problemtype = 'unset'\n self._nv = self.n_species + 1\n self._source = []\n self._jacobian = []\n self._generalized_Stoich_matrix = []\n self._R_vector = []\n self._jacobian_diagonal = []\n\n\n \"\"\" ~~~~~~~~~~~~ PROPERTIES ~~~~~~~~~~~~~\n \"\"\"\n @property\n def problemtype(self):\n return self._problemtype\n\n \n @property\n def constP(self):\n if(self.problemtype != 'const_p'):\n raise ValueError(\"Constant pressure is unset\")\n else:\n return self._constP\n \n @constP.setter\n def constP(self,value):\n if value > 0:\n self._constP = value\n self._problemtype = 'const_p'\n else:\n raise ValueError(\"Pressure must be positive\")\n\n @property\n def constRho(self):\n if(self.problemtype != 'const_v'):\n raise ValueError(\"Constant density is unset\")\n else:\n return self._constRho\n \n @constRho.setter\n def constRho(self,value):\n if value > 0:\n self._constRho = value\n self._problemtype = 'const_v'\n else:\n raise ValueError(\"Density must be positive\")\n \n @property\n def nv(self):\n return self._nv\n \n @nv.setter\n def nv(self,value):\n if self.n_species <= value <= self.n_species+1:\n self._nv = value\n else:\n raise ValueError(\"Number of variables must be Ns or Ns+1\")\n \n @property\n def source(self):\n if (self.problemtype == 'const_p'): \n return self.rhs_const_p()\n elif (self.problemtype == 'const_v'):\n return self.rhs_const_v()\n else:\n raise ValueError(\"Need to set either constP or constRho value\")\n\n\n @property\n def jacobian(self):\n if (self.problemtype == 'const_p'):\n return self.jacobian_const_p()\n elif (self.problemtype == 'const_v'):\n return self.jacobian_const_v()\n else:\n raise ValueError(\"Need to set either constP or constRho value\")\n\n @property \n def generalized_Stoich_matrix(self):\n if (self.problemtype == 'const_p'):\n return self.generalized_Stoich_matrix_const_p()\n elif (self.problemtype == 'const_v'):\n return self.generalized_Stoich_matrix_const_v()\n else:\n raise ValueError(\"Need to set either constP or constRho value\")\n \n @property \n def R_vector(self):\n return self.Rates_vector()\n\n @property\n def jacobian_diagonal(self):\n if (self.problemtype == 'const_p'):\n return self.jacobian_diagonal_const_p()\n elif (self.problemtype == 'const_v'):\n return self.jacobian_diagonal_const_v()\n else:\n raise ValueError(\"Need to set either constP or constRho value\")\n\n\n \"\"\" ~~~~~~~~~~~~ METHODS ~~~~~~~~~~~~~\n \"\"\"\n\n \n \"\"\" ~~~~~~~~~~~~ state ~~~~~~~~~~~~~\n \"\"\"\n \n def set_stateYT(self,y):\n if (self.problemtype == 'const_p'):\n return self.set_stateYT_const_p(y)\n elif (self.problemtype == 'const_v'):\n return self.set_stateYT_const_v(y)\n else:\n raise ValueError(\"Need to set either constP or constRho value\")\n \n def stateYT(self):\n y = np.zeros(self.n_species+1)\n y[-1] = self.T\n y[0:-1] = self.Y\n return y\n\n \n def set_stateYT_const_p(self,y):\n self.Y = y[0:-1]\n self.TP = y[-1],self.constP\n\n def set_stateYT_const_v(self,y):\n self.Y = y[0:-1]\n self.TD = y[-1],self.constRho \n \n \"\"\" ~~~~~~~~~~~~ rhs ~~~~~~~~~~~~~\n \"\"\"\n def rhs_const_p(self):\n \"\"\"Computes chemical RHS [shape:(ns+1)] for a constant pressure reactor. 
\n Input must be an instance of the CSPCantera class\"\"\"\n \n ns = self.n_species \n ydot = np.zeros(ns+1)\n Wk = self.molecular_weights\n R = ct.gas_constant\n \n wdot = self.net_production_rates #[kmol/m^3/s]\n orho = 1./self.density\n \n ydot[-1] = - R * self.T * np.dot(self.standard_enthalpies_RT, wdot) * orho / self.cp_mass\n ydot[0:-1] = wdot * Wk * orho\n return ydot\n \n \n def rhs_const_v(self):\n \"\"\"Computes chemical RHS [shape:(ns+1)] for a constant volume reactor. \n Input must be an instance of the CSPCantera class\"\"\"\n \n ns = self.n_species \n ydot = np.zeros(ns+1)\n Wk = self.molecular_weights\n R = ct.gas_constant\n \n wdot = self.net_production_rates #[kmol/m^3/s]\n orho = 1./self.density\n cp = self.cp_mass\n cv = self.cv_mass\n wmix = self.mean_molecular_weight\n \n gamma = cp / cv\n \n ydot[-1] = - R * self.T * np.dot(self.standard_enthalpies_RT, wdot) * gamma * orho / cp + ( (gamma - 1.0) * self.T * wmix * np.sum(wdot) * orho )\n ydot[0:-1] = wdot * Wk * orho\n return ydot\n \n \"\"\" ~~~~~~~~~~~~ Stoichiometric matrix and Rates vector ~~~~~~~~~~~~~\n \"\"\"\n \n def generalized_Stoich_matrix_const_p(self):\n \"\"\"N_v x 2*N_r matrix containing the S components in column major format, \n such that S dot Rvec yields RHS\"\"\"\n nu_p = self.product_stoich_coeffs()\n nu_r = self.reactant_stoich_coeffs()\n rho = self.density\n numat = np.concatenate((nu_p-nu_r,nu_r-nu_p),axis=1)\n smat = np.vstack([numat[i] * self.molecular_weights[i] for i in range(self.n_species)])/rho\n #compute last row (temperature) of the matrix\n cp = self.cp_mass #[J/Kg K]\n hspec = self.standard_enthalpies_RT #non-dimensional\n Hspec = ct.gas_constant * self.T * hspec #[J/Kmol]\n smatT = np.sum([- numat[i] * Hspec[i] for i in range(self.n_species)],axis=0)/(rho*cp)\n Smat = np.vstack((smat,smatT))\n return Smat[:self.nv]\n \n\n def generalized_Stoich_matrix_const_v(self):\n \"\"\"N_v x 2*N_r matrix containing the S components in column major format, \n such that S dot Rvec yields RHS\"\"\"\n Wk = self.molecular_weights\n R = ct.gas_constant\n cp = self.cp_mass\n cv = self.cv_mass\n #wmix = 1.0/(np.dot(self.Y, np.reciprocal(Wk)))\n wmix = self.mean_molecular_weight\n gamma = cp / cv\n \n nu_p = self.product_stoich_coeffs()\n nu_r = self.reactant_stoich_coeffs()\n rho = self.density\n \n c1g = gamma/(rho*cp)\n c2g = (gamma-1.0)*self.T*wmix/rho\n \n numat = np.concatenate((nu_p-nu_r,nu_r-nu_p),axis=1)\n smat = np.vstack([numat[i] * Wk[i] for i in range(self.n_species)])/rho\n #compute last row (temperature) of the matrix\n cp = self.cp_mass #[J/Kg K]\n hspec = self.standard_enthalpies_RT #non-dimensional\n Hspec = R * self.T * hspec #[J/Kmol]\n smatT = np.sum([numat[i] * (-Hspec[i] * c1g + c2g) for i in range(self.n_species)],axis=0)\n Smat = np.vstack((smat,smatT))\n return Smat[:self.nv]\n \n def Rates_vector(self):\n \"\"\" 2*Nr-long vector containing the rates of progress in [Kmol/m3/s]\"\"\" \n rvec = np.concatenate((self.forward_rates_of_progress,self.reverse_rates_of_progress))\n return rvec\n \n \"\"\" ~~~~~~~~~~~~ jacobian ~~~~~~~~~~~~~\n \"\"\"\n\n def jacobian_const_p(self):\n \"\"\"Computes numerical Jacobian.\n Returns a N_s+1 x N_s+1 array [jac]. 
Input must be an instance of the CSPCantera class\"\"\"\n roundoff = np.finfo(float).eps\n sro = np.sqrt(roundoff)\n #setup the state vector\n T = self.T\n p = self.P\n y = self.Y.copy() #ns-long\n ydot = self.rhs_const_p() #ns+1-long (Y1,...,Yn,T)\n \n #create a jacobian vector\n jac2D = np.zeros((self.n_species+1, self.n_species+1))\n \n #evaluate the Jacobian\n for i in range(self.n_species):\n dy = np.zeros(self.n_species)\n dy[i] = max(sro*abs(y[i]),1e-8)\n self.set_unnormalized_mass_fractions(y+dy)\n ydotp = self.rhs_const_p()\n dydot = ydotp-ydot\n jac2D[:,i] = dydot/dy[i]\n \n self.Y = y\n\n dT = max(sro*abs(T),1e-3)\n self.TP = T+dT,self.P\n ydotp = self.rhs_const_p()\n dydot = ydotp-ydot\n jac2D[:,-1] = dydot/dT\n \n self.TP = T,p\n \n return jac2D\n\n\n\n def jacobian_const_v(self):\n \"\"\"Computes numerical Jacobian.\n Returns a N_s+1 x N_s+1 array [jac]. Input must be an instance of the CSPCantera class\"\"\"\n roundoff = np.finfo(float).eps\n sro = np.sqrt(roundoff)\n #setup the state vector\n T = self.T\n rho = self.density\n y = self.Y.copy() #ns-long\n ydot = self.rhs_const_v() #ns+1-long (Y1,...,Yn,T)\n \n #create a jacobian vector\n jac2D = np.zeros((self.n_species+1, self.n_species+1))\n \n #evaluate the Jacobian\n for i in range(self.n_species):\n dy = np.zeros(self.n_species)\n dy[i] = max(sro*abs(y[i]),1e-8)\n self.set_unnormalized_mass_fractions(y+dy)\n ydotp = self.rhs_const_v()\n dydot = ydotp-ydot\n jac2D[:,i] = dydot/dy[i]\n \n self.Y = y\n\n dT = max(sro*abs(T),1e-3)\n self.TD = T+dT,rho\n ydotp = self.rhs_const_v()\n dydot = ydotp-ydot\n jac2D[:,-1] = dydot/dT\n \n self.TD = T,rho\n \n return jac2D\n \n \n \n def jac_contribution(self):\n \"\"\"Computes contributions of each reaction to numerical Jacobian.\n Given that g = Sr = Sum_k S_k r^k, it follows that \n J(g) = Sum_k^(2nr) J_k, where J_k = Jac(S_k r^k) \n S_k r^k is the product of the k-th column of the matrix S and the k-th \n component of the vector r. \n Returns a list of 2*Nr (N_s+1 x N_s+1) arrays [jacK]. 
Input must be an instance of the CSPCantera class\"\"\"\n roundoff = np.finfo(float).eps\n sro = np.sqrt(roundoff)\n nv = self.nv\n ns = self.n_species\n nr = self.n_reactions\n #setup the state vector\n T = self.T\n y = self.Y #ns-long\n Smat = self.generalized_Stoich_matrix # ns x 2nr\n rvec = self.R_vector # 2nr-long\n\n \n Smatp = np.zeros((nv,nv,2*nr))\n rvecp = np.zeros((nv,2*nr)) \n #evaluate Smat and Rvec in y+dy[i]\n for i in range(ns):\n dy = np.zeros(ns)\n dy[i] = max(sro*abs(y[i]),1e-8)\n self.set_unnormalized_mass_fractions(y+dy)\n Smatp[i] = self.generalized_Stoich_matrix\n rvecp[i] = self.R_vector\n \n if(nv==ns+1): \n self.Y = y #reset original Y\n dT = max(sro*abs(T),1e-3)\n self.TP = T+dT,self.P\n Smatp[-1] = self.generalized_Stoich_matrix\n rvecp[-1] = self.R_vector\n \n self.TP = T,self.P #reset original T,P\n \n \n JacK = np.zeros((2*nr,nv,nv))\n #evaluate derivatives per each reaction\n for k in range(2*nr):\n jac2D = np.zeros((nv,nv))\n for i in range(ns):\n dy = np.zeros(ns)\n dy[i] = max(sro*abs(y[i]),1e-8)\n ydotp = Smatp[i,:,k]*rvecp[i,k]\n ydot = Smat[:,k]*rvec[k]\n dydot = ydotp-ydot\n jac2D[:,i] = dydot/dy[i]\n \n if(nv==ns+1): \n ydotp = Smatp[-1,:,k]*rvecp[-1,k]\n ydot = Smat[:,k]*rvec[k]\n dydot = ydotp-ydot\n dT = max(sro*abs(T),1e-3)\n jac2D[:,-1] = dydot/dT\n \n JacK[k] = jac2D\n \n #to check for correctness, in main program:\n #jack = gas.jac_contribution()\n #jac=np.sum(jack,axis=0)\n #jacn = gas.jac_numeric()\n #np.allclose(jac,jacn,rtol=1e-8,atol=1e-12)\n \n return JacK\n \n \n def jacobian_diagonal_const_p(self):\n \"\"\"Computes (an approx. to) the diagonal of the numerical Jacobian.\n Returns a N_s+1 array [diagjac]. Input must be an instance of the CSPCantera class\"\"\"\n roundoff = np.finfo(float).eps\n sro = np.sqrt(roundoff)\n #setup the state vector\n T = self.T\n p = self.P\n y = self.Y.copy() #ns-long\n ydot = self.rhs_const_p() #ns+1-long (Y1,...,Yn,T)\n \n #create a jacobian vector\n diagjac = np.zeros((self.n_species+1))\n \n #evaluate the Jacobian\n dy = np.zeros(self.n_species)\n dy = [max(sro*abs(y[i]),1e-8) for i in range(self.n_species)]\n dT = max(sro*abs(T),1e-3)\n self.set_unnormalized_mass_fractions(y+dy)\n self.TP = T+dT,self.P\n ydotp = self.rhs_const_p()\n dydot = ydotp-ydot\n diagjac[:-1] = dydot[:-1]/dy\n diagjac[-1] = dydot[-1]/dT\n \n self.Y = y\n self.TP = T,p\n \n return diagjac\n\n\n\n def jacobian_diagonal_const_v(self):\n \"\"\"Computes (an approx. to) the diagonal of the numerical Jacobian.\n Returns a N_s+1 [diagjac]. 
Input must be an instance of the CSPCantera class\"\"\"\n roundoff = np.finfo(float).eps\n sro = np.sqrt(roundoff)\n #setup the state vector\n T = self.T\n rho = self.density\n y = self.Y.copy() #ns-long\n ydot = self.rhs_const_v() #ns+1-long (Y1,...,Yn,T)\n \n #create a jacobian vector\n diagjac = np.zeros((self.n_species+1))\n \n #evaluate the Jacobian\n dy = np.zeros(self.n_species)\n dy = [max(sro*abs(y[i]),1e-8) for i in range(self.n_species)]\n dT = max(sro*abs(T),1e-3)\n self.set_unnormalized_mass_fractions(y+dy)\n self.TD = T+dT,rho\n ydotp = self.rhs_const_p()\n dydot = ydotp-ydot\n diagjac[:-1] = dydot[:-1]/dy\n diagjac[-1] = dydot[-1]/dT\n \n self.Y = y\n self.TD = T,rho\n \n return diagjac\n \n \"\"\" ~~~~~~~~~~~~ OTHER JAC FORMULATIONS ~~~~~~~~~~~~~\n \"\"\"\n\n def jacThermal(self):\n ns = self.n_species\n R = ct.gas_constant\n hspec = self.standard_enthalpies_RT\n Hspec = hspec * R * self.T\n Wk = self.molecular_weights\n cp = self.cp_mass\n TJrow = Hspec / ( Wk * cp)\n TJcol = self.jacobian[0:ns,-1]\n JacThermal = np.outer(TJcol,TJrow)\n return JacThermal\n \n def jacKinetic(self):\n ns = self.n_species\n jacKinetic = self.jacobian[0:ns,0:ns] \n return jacKinetic\n\n\n \"\"\" ~~~~~~~~~~~~ REAC NAMES ~~~~~~~~~~~~~\n \"\"\" \n def reaction_names(self):\n nr = self.n_reactions\n rnames = self.reaction_equations()\n reacnames = np.zeros(2*nr,dtype=object)\n reacnames[0:nr] = ['(Rf-'+str(i+1)+') '+ s for s,i in zip(rnames,range(nr)) ]\n reacnames[nr:2*nr] = ['(Rb-'+str(i+1)+') '+ s for s,i in zip(rnames,range(nr))]\n return reacnames\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.finfo", "numpy.concatenate", "numpy.outer", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
deacona/the-ball-is-round
[ "8e91a72084d13d754deb82e4852fa37a86a77084" ]
[ "notebooks/output/intl_02_euro_2020_live.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Euro 2020 (2021) Predictions\n# \n# <!-- Written report for this analysis can be found [here](../reports/boro_01_market_value.md) -->\n\n# ## 1. Business Understanding\n# \n# * Determine Busines Objectives\n# * Situation Assessment\n# * Determine Data Mining Goal\n# * Produce Project Plan\n\n# ```\n# # 1. Predict results of every match at Euro 2020\n# # 2. Make predictions before each round of competition\n# # 3. Ideally, at each round, use the predictions to simulate remainder of competition\n# # 4. Check against other predictions and actual results\n# # 5. Write up process (report/blog)\n# ```\n\n# ## 2. Data Understanding\n# \n# * Collect Initial Data\n# * Describe Data\n# * Explore Data\n# * Verify Data Quality\n\n# In[1]:\n\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport pickle\n\nimport matplotlib.pyplot as plt\nplt.style.use(\"seaborn-whitegrid\")\nimport seaborn as sns\nsns.set()\n\nimport src.utilities as utilities\n\n\n# In[2]:\n\n\nmatch = utilities.get_master(\"nations_matches\")\n# match.info()\n\nmatch = match[['Round', 'Day', 'Date', 'Time', 'Team_1', 'Team_2',\n 'Year', 'Goals_1', 'Goals_2',\n 'Goal_diff', 'Venue', 'Venue_country', 'Venue_city', 'Home_1',\n 'Home_2']]\n\nmatch[\"Goal_total\"] = match.Goals_1 + match.Goals_2\nmatch[\"Result\"] = None\nmatch.loc[match.Goals_1 == match.Goals_2, \"Result\"] = \"Draw\"\nmatch.loc[match.Goals_1 > match.Goals_2, \"Result\"] = \"Win\"\nmatch.loc[match.Goals_1 < match.Goals_2, \"Result\"] = \"Loss\"\n\nmatch.describe(include=\"all\").T\n\n\n# In[3]:\n\n\ndef metric_histograms(df, metrics, discrete=False):\n# df = df_in.dropna(subset=[metric]).fillna(\"NULL\")\n for metric in metrics:\n print(\"\\n{0}\\n\".format(metric))\n# df[metric].hist()\n sns.histplot(data=df, x=metric, kde=True, discrete=discrete)\n plt.show()\n# sns.boxplot(x=df[metric])\n# plt.show()\n print(\"\\n--------------------\")\n\nmetric_histograms(match, [\"Goals_1\", \"Goals_2\", \"Goal_diff\", \"Goal_total\"], discrete=True)\n\n\n# In[4]:\n\n\ndef group_boxplots(df_in, cols, metric):\n df = df_in.dropna(subset=[metric]).fillna(\"NULL\")\n for col in cols:\n print(\"\\n{0}\\n\".format(col))\n print(df[col].value_counts())\n sns.boxplot(x=col, y=metric, data=df.sort_values(by=col), \n showmeans=True, width=0.6)\n plt.xticks(rotation=90)\n\n # plt.savefig(\"../reports/figures/club_01_boxplot_{0}.PNG\".format(col))\n plt.show()\n print(\"\\n--------------------\")\n \ncol_list = [\"Round\", \"Day\", \"Time\", \"Year\", \"Venue_country\", \"Result\"]\n\ngroup_boxplots(match, col_list, \"Goal_total\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[5]:\n\n\nsummary = utilities.get_master(\"nations_summaries\")\n# summary.info()\n\nsummary = summary[['Rank Local', 'Rank Global', 'Team', 'Rating',\n 'Average Rank', 'Average Rating', '1 Year Change Rank',\n '1 Year Change Rating', 'Matches Total', 'Matches Home', 'Matches Away',\n 'Matches Neutral', 'Matches Wins', 'Matches Losses', 'Matches Draws',\n 'Goals For', 'Goals Against', 'Year', 'Country',\n 'Data Year', 'GDP (PPP)', 'Population']]\n\nsummary[\"GDP (PPP) Per Capita\"] = summary['GDP (PPP)'] / summary['Population']\n\nsummary.describe(include=\"all\").T\n\n\n# In[6]:\n\n\nmetric_histograms(summary, ['Rating', 'Average Rank',\n 'Average Rating', '1 Year Change Rank', '1 Year Change Rating',\n 'Matches Total', 'Matches Home', 'Matches Away', 'Matches Neutral',\n 'Matches Wins', 'Matches Losses', 'Matches Draws', 'Goals For',\n 'Goals Against', 'GDP (PPP)',\n 
'Population', 'GDP (PPP) Per Capita'])\n\n\n# In[ ]:\n\n\n\n\n\n# ## 3. Data Preperation\n# \n# * Select Data\n# * Clean Data\n# * Construct Data\n# * Integrate Data\n# * Format Data\n\n# In[7]:\n\n\ndata = match.merge(summary, left_on=[\"Team_1\", \"Year\"], right_on=[\"Team\", \"Year\"]) #, suffixes=[\"\", \"_1\"])\ndata = data.merge(summary, left_on=[\"Team_2\", \"Year\"], right_on=[\"Team\", \"Year\"], suffixes=[\"\", \" (2)\"])\ndata.sort_values(by=[\"Date\", \"Team_1\"], inplace=True)\ndata.reset_index(drop=True, inplace=True)\n\ndata[\"Elo_rating_diff\"] = data[\"Rating\"] - data[\"Rating (2)\"]\ndata[\"Home_advantage\"] = data[\"Home_1\"] - data[\"Home_2\"]\n# data[\"Win_expectency_1\"] = ((10**((-(data.Elo_rating_diff + (100 * data.Home_advantage)))/400))+1)**-1\ndata[\"Relative_experience\"] = data[\"Matches Total\"] / data[\"Matches Total (2)\"]\ndata[\"Relative_population\"] = data[\"Population\"] / data[\"Population (2)\"]\ndata[\"Relative_GDP_per_capita\"] = data[\"GDP (PPP) Per Capita\"] / data[\"GDP (PPP) Per Capita (2)\"]\ndata[\"Relative_ELO_rating\"] = data[\"Rating\"] / data[\"Rating (2)\"]\n# data[\"Relative_ELO_rank_1yr_change\"] = data[\"1 Year Change Rank\"] / data[\"1 Year Change Rank (2)\"]\n# data[\"Relative_ELO_rating_1yr_change\"] = data[\"1 Year Change Rating\"] / data[\"1 Year Change Rating (2)\"]\n# data[\"Combined_ELO_rating_1yr_change\"] = data[\"1 Year Change Rating\"].abs() + data[\"1 Year Change Rating (2)\"].abs()\n\n# model_years = [2000, 2004, 2008, 2012, 2016]\nlive_years = [2021]\ndata[\"Usage\"] = \"Training\"\ndata.loc[data.Year.isin(live_years), \"Usage\"] = \"Live\"\n\n# data = data[[\"Date\", \"Year\", \"Team_1\", \"Team_2\", \"Goal_diff\", \"Goal_total\", \"Elo_rating_diff\", \"Home_advantage\", \"Win_expectency_1\",\n# \"Relative_experience\", \"Relative_population\", \"Relative_GDP_per_capita\",\n# \"Relative_ELO_rating\"]] #, \"Relative_ELO_rank_1yr_change\", \"Relative_ELO_rating_1yr_change\"\n# ]]\n\n# data.columns\ndata.describe().T\n\n\n# In[8]:\n\n\nskip_cols = [x+\" (2)\" for x in summary.columns]\n# print(skip_cols)\nskip_cols = summary.columns.tolist() + skip_cols\n# print(skip_cols)\nskip_cols = [x for x in skip_cols if x in data.columns ]\n# print(skip_cols)\n\ndata.drop(columns=skip_cols).corr().style.background_gradient(cmap='coolwarm')\n\n\n# In[9]:\n\n\n# data_lim.corr().style.background_gradient(cmap='coolwarm')\n\n\n# In[ ]:\n\n\n\n\n\n# ## 4. 
Modelling\n# \n# * Select Modelling Technique\n# * Generate Test Design\n# * Build Model\n# * Assess Model\n\n# In[10]:\n\n\n# from sklearn.dummy import DummyRegressor\n# from sklearn.linear_model import LinearRegression\n# from sklearn.linear_model import Lasso\n# from sklearn.linear_model import Ridge\n# # from sklearn.linear_model import BayesianRidge\n# from sklearn.ensemble import RandomForestRegressor\n# from sklearn.ensemble import GradientBoostingRegressor\n# # from sklearn.ensemble import VotingRegressor\n# # from sklearn.neighbors import KNeighborsRegressor\n# from sklearn.svm import SVR\n# # from sklearn.compose import TransformedTargetRegressor\nfrom sklearn.base import BaseEstimator, RegressorMixin\n\n# from sklearn.metrics import median_absolute_error\n# from sklearn.metrics import mean_squared_error\n# from sklearn.metrics import r2_score\n\n# # from sklearn.utils import resample\n# from sklearn.preprocessing import MinMaxScaler\n# from sklearn.preprocessing import StandardScaler\n# from sklearn.pipeline import Pipeline\n# # from sklearn.model_selection import learning_curve\n# # from sklearn.model_selection import KFold\n# from sklearn.model_selection import train_test_split\n\n# # np.random.seed(1)\n\n\n# In[11]:\n\n\nclass EloRegressor(BaseEstimator, RegressorMixin):\n def __init__(self): #, yType=\"Diff\", goalWeight=4., goalBoost=16.):\n self.yType = \"Diff\"\n self.goalWeight = 4.\n self.goalBoost = 1.\n \n def _show_params(self):\n print(\"_show_params...\")\n print(\"yType:\", self.yType)\n print(\"goalWeight:\", self.goalWeight)\n print(\"goalBoost:\", self.goalBoost)\n \n return\n \n def _calc_output(self, X):\n X_tmp = X.copy(deep=True)\n X_tmp[\"EloRatingDiffWithHomeAdv\"] = X_tmp[\"Elo_rating_diff\"] + (100 * X_tmp.Home_advantage)\n X_tmp[\"WinExpectency1Square\"] = (10**((-X_tmp.EloRatingDiffWithHomeAdv)/400))+1\n X_tmp[\"WinExpectency1\"] = X_tmp[\"WinExpectency1Square\"]**-1\n X_tmp[\"RawGoalDiff\"] = (self.goalWeight * (X_tmp.WinExpectency1 - 0.5)).round(0)\n X_tmp[\"RawGoalDiffAbs\"] = X_tmp[\"RawGoalDiff\"].abs()\n X_tmp[\"EitherWins\"] = 0\n X_tmp.loc[X_tmp.RawGoalDiffAbs > 0, \"EitherWins\"] = 1\n# X_tmp[\"QualifyGoalsRankAvg\"] = (X_tmp[\"QualifyGoalsRank1\"] + X_tmp[\"QualifyGoalsRank2\"]) / 2\n X_tmp[\"ApplyGoalBoost\"] = 0\n# X_tmp.loc[X_tmp.QualifyGoalsRankAvg <= self.goalBoost, \"ApplyGoalBoost\"] = 1\n X_tmp[\"Goals1\"] = X_tmp[\"ApplyGoalBoost\"]\n X_tmp.loc[X_tmp.RawGoalDiff > 0, \"Goals1\"] = X_tmp.RawGoalDiff + X_tmp.ApplyGoalBoost\n X_tmp[\"Goals2\"] = X_tmp[\"ApplyGoalBoost\"]\n X_tmp.loc[X_tmp.RawGoalDiff <= 0, \"Goals2\"] = X_tmp.ApplyGoalBoost - X_tmp.RawGoalDiff\n X_tmp[\"GoalDiff\"] = X_tmp.Goals1 - X_tmp.Goals2\n X_tmp[\"GoalDiffAbs\"] = X_tmp.GoalDiff.abs()\n X_tmp[\"GoalTotal\"] = X_tmp.Goals1 + X_tmp.Goals2\n \n return X_tmp[\"Goal\"+self.yType].values\n\n def fit(self, X, y=None):\n if y.name == \"Goal_total\":\n self.yType = \"Total\"\n# else:\n# self.yType = \"Diff\"\n y_tmp = self._calc_output(X).mean()\n y_low = y.quantile(q=0.2)\n y_high = y.quantile(q=0.8)\n while y_tmp < y_low:\n self.goalWeight += 0.05\n y_tmp = self._calc_output(X).mean()\n while y_tmp > y_high:\n self.goalWeight -= 0.05\n y_tmp = self._calc_output(X).mean()\n self._show_params()\n \n return self\n\n def predict(self, X, y=None):\n self._show_params()\n return self._calc_output(X)\n\n\n# ## 5. Evaluation\n# \n# * Evaluate Results\n# * Review Process\n# * Determine Next Steps\n\n# ## 6. 
Deployment\n# \n# * Plan Deployment\n# * Plan Monitoring and Maintenance\n# * Produce Final Report\n# * Review Project\n\n# In[12]:\n\n\npickle_base = \"../models/intl_02_{0}.pkl\"\n\nprint(\"Un-Pickling files...\")\n\nwith open(pickle_base.format(\"gd_model\"), \"rb\") as pkl_1:\n selected_gd_model = pickle.load(pkl_1)\nwith open(pickle_base.format(\"gd_features\"), \"rb\") as pkl_2:\n gd_features = pickle.load(pkl_2)\nwith open(pickle_base.format(\"gt_model\"), \"rb\") as pkl_3:\n selected_gt_model = pickle.load(pkl_3)\nwith open(pickle_base.format(\"gt_features\"), \"rb\") as pkl_4:\n gt_features = pickle.load(pkl_4)\n\nselected_gd_model, gd_features, selected_gt_model, gt_features\n\n\n# In[13]:\n\n\noutput_prev = pd.read_csv(\"../data/interim/intl_02_predictions.csv\")\noutput_prev[\"Year\"] = output_prev.Year.astype(str)\noutput_prev.sort_values(by=[\"Date\", \"Team_1\"], inplace=True)\noutput_prev.reset_index(drop=True, inplace=True)\noutput_prev.info()\n\n\n# In[14]:\n\n\noutput_new = data.copy(deep=True)[[\"Date\", \"Year\", \"Round\", \"Team_1\", \"Team_2\", \"Usage\", \"Goals_1\", \"Goals_2\", \"Goal_diff\", \"Goal_total\", \"Result\"]]\noutput_new.columns = [\"Date\", \"Year\", \"Round\", \"Team_1\", \"Team_2\", \"Usage\", \"Actual_score_1\", \"Actual_score_2\", \"Actual_goal_diff\", \"Actual_goal_total\", \"Actual_result\"]\noutput_new[\"Year\"] = output_new.Year.astype(str)\noutput_new.sort_values(by=[\"Date\", \"Team_1\"], inplace=True)\noutput_new.reset_index(drop=True, inplace=True)\n# output.loc[output.index.isin(gd_y_test.index), \"Usage\"] = \"Testing\"\n\ngd_pred = selected_gd_model.predict(data[gd_features])\ngt_pred = selected_gt_model.predict(data[gt_features])\n\ngd_weight = 1 #.05\ngt_weight = 1 #.15\n\n## add weights?\noutput_new[\"Predicted_score_1\"] = (gd_weight * (gt_pred + gd_pred) / 2).round()\noutput_new[\"Predicted_score_2\"] = (gt_weight * (gt_pred - gd_pred) / 2).round()\n\n## use earlier predictions where available\noutput = output_prev.combine_first(output_new)\noutput = output[output_prev.columns]\n# print(output.shape)\n\noutput[\"Predicted_goal_diff\"] = output.Predicted_score_1 - output.Predicted_score_2\noutput[\"Predicted_goal_total\"] = output.Predicted_score_1 + output.Predicted_score_2\noutput[\"Predicted_result\"] = None\noutput.loc[output.Predicted_score_1 == output.Predicted_score_2, \"Predicted_result\"] = \"Draw\"\noutput.loc[output.Predicted_score_1 > output.Predicted_score_2, \"Predicted_result\"] = \"Win\"\noutput.loc[output.Predicted_score_1 < output.Predicted_score_2, \"Predicted_result\"] = \"Loss\"\n\noutput[\"Correct_result\"] = (output.Actual_result == output.Predicted_result).astype(int)\noutput[\"Correct_goal_diff\"] = (output.Actual_goal_diff == output.Predicted_goal_diff).astype(int)\noutput[\"Correct_score\"] = ((output.Actual_score_1 == output.Predicted_score_1) & (output.Actual_score_2 == output.Predicted_score_2)).astype(int)\noutput[\"Points\"] = output.Correct_result + output.Correct_goal_diff + output.Correct_score\n\nfor col in [\"Correct_result\", \"Correct_goal_diff\", \"Correct_score\", \"Points\"]:\n output.loc[pd.isnull(output.Actual_result), col] = np.nan\n\noutput.to_csv(\"../data/interim/intl_02_predictions_live.csv\", index=False)\noutput.describe(include=\"all\").T\n\n\n# In[15]:\n\n\ndef agg_by_col(df, col, asc=True):\n \"\"\"\n INPUT:\n df - Match-level output dataframe\n col - Column to aggregate by\n asc - Sort ascending (True) or descending (False)\n \n OUTPUT:\n agg - Aggregated dataframe\n \"\"\"\n 
agg = pd.concat([\n df[pd.notnull(df.Actual_result)][col].value_counts().sort_index(),\n df[pd.notnull(df.Actual_result)].groupby(col)[[\"Points\", \"Correct_result\", \"Correct_goal_diff\", \"Correct_score\",\n \"Predicted_goal_total\", \"Actual_goal_total\"]].mean(),\n df[pd.notnull(df.Actual_result) & (df.Predicted_result != \"Draw\")][col].value_counts() / df[pd.notnull(df.Actual_result)][col].value_counts(),\n df[pd.notnull(df.Actual_result) & (df.Actual_result != \"Draw\")][col].value_counts() / df[pd.notnull(df.Actual_result)][col].value_counts(),\n ], axis=1)\n agg.sort_index(ascending=asc, inplace=True)\n agg.columns = ['Matches played', 'Points per game', '% correct result',\n '% correct goal diff', '% correct score', 'Goals per game (predicted)',\n 'Goals per game (actual)', '% games won (predicted)',\n '% games won (actual)']\n# print(agg.columns)\n \n return agg\n\noverall = pd.DataFrame({\n \"Matches played\": output[pd.notnull(output.Actual_result)].shape[0],\n \"Points per game\": output[pd.notnull(output.Actual_result)].Points.mean(),\n \"% correct result\": output[pd.notnull(output.Actual_result)].Correct_result.mean(),\n \"% correct goal diff\": output[pd.notnull(output.Actual_result)].Correct_goal_diff.mean(),\n \"% correct score\": output[pd.notnull(output.Actual_result)].Correct_score.mean(),\n \"Goals per game (predicted)\": output[pd.notnull(output.Actual_result)].Predicted_goal_total.mean(),\n \"Goals per game (actual)\": output[pd.notnull(output.Actual_result)].Actual_goal_total.mean(),\n \"% games won (predicted)\": output[pd.notnull(output.Actual_result) & (output.Predicted_result != \"Draw\")].shape[0] / output[pd.notnull(output.Actual_result)].shape[0],\n \"% games won (actual)\": output[pd.notnull(output.Actual_result) & (output.Actual_result != \"Draw\")].shape[0] / output[pd.notnull(output.Actual_result)].shape[0],\n}, index=[\"Overall\"])\n# print(overall.columns)\n\nsummary = pd.concat([agg_by_col(output, \"Year\"), agg_by_col(output, \"Usage\", asc=False), overall], axis=0)\nsummary = summary.round(2)\npct_cols = []\nfor col in summary.columns:\n if col.startswith(\"%\"):\n pct_cols.append(col)\nsummary[pct_cols] = (100 * summary[pct_cols]).astype(int).astype(str) + \"%\"\n\nsummary\n\n\n# In[16]:\n\n\noutput.loc[output.Usage == \"Live\"].describe().dropna(axis=1, how=\"any\").T\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.notnull", "pandas.isnull", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use" ] ]
williamd4112/chainerrl
[ "a1fe94e95fb1577232b7cc5c45a7cd9bd4385090" ]
[ "chainerrl/distribution.py" ]
[ "from __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom abc import abstractproperty\n\nfrom cached_property import cached_property\nimport chainer\nfrom chainer import functions as F\nfrom future.utils import with_metaclass\nimport numpy as np\n\nfrom chainerrl.functions import arctanh\nfrom chainerrl.functions import mellowmax\n\n\ndef _wrap_by_variable(x):\n if isinstance(x, chainer.Variable):\n return x\n else:\n return chainer.Variable(x)\n\n\ndef _unwrap_variable(x):\n if isinstance(x, chainer.Variable):\n return x.array\n else:\n return x\n\n\ndef sample_discrete_actions(batch_probs):\n \"\"\"Sample a batch of actions from a batch of action probabilities.\n\n Args:\n batch_probs (ndarray): batch of action probabilities BxA\n Returns:\n ndarray consisting of sampled action indices\n \"\"\"\n xp = chainer.cuda.get_array_module(batch_probs)\n return xp.argmax(\n xp.log(batch_probs) + xp.random.gumbel(size=batch_probs.shape),\n axis=1).astype(np.int32, copy=False)\n\n\nclass Distribution(with_metaclass(ABCMeta, object)):\n \"\"\"Batch of distributions of data.\"\"\"\n\n @abstractproperty\n def entropy(self):\n \"\"\"Entropy of distributions.\n\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def sample(self):\n \"\"\"Sample from distributions.\n\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def prob(self, x):\n \"\"\"Compute p(x).\n\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def log_prob(self, x):\n \"\"\"Compute log p(x).\n\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def copy(self, x):\n \"\"\"Copy a distribion unchained from the computation graph.\n\n Returns:\n Distribution\n \"\"\"\n raise NotImplementedError()\n\n @abstractproperty\n def most_probable(self):\n \"\"\"Most probable data points.\n\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractproperty\n def kl(self, distrib):\n \"\"\"Compute KL divergence D_KL(P|Q).\n\n Args:\n distrib (Distribution): Distribution Q.\n Returns:\n chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n @abstractproperty\n def params(self):\n \"\"\"Learnable parameters of this distribution.\n\n Returns:\n tuple of chainer.Variable\n \"\"\"\n raise NotImplementedError()\n\n def sample_with_log_prob(self):\n \"\"\"Do `sample` and `log_prob` at the same time.\n\n This can be more efficient than calling `sample` and `log_prob`\n separately.\n\n Returns:\n chainer.Variable: Samples.\n chainer.Variable: Log probability of the samples.\n \"\"\"\n y = self.samples()\n return y, self.log_prob(y)\n\n\nclass CategoricalDistribution(Distribution):\n \"\"\"Distribution of categorical data.\"\"\"\n\n @cached_property\n def entropy(self):\n with chainer.force_backprop_mode():\n return - F.sum(self.all_prob * self.all_log_prob, axis=1)\n\n @cached_property\n def most_probable(self):\n return chainer.Variable(\n np.argmax(self.all_prob.array, axis=1).astype(np.int32))\n\n def sample(self):\n return chainer.Variable(sample_discrete_actions(self.all_prob.array))\n\n def prob(self, x):\n return F.select_item(self.all_prob, x)\n\n def log_prob(self, x):\n return 
F.select_item(self.all_log_prob, x)\n\n @abstractmethod\n def all_prob(self):\n raise NotImplementedError()\n\n @abstractmethod\n def all_log_prob(self):\n raise NotImplementedError()\n\n def kl(self, distrib):\n return F.sum(\n self.all_prob * (self.all_log_prob - distrib.all_log_prob), axis=1)\n\n\nclass SoftmaxDistribution(CategoricalDistribution):\n \"\"\"Softmax distribution.\n\n Args:\n logits (ndarray or chainer.Variable): Logits for softmax\n distribution.\n beta (float): inverse of the temperature parameter of softmax\n distribution\n min_prob (float): minimum probability across all labels\n \"\"\"\n\n def __init__(self, logits, beta=1.0, min_prob=0.0):\n self.logits = logits\n self.beta = beta\n self.min_prob = min_prob\n self.n = logits.shape[1]\n assert self.min_prob * self.n <= 1.0\n\n @property\n def params(self):\n return (self.logits,)\n\n @cached_property\n def all_prob(self):\n with chainer.force_backprop_mode():\n if self.min_prob > 0:\n return (F.softmax(self.beta * self.logits)\n * (1 - self.min_prob * self.n)) + self.min_prob\n else:\n return F.softmax(self.beta * self.logits)\n\n @cached_property\n def all_log_prob(self):\n with chainer.force_backprop_mode():\n if self.min_prob > 0:\n return F.log(self.all_prob)\n else:\n return F.log_softmax(self.beta * self.logits)\n\n def copy(self):\n return SoftmaxDistribution(_unwrap_variable(self.logits).copy(),\n beta=self.beta, min_prob=self.min_prob)\n\n def __repr__(self):\n return 'SoftmaxDistribution(beta={}, min_prob={}) logits:{} probs:{} entropy:{}'.format( # NOQA\n self.beta, self.min_prob, self.logits.array,\n self.all_prob.array, self.entropy.array)\n\n def __getitem__(self, i):\n return SoftmaxDistribution(self.logits[i],\n beta=self.beta, min_prob=self.min_prob)\n\n\nclass MellowmaxDistribution(CategoricalDistribution):\n \"\"\"Maximum entropy mellowmax distribution.\n\n See: http://arxiv.org/abs/1612.05628\n\n Args:\n values (ndarray or chainer.Variable): Values to apply mellowmax.\n \"\"\"\n\n def __init__(self, values, omega=8.):\n self.values = values\n self.omega = omega\n\n @property\n def params(self):\n return (self.values,)\n\n @cached_property\n def all_prob(self):\n with chainer.force_backprop_mode():\n return mellowmax.maximum_entropy_mellowmax(self.values)\n\n @cached_property\n def all_log_prob(self):\n with chainer.force_backprop_mode():\n return F.log(self.all_prob)\n\n def copy(self):\n return MellowmaxDistribution(_unwrap_variable(self.values).copy(),\n omega=self.omega)\n\n def __repr__(self):\n return 'MellowmaxDistribution(omega={}) values:{} probs:{} entropy:{}'.format( # NOQA\n self.omega, self.values.array, self.all_prob.array,\n self.entropy.array)\n\n def __getitem__(self, i):\n return MellowmaxDistribution(self.values[i], omega=self.omega)\n\n\ndef clip_actions(actions, min_action, max_action):\n min_actions = F.broadcast_to(min_action, actions.shape)\n max_actions = F.broadcast_to(max_action, actions.shape)\n return F.maximum(F.minimum(actions, max_actions), min_actions)\n\n\ndef _eltwise_gaussian_log_likelihood(x, mean, var, ln_var):\n # log N(x|mean,var)\n # = -0.5log(2pi) - 0.5log(var) - (x - mean)**2 / (2*var)\n return -0.5 * np.log(2 * np.pi) - \\\n 0.5 * ln_var - \\\n ((x - mean) ** 2) / (2 * var)\n\n\nclass GaussianDistribution(Distribution):\n \"\"\"Gaussian distribution.\"\"\"\n\n def __init__(self, mean, var):\n self.mean = _wrap_by_variable(mean)\n self.var = _wrap_by_variable(var)\n self.ln_var = F.log(var)\n\n @property\n def params(self):\n return (self.mean, 
self.var)\n\n @cached_property\n def most_probable(self):\n return self.mean\n\n def sample(self):\n return F.gaussian(self.mean, self.ln_var)\n\n def prob(self, x):\n return F.exp(self.log_prob(x))\n\n def log_prob(self, x):\n eltwise_log_prob = _eltwise_gaussian_log_likelihood(\n x, self.mean, self.var, self.ln_var)\n return F.sum(eltwise_log_prob, axis=1)\n\n @cached_property\n def entropy(self):\n # Differential entropy of Gaussian is:\n # 0.5 * (log(2 * pi * var) + 1)\n # = 0.5 * (log(2 * pi) + log var + 1)\n with chainer.force_backprop_mode():\n return 0.5 * self.mean.array.shape[1] * (np.log(2 * np.pi) + 1) + \\\n 0.5 * F.sum(self.ln_var, axis=1)\n\n def copy(self):\n return GaussianDistribution(_unwrap_variable(self.mean).copy(),\n _unwrap_variable(self.var).copy())\n\n def kl(self, q):\n p = self\n return 0.5 * F.sum(q.ln_var - p.ln_var +\n (p.var + (p.mean - q.mean) ** 2) / q.var -\n 1, axis=1)\n\n def __repr__(self):\n return 'GaussianDistribution mean:{} ln_var:{} entropy:{}'.format(\n self.mean.array, self.ln_var.array, self.entropy.array)\n\n def __getitem__(self, i):\n return GaussianDistribution(self.mean[i], self.var[i])\n\n\ndef _tanh_forward_log_det_jacobian(x):\n \"\"\"Compute log|det(dy/dx)| except summation where y=tanh(x).\"\"\"\n # For the derivation of this formula, see:\n # https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py # NOQA\n return 2. * (np.log(2.) - x - F.softplus(-2. * x))\n\n\nclass SquashedGaussianDistribution(Distribution):\n \"\"\"Gaussian distribution squashed by tanh.\n\n This type of distribution was used in https://arxiv.org/abs/1812.05905.\n \"\"\"\n\n def __init__(self, mean, var):\n self.mean = _wrap_by_variable(mean)\n self.var = _wrap_by_variable(var)\n self.ln_var = F.log(var)\n\n @property\n def params(self):\n return (self.mean, self.var)\n\n @cached_property\n def most_probable(self):\n return F.tanh(self.mean)\n\n def sample_with_log_prob(self):\n x = F.gaussian(self.mean, self.ln_var)\n normal_log_prob = _eltwise_gaussian_log_likelihood(\n x, self.mean, self.var, self.ln_var)\n log_probs = normal_log_prob - _tanh_forward_log_det_jacobian(x)\n y = F.tanh(x)\n return y, F.sum(log_probs, axis=1)\n\n def sample(self):\n # Caution: If you would like to apply `log_prob` later, use\n # `sample_with_log_prob` instead for stability, especially when\n # tanh(x) can be close to -1 or 1.\n y = F.tanh(F.gaussian(self.mean, self.ln_var))\n return y\n\n def prob(self, x):\n return F.exp(self.log_prob(x))\n\n def log_prob(self, x):\n # Caution: If you would like to apply this to samples from the same\n # distribution, use `sample_with_log_prob` instead for stability,\n # especially when tanh(x) can be close to -1 or 1.\n raw_action = arctanh(x)\n normal_log_prob = _eltwise_gaussian_log_likelihood(\n raw_action, self.mean, self.var, self.ln_var)\n log_probs = normal_log_prob - _tanh_forward_log_det_jacobian(\n raw_action)\n return F.sum(log_probs, axis=1)\n\n @cached_property\n def entropy(self):\n raise NotImplementedError\n\n def copy(self):\n return SquashedGaussianDistribution(\n _unwrap_variable(self.mean).copy(),\n _unwrap_variable(self.var).copy())\n\n def kl(self, q):\n p = self\n return 0.5 * F.sum(q.ln_var - p.ln_var +\n (p.var + (p.mean - q.mean) ** 2) / q.var -\n 1, axis=1)\n\n def __repr__(self):\n return 'SquashedGaussianDistribution mean:{} ln_var:{} entropy:{}'.format( # NOQA\n self.mean.array, self.ln_var.array, self.entropy.array)\n\n def __getitem__(self, i):\n return 
SquashedGaussianDistribution(self.mean[i], self.var[i])\n\n\nclass ContinuousDeterministicDistribution(Distribution):\n \"\"\"Continous deterministic distribution.\n\n This distribution is supposed to be used in continuous deterministic\n policies.\n \"\"\"\n\n def __init__(self, x):\n self.x = _wrap_by_variable(x)\n\n @cached_property\n def entropy(self):\n raise RuntimeError('Not defined')\n\n @cached_property\n def most_probable(self):\n return self.x\n\n def sample(self):\n return self.x\n\n def prob(self, x):\n raise RuntimeError('Not defined')\n\n def copy(self):\n return ContinuousDeterministicDistribution(\n _unwrap_variable(self.x).copy())\n\n def log_prob(self, x):\n raise RuntimeError('Not defined')\n\n def kl(self, distrib):\n raise RuntimeError('Not defined')\n\n @property\n def params(self):\n return (self.x,)\n" ]
[ [ "numpy.log", "numpy.argmax" ] ]
bynoud/ray
[ "bfa06052828f83ab790e6b6bbfa5b56edb42b45e" ]
[ "python/ray/tests/test_advanced_3.py" ]
[ "# coding: utf-8\nimport glob\nimport logging\nimport os\nimport shutil\nimport json\nimport sys\nimport socket\nimport tempfile\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.ray_constants as ray_constants\nimport ray.cluster_utils\nimport ray.test_utils\nimport setproctitle\n\nfrom ray.test_utils import (check_call_ray, RayTestTimeoutException,\n wait_for_num_actors)\n\nlogger = logging.getLogger(__name__)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.node.unique_id\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.node.unique_id\n\n # This object will be local to one of the raylets. Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.actors()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n assert ray.cluster_resources()[\"CPU\"] == 5\n assert ray.cluster_resources()[\"GPU\"] == 3\n assert ray.cluster_resources()[\"CustomResource\"] == 1\n\n # A driver/worker creates a temporary object during startup. Although the\n # temporary object is freed immediately, in a rare case, we can still find\n # the object ref in GCS because Raylet removes the object ref from GCS\n # asynchronously.\n # Because we can't control when workers create the temporary objects, so\n # We can't assert that `ray.objects()` returns an empty dict. 
Here we just\n # make sure `ray.objects()` succeeds.\n assert len(ray.objects()) >= 0\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n class Actor:\n def __init__(self):\n pass\n\n _ = Actor.remote() # noqa: F841\n # Wait for actor to be created\n wait_for_num_actors(1)\n\n actor_table = ray.actors()\n assert len(actor_table) == 1\n\n actor_info, = actor_table.values()\n assert actor_info[\"JobID\"] == job_id.hex()\n assert \"IPAddress\" in actor_info[\"Address\"]\n assert \"IPAddress\" in actor_info[\"OwnerAddress\"]\n assert actor_info[\"Address\"][\"Port\"] != actor_info[\"OwnerAddress\"][\"Port\"]\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"DriverIPAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError:\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. 
Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\n@pytest.mark.skipif(\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray.worker.global_worker.current_job_id\n\n # in worker\n @ray.remote\n def f():\n return ray.worker.global_worker.current_job_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_ref_properties():\n id_bytes = b\"00112233445566778899\"\n object_ref = ray.ObjectRef(id_bytes)\n assert object_ref.binary() == id_bytes\n object_ref = ray.ObjectRef.nil()\n assert object_ref.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectRef(b\"0123456789\")\n object_ref = ray.ObjectRef.from_random()\n assert not object_ref.is_nil()\n assert object_ref.binary() != id_bytes\n id_dumps = pickle.dumps(object_ref)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_ref\n\n\n@pytest.fixture\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(\n num_cpus=1,\n object_store_memory=int(10**8),\n _internal_config=json.dumps({\n \"object_pinning_enabled\": 0\n }))\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.core_worker.object_exists(x_id)\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName:\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray::UniqueName.f()\"\n\n @ray.remote\n def unique_1():\n assert \"unique_1\" in setproctitle.getproctitle()\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n 
ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\n@pytest.mark.skipif(\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(\n check_call_ray([\"stack\"], capture_stdout=True))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n if sys.platform != \"win32\":\n random_name = ray.ObjectRef.from_random().hex()\n temp_raylet_socket_dir = os.path.join(ray.utils.get_ray_temp_dir(),\n \"tests\", random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\n@pytest.mark.parametrize(\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_put_pins_object(ray_start_object_store_memory):\n obj = np.ones(200 * 1024, dtype=np.uint8)\n x_id = ray.put(obj)\n x_binary = x_id.binary()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # x cannot be evicted since x_id pins it\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert (ray.get(x_id) == 
obj).all()\n assert (ray.get(ray.ObjectRef(x_binary)) == obj).all()\n\n # now it can be evicted since x_id pins it but x_binary does not\n del x_id\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert not ray.worker.global_worker.core_worker.object_exists(\n ray.ObjectRef(x_binary))\n\n # weakref put\n y_id = ray.put(obj, weakref=True)\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(y_id)\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_refs, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor:\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n\n\ndef test_invalid_unicode_in_worker_log(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n # Wait till first worker log file is created.\n while True:\n log_file_paths = glob.glob(\"{}/worker*.out\".format(logs_dir))\n if len(log_file_paths) == 0:\n time.sleep(0.2)\n else:\n break\n\n with open(log_file_paths[0], \"wb\") as f:\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.flush()\n\n # Wait till the log monitor reads the file.\n time.sleep(1.0)\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\n@pytest.mark.skip(reason=\"This test is too expensive to run.\")\ndef test_move_log_files_to_old(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n @ray.remote\n class Actor:\n def f(self):\n print(\"function f finished\")\n\n # First create a temporary actor.\n actors = [\n Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)\n ]\n ray.get([a.f.remote() for a in actors])\n\n # Make sure no log files are in the \"old\" directory before the actors\n # are killed.\n assert len(glob.glob(\"{}/old/worker*.out\".format(logs_dir))) == 0\n\n # Now kill the actors so the files get moved to logs/old/.\n 
[a.__ray_terminate__.remote() for a in actors]\n\n while True:\n log_file_paths = glob.glob(\"{}/old/worker*.out\".format(logs_dir))\n if len(log_file_paths) > 0:\n with open(log_file_paths[0], \"r\") as f:\n assert \"function f finished\\n\" in f.readlines()\n break\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\ndef test_lease_request_leak(shutdown_only):\n ray.init(\n num_cpus=1,\n _internal_config=json.dumps({\n \"initial_reconstruction_timeout_milliseconds\": 200\n }))\n assert len(ray.objects()) == 0\n\n @ray.remote\n def f(x):\n time.sleep(0.1)\n return\n\n # Submit pairs of tasks. Tasks in a pair can reuse the same worker leased\n # from the raylet.\n tasks = []\n for _ in range(10):\n obj_ref = ray.put(1)\n for _ in range(2):\n tasks.append(f.remote(obj_ref))\n del obj_ref\n ray.get(tasks)\n\n time.sleep(\n 1) # Sleep for an amount longer than the reconstruction timeout.\n assert len(ray.objects()) == 0, ray.objects()\n\n\n@pytest.mark.parametrize(\n \"ray_start_cluster\", [{\n \"num_cpus\": 0,\n \"num_nodes\": 1,\n \"do_init\": False,\n }],\n indirect=True)\ndef test_ray_address_environment_variable(ray_start_cluster):\n address = ray_start_cluster.address\n # In this test we use zero CPUs to distinguish between starting a local\n # ray cluster and connecting to an existing one.\n\n # Make sure we connect to an existing cluster if\n # RAY_ADDRESS is set.\n os.environ[\"RAY_ADDRESS\"] = address\n ray.init()\n assert \"CPU\" not in ray.state.cluster_resources()\n del os.environ[\"RAY_ADDRESS\"]\n ray.shutdown()\n\n # Make sure we start a new cluster if RAY_ADDRESS is not set.\n ray.init()\n assert \"CPU\" in ray.state.cluster_resources()\n ray.shutdown()\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.zeros", "pandas.DataFrame", "numpy.ones" ] ]
ehultee/VE-cauldrons
[ "52afe581c0ab32b93cee305e86e023c39e84ee69" ]
[ "ESkafta-2015/Skafta-ArcticDEM-transecting.py" ]
[ "# Reading in ArcticDEM, sampling transect across Skafta Cauldron\n# 4 Dec 2018 EHU\n# Edit 21 Feb 2019 - plot analytical elastic/viscoelastic\n# Edit 16 July - move functions to helper module\n\nimport numpy as np\nimport scipy.misc as scp\nfrom scipy import interpolate\nfrom scipy.ndimage import gaussian_filter\nfrom osgeo import gdal\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.patches import Rectangle\nimport math\nimport sys\nsys.path.insert(0, '/Users/lizz/Documents/GitHub/VE-cauldrons')\nfrom cauldron_funcs import *\n\n\n## Read in ArcticDEM surface\nskafta_region_path = '/Users/lizz/Documents/UMich MBP import/Documents - UM_MBP/6. MIT/Skaftar collapse/data/arcticDEM/'\nnc_20121015_path = skafta_region_path + 'subset_nc/SETSM_WV02_20121015_skaftar_east_ll.nc'\nnc_20151010_path = skafta_region_path + 'subset_nc/SETSM_WV02_20151010_skaftar_east_ll.nc'\n\nlon_2012, lat_2012, se_2012 = read_ArcticDEM_nc(nc_20121015_path)\nSE_2012 = np.ma.masked_where(se_2012==0, se_2012)\nlon_2015, lat_2015, se_2015 = read_ArcticDEM_nc(nc_20151010_path)\nSE_2015 = np.ma.masked_where(se_2015==0, se_2015)\n\n## Interpolating surface elevation and sampling transect\nsefunc_2012 = interpolate.interp2d(lon_2012, lat_2012, SE_2012)\nsefunc_2015 = interpolate.interp2d(lon_2015, lat_2015, SE_2015)\n\n#npoints = 1000\n#endpoints = [(-17.542113802658239, 64.488141277357315),\n# (-17.48586677277758, 64.486397775690023)] #coordinates at either side of the cauldron, selected by inspection with ginput. \n#lonvals = np.linspace(endpoints[0][0], endpoints[1][0], npoints)\n#latvals = np.linspace(endpoints[0][1], endpoints[1][1], npoints)\n#sevals_2012 = np.asarray([sefunc_2012(lonvals[i], latvals[i]) for i in range(npoints)]).squeeze()\n#sevals_2015 = np.asarray([sefunc_2015(lonvals[i], latvals[i]) for i in range(npoints)]).squeeze()\n\n## Prepare transect for plotting, with x-axis of distance along transect in m\ndef haversine(coord1, coord2):\n R = 6372800 # Earth radius in meters\n lat1, lon1 = coord1\n lat2, lon2 = coord2\n \n phi1, phi2 = math.radians(lat1), math.radians(lat2) \n dphi = math.radians(lat2 - lat1)\n dlambda = math.radians(lon2 - lon1)\n \n a = math.sin(dphi/2)**2 + \\\n math.cos(phi1)*math.cos(phi2)*math.sin(dlambda/2)**2\n \n return 2*R*math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n#transect_length = haversine(endpoints[0][::-1], endpoints[1][::-1])\n#xaxis = np.linspace(0, transect_length, num=npoints)\n\ndef sample_transect(endpts, DEM_surface1, DEM_surface2=None, cauldron_name='Eastern_Skafta', npoints=1000, elastic=True, viscoelastic=True, days_simulated = 5, timestep=20000, stresses=False):\n \"\"\" Function to standardize transecting procedure. Sets up a cauldron with appropriate radius and computes analytical profiles.\n Arguments:\n endpts = (lat, lon) of the two endpoints of the transect\n DEM_surface1 = a 2D interpolated function of the initial observed surface to plot\n DEM_surface2 = another (optional) 2D interpolated function of the observed surface, possibly from a later time. Default None will use only 1 surface\n Default settings:\n cauldron_name = name (string) of the Cauldron instance we set up. Default is 'Eastern_Skafta'.\n npoints = how many points to sample along transect. Default 1000\n elastic = whether to calculate elastic profile. Default True\n viscoelastic = whether to calculate viscoelastic profile/stresses. Default True\n days_simulated = time period over which to simulate viscoelastic collapse. 
Default 5 (for Eastern Skafta)\n stresses = whether to calculate elastic and VE stresses. Default False\n Returns dictionary of profiles and stresses, as specified in arguments.\n \"\"\"\n out_dict = {}\n out_dict['name'] = cauldron_name\n lonvals = np.linspace(endpts[0][0], endpts[1][0], npoints)\n latvals = np.linspace(endpts[0][1], endpts[1][1], npoints)\n surfvals_1 = np.asarray([DEM_surface1(lonvals[i], latvals[i]) for i in range(npoints)]).squeeze() # for Eastern Skafta, use 2012 vals here\n out_dict['initial_surface_obs'] = surfvals_1\n if DEM_surface2 is not None:\n surfvals_2 = np.asarray([DEM_surface2(lonvals[i], latvals[i]) for i in range(npoints)]).squeeze()\n out_dict['second_surface_obs'] = surfvals_2\n \n transect_length = haversine(endpts[0][::-1], endpts[1][::-1])\n out_dict['xaxis'] = np.linspace(0, transect_length, num=npoints) #needed for plotting\n x_cylcoords = np.linspace(-0.5*transect_length, 0.5*transect_length, num=npoints)\n initial_surf_val = np.mean((surfvals_1[0], surfvals_1[-1])) #surface elevation at edges before loading\n initial_surf = interpolate.interp1d(x_cylcoords, surfvals_1, kind='quadratic')\n cldrn = Cauldron(name=cauldron_name, initial_surface = initial_surf, radius = 0.5*transect_length)\n cldrn.set_viscoelastic_bendingmod()\n \n out_dict['Cauldron_instance'] = cldrn #return the Cauldron instance in case further manipulations wanted\n\n if elastic:\n out_dict['elastic_profile'] = [cldrn.LL_profile(x) for x in x_cylcoords]\n if viscoelastic:\n nseconds = days_simulated*24*60*60 #number of seconds in days_simulated\n times = np.arange(0, nseconds, step=timestep)\n out_dict['VE_times'] = times\n out_dict['VE_profiles'] = [[cldrn.viscoelastic_profile(x, t0) for x in x_cylcoords] for t0 in times]\n if stresses:\n if elastic:\n out_dict['elastic_stress'] = [cldrn.elastic_stress(x, config='radial_plate') for x in x_cylcoords]\n if viscoelastic:\n out_dict['max_VE_stress'] = [cldrn.viscoelastic_stress(x, times[4]) for x in x_cylcoords]\n\n return out_dict\n\ndef plot_plain_transect(in_dict, colormap=cm.get_cmap('winter_r')):\n \"\"\"Read in quantities and plot raw profile from a transect dictionary\n \"\"\"\n xaxis = in_dict['xaxis']\n sevals_1 = in_dict['initial_surface_obs']\n try:\n sevals_2 = in_dict['second_surface_obs']\n except KeyError:\n print('No secondary surface observations saved on transect {}. 
Setting identical to first surface for plotting.'.format(in_dict['name']))\n sevals_2 = sevals_1\n transect_length = max(xaxis)\n\n fig = plt.figure('Transect profile, {}'.format(in_dict['name']), figsize=(7, 3))\n plt.plot(xaxis, sevals_1, color='k', ls='-.', label='15 Oct 2012')\n plt.plot(xaxis, sevals_2, color='k', ls='-', label='10 Oct 2015')\n plt.fill_between(xaxis, sevals_1, sevals_2, color='Gainsboro', hatch='/', edgecolor='DimGray', linewidth=0, alpha=0.7)\n plt.fill_between(xaxis, sevals_2, (plt.axes().get_ylim()[0]), color='Azure')\n plt.legend(loc='upper right')\n plt.axes().set_aspect(5)\n plt.axes().set_xlim(0, transect_length)\n plt.axes().set_yticks([1550, 1600, 1650, 1700])\n plt.axes().set_ylim((1525, 1750))\n #plt.axes().set_yticklabels(['1550', '1600', '1650', '1700'], fontsize=14)\n plt.axes().tick_params(which='both', labelsize=14)\n #plt.axes().set_xticklabels(['0', '1', '2', '3', '4', '5', '6'], fontsize=14)\n plt.axes().set_xlabel('Along-transect distance [m]', fontsize=16)\n plt.axes().set_ylabel('Surface elevation [m a.s.l.]', fontsize=16)\n #plt.title('Eastern Skafta cauldron transect: observed, ideal elastic, ideal viscoelastic. E={:.1E}'.format(ESkafta.youngmod), fontsize=18)\n plt.show()\n\n return fig #return the figure instance so it can be modified\n\n\ndef plot_elastic_transect(in_dict, colormap=cm.get_cmap('winter_r')):\n \"\"\"Read in quantities and plot elastic profile from a transect dictionary\n \"\"\"\n xaxis = in_dict['xaxis']\n sevals_1 = in_dict['initial_surface_obs']\n try:\n sevals_2 = in_dict['second_surface_obs']\n except KeyError:\n print('No secondary surface observations saved on transect {}. Setting identical to first surface for plotting.'.format(in_dict['name']))\n sevals_2 = sevals_1\n elastic_profile = in_dict['elastic_profile']\n transect_length = max(xaxis)\n \n elas_color = colormap(np.linspace(0.1, 0.9, num=len(times)+1))[0]\n \n fig = plt.figure('Elastic profile, {}'.format(in_dict['name']), figsize=(7, 3))\n plt.plot(xaxis, sevals_1, color='k', ls='-.') #, label='15 Oct 2012'\n plt.plot(xaxis, sevals_2, color='k', ls='-', label='Obs.') #, label='10 Oct 2015'\n plt.plot(xaxis, elastic_profile, color=elas_color, lw=2, label='Elastic plate')\n plt.fill_between(xaxis, sevals1, sevals2, color='Gainsboro', hatch='/', edgecolor='DimGray', linewidth=0, alpha=0.7)\n plt.fill_between(xaxis, sevals2, (plt.axes().get_ylim()[0]), color='Azure')\n plt.legend(loc='lower left')\n plt.axes().set_aspect(5)\n plt.axes().set_xlim(0, transect_length)\n plt.axes().set_yticks([1550, 1600, 1650, 1700])\n #plt.axes().set_yticklabels(['1550', '1600', '1650', '1700'], fontsize=14)\n plt.axes().tick_params(which='both', labelsize=14)\n #plt.axes().set_xticklabels(['0', '1', '2', '3', '4', '5', '6'], fontsize=14)\n plt.axes().set_xlabel('Along-transect distance [m]', fontsize=16)\n plt.axes().set_ylabel('Surface elevation [m a.s.l.]', fontsize=16)\n #plt.title('Eastern Skafta cauldron transect: observed, ideal elastic, ideal viscoelastic. 
E={:.1E}'.format(ESkafta.youngmod), fontsize=18)\n plt.show()\n \n return fig #return the figure instance so it can be modified\n \n\ndef plot_VE_transect(in_dict, colormap=cm.get_cmap('winter_r'), make_legend=False, ylim_lower=1520):\n \"\"\"Read in quantities and plot a viscoelastic progression from a transect dictionary\n Arguments:\n in_dict = a dictionary from sample_transect\n colormap = Matplotlib colormap instance, color scheme to use for plotting\n \"\"\"\n xaxis = in_dict['xaxis']\n transect_length = max(xaxis)\n sevals_1 = in_dict['initial_surface_obs']\n try:\n sevals_2 = in_dict['second_surface_obs']\n except KeyError:\n print('No secondary surface observations saved on transect {}. Setting identical to first surface for plotting.'.format(in_dict['name']))\n sevals_2 = sevals_1\n try:\n ve_profile_series = in_dict['VE_profiles']\n times = in_dict['VE_times'][::10] \n except KeyError:\n print('No viscoelastic profiles saved. Unable to proceed.')\n return #exit the function\n try:\n elastic_profile = in_dict['elastic_profile']\n except KeyError:\n elastic_profile = ve_profile_series[0] #use first VE profile, from time t=0, as stand-in for pure elastic\n \n colors = colormap(np.linspace(0.1, 0.9, num=len(times)+1))\n \n fig = plt.figure('Viscoelastic progression, {}'.format(in_dict['name']), figsize=(7, 3))\n plt.plot(xaxis, sevals_1, color='k', ls='-.') #, label='15 Oct 2012'\n plt.plot(xaxis, sevals_2, color='k', ls='-', label='Obs.') #, label='10 Oct 2015'\n #plt.plot(xaxis, elas_profile_array, color='r', ls=':', label='Elastic beam')\n plt.plot(xaxis, elastic_profile, color=colors[0], lw=2, label='Elastic plate')\n for i,ti in enumerate(times):\n labeltime = int(round(ti/86400)) #time in days\n plt.plot(xaxis, ve_profile_series[i][:], ls='--', color=colors[i+1], lw=2, label='Viscoelastic, t = {} days'.format(labeltime))\n plt.fill_between(xaxis, sevals_1, sevals_2, color='Gainsboro', hatch='/', edgecolor='DimGray', linewidth=0, alpha=0.7)\n plt.fill_between(xaxis, sevals_2, ylim_lower, color='Azure')\n if make_legend:\n plt.legend(loc='lower left')\n else:\n pass\n plt.axes().set_aspect(5)\n plt.axes().set_xlim(0, transect_length)\n plt.axes().set_ylim(ylim_lower, 1700)\n plt.axes().set_yticks([1550, 1600, 1650, 1700])\n plt.axes().set_yticklabels(['', '', '', ''], fontsize=14)\n plt.axes().tick_params(which='both', labelsize=14)\n plt.axes().set_xticklabels([])\n plt.axes().set_xlabel('Along-transect distance [m]', fontsize=16)\n plt.axes().set_ylabel('Surface elevation [m a.s.l.]', fontsize=16)\n #plt.title('Eastern Skafta cauldron transect: observed, ideal elastic, ideal viscoelastic. 
E={:.1E}, eta={:.1E}'.format(ESkafta.youngmod, ESkafta.dyn_viscos), fontsize=18)\n plt.show()\n \n return fig \n\n\n## Plot transects for manuscript\n#endpoints_1 = [(-17.542113802658239, 64.488141277357315),\n# (-17.48586677277758, 64.486397775690023)] #previous preset\nendpoints_1 = [(-17.535314402804026, 64.495192470298178),\n (-17.491964721477643, 64.476306805753708)] #not much crevassing\nendpoints_2 = [(-17.530965405648303, 64.478974272497283),\n (-17.49448994563258, 64.495192470298178)] #medium crevassing\nendpoints_3 = [(-17.543170655730489, 64.487616864746443),\n (-17.484529339243668, 64.486123083370046)] #more crevassing\n\n# transect_dict_1 = sample_transect(endpoints_1, sefunc_2012, sefunc_2015, cauldron_name='Transect 1')\n# transect_dict_2 = sample_transect(endpoints_2, sefunc_2012, sefunc_2015, cauldron_name='Transect 2')\ntransect_dict_3 = sample_transect(endpoints_3, sefunc_2012, sefunc_2015, cauldron_name='Transect 3')\n\n\n#f1 = plot_VE_transect(transect_dict_1, colormap=cm.get_cmap('viridis'))\n#f2 = plot_VE_transect(transect_dict_2, colormap=cm.get_cmap('viridis'))\n#f3 = plot_VE_transect(transect_dict_3, colormap=cm.get_cmap('viridis'))" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.axes", "scipy.interpolate.interp1d", "numpy.mean", "matplotlib.pyplot.fill_between", "matplotlib.cm.get_cmap", "scipy.interpolate.interp2d", "numpy.ma.masked_where", "matplotlib.pyplot.show" ] ]
minouei-kl/CBNetV2
[ "3eeb20b1aaab101091164800e954df719c446bba" ]
[ "mmdet/models/detectors/two_stage.py" ]
[ "import warnings\n\nimport torch\n\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\n@DETECTORS.register_module()\nclass _TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(_TwoStageDetector, self).__init__(init_cfg)\n if pretrained:\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n backbone.pretrained = pretrained\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n roi_head.pretrained = pretrained\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. 
Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def onnx_export(self, img, img_metas):\n\n img_shape = torch._shape_as_tensor(img)[2:]\n img_metas[0]['img_shape_for_onnx'] = img_shape\n x = self.extract_feat(img)\n proposals = self.rpn_head.onnx_export(x, img_metas)\n return self.roi_head.onnx_export(x, proposals, img_metas)\n\n\n@DETECTORS.register_module()\nclass TwoStageDetector(_TwoStageDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n loss_weights=None,\n **kwargs):\n xs = self.extract_feat(img)\n\n if not isinstance(xs[0], (list, tuple)):\n xs = [xs]\n loss_weights = None\n elif loss_weights is None:\n loss_weights = [0.5] + [1]*(len(xs)-1) # Reference CBNet paper\n\n\n def upd_loss(losses, idx, weight):\n new_losses = dict()\n for k,v in losses.items():\n new_k = '{}{}'.format(k,idx)\n if weight != 1 and 'loss' in k:\n new_k = '{}_w{}'.format(new_k, weight)\n if isinstance(v,list) or isinstance(v,tuple):\n new_losses[new_k] = [i*weight for i in v]\n else:new_losses[new_k] = v*weight\n return new_losses\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n for i,x in enumerate(xs):\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n if len(xs) > 1:\n rpn_losses = 
upd_loss(rpn_losses, idx=i, weight=loss_weights[i])\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n for i,x in enumerate(xs):\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n if len(xs) > 1:\n roi_losses = upd_loss(roi_losses, idx=i, weight=loss_weights[i]) \n losses.update(roi_losses)\n\n return losses\n" ]
[ [ "torch.randn", "torch._shape_as_tensor" ] ]
federico-giannoni/pyramid-nested-ner
[ "cc5247a72a936219a11fcf53cf69b75b42f4b61c" ]
[ "pyramid_nested_ner/vectorizers/labels/__init__.py" ]
[ "from pyramid_nested_ner.utils.text import default_tokenizer\nfrom torch.nn.utils.rnn import pad_sequence\n\nimport torch\n\n\nclass PyramidLabelEncoder(object):\n \"\"\"\n Label encoder class responsible for transforming entity annotations\n into torch.Tensors. The `transform` API returns two tensors: one fo-\n r the pyramid layer and a second one for the so-called 'remedy solu-\n tion', that uses IOB2 notation in a multi-label setting.\n \"\"\"\n\n default_tokenizer = default_tokenizer\n\n def __init__(self):\n self.entities = list()\n\n def tokenize(self, text):\n return self.default_tokenizer(text)\n\n def set_tokenizer(self, tokenizer: callable):\n self.default_tokenizer = tokenizer\n\n @property\n def iob2_entities(self):\n return [f'{iob2}-{entity}' for entity in self.entities for iob2 in 'IB' if entity]\n\n def fit(self, entities):\n self.entities = [None, *{entity for entity in entities}]\n\n def transform(self, data, max_depth=None):\n \"\"\"\n Transforms the (list of) DataPoint(s) into tensors. More precisely, this method\n returns two items: a list containing one tensor for each layer up to `max_depth`,\n a second tensor that encodes entities using the 'remedy solution'. The tensors\n in the first list enumerate all possible spans in `data` and assign a single cl-\n ass to each span, so each tensor in the list has a shape B x S x 1, where B is\n the \"batch size\" (the number of samples in `data`) and S is the maximum sequen-\n ce length (in tokens). The remedy solution instead has shape B x S x 2 * C, wh-\n ere C are the unique entities that were passed to `fit` (entity lexicon). Tens-\n ors in the first list are to be used with a `CrossEntropyLoss`, while the reme-\n dy solution is in multi-label format and requires `BCEWithLogitsLoss`.\n :param data:\n :param max_depth: if None, max_depth = inf, therefore remedy solution will be None.\n :return:\n \"\"\"\n y_layers = list()\n y_remedy = None\n if max_depth is not None:\n for layer in range(max_depth):\n y_layer = self._transform_layer(data, layer + 1)\n if y_layer.numel():\n y_layers.append(y_layer)\n else:\n break\n if len(y_layers) == max_depth:\n # use remedy solution to cover entities longer than `max_depth`\n y_remedy = self._remedy_encoding_transform(data, max_depth + 1)\n if not y_remedy.numel():\n y_remedy = None\n else:\n while 1:\n y_layer = self._transform_layer(data, len(y_layers) + 1)\n if y_layer.numel():\n y_layers.append(y_layer)\n else:\n break\n return y_layers, y_remedy\n\n def _ngrams(self, text, order):\n start = 0\n tokens = self.tokenize(text)\n while start + order <= len(tokens):\n yield tokens[start:start + order]\n start = start + 1\n\n def _entity_ngram_bitmap(self, data_point, order):\n for i, ngram in enumerate(self._ngrams(data_point.text, order)):\n ngram_start = i\n ngram_stop = i + len(ngram)\n valid_candidate = False\n for entity in data_point.entities:\n entity_start = len(self.tokenize(data_point.text[:entity.start]))\n entity_stop = entity_start + len(self.tokenize(entity.value))\n if entity_start == ngram_start and entity_stop == ngram_stop:\n yield self.entities.index(entity.name)\n valid_candidate = True\n break # a span can only have one annotation\n if not valid_candidate:\n yield 0\n\n def _remedy_solution_bitmap(self, data_point, order):\n remedy_solution_bitmap = list()\n for i, ngram in enumerate(self._ngrams(data_point.text, order)):\n ngram_start = i\n ngram_stop = i + len(ngram)\n ngram_bitmap = torch.zeros(len(self.iob2_entities))\n for entity in data_point.entities:\n 
entity_start = len(self.tokenize(data_point.text[:entity.start]))\n entity_stop = entity_start + len(self.tokenize(entity.value))\n if ngram_start >= entity_start and ngram_stop <= entity_stop:\n # current n-gram is inside an entity span:\n if entity_start == ngram_start:\n iob2_entity = f'B-{entity.name}'\n elif ngram_stop <= entity_stop:\n iob2_entity = f'I-{entity.name}'\n else:\n raise AssertionError(\" \")\n ngram_bitmap[self.iob2_entities.index(iob2_entity)] = 1\n remedy_solution_bitmap.append(ngram_bitmap.clone())\n if remedy_solution_bitmap:\n return torch.stack(remedy_solution_bitmap)\n return torch.tensor([])\n\n def _remedy_encoding_transform(self, data, order):\n y_layer = list()\n for x in (data if isinstance(data, list) else [data]):\n y_layer.append(self._remedy_solution_bitmap(x, order))\n try:\n return pad_sequence(y_layer, batch_first=True)\n except RuntimeError:\n # pad_sequence can crash if some sequences in `data` are shorter than the number of lay-\n # ers and therefore their encoding yields an empty tensor, while other sequences are tr-\n # ansformed into tensors of shape (n, |iob2_entities|).\n y_layer = [y if y.numel() else torch.zeros(1, len(self.iob2_entities)) for y in y_layer]\n return pad_sequence(y_layer, batch_first=True)\n\n def _transform_layer(self, data, layer):\n y_layer = list()\n for x in (data if isinstance(data, list) else [data]):\n y_layer.append(torch.tensor([bit for bit in self._entity_ngram_bitmap(x, layer)]))\n return pad_sequence(y_layer, batch_first=True).long()\n\n def inverse_transform(self, y):\n return [self._inverse_layer_transform(y_layer) for y_layer in y]\n\n def inverse_remedy_transform(self, y_remedy):\n\n def _recover_span(tensor_slice, entity_name):\n for j, vect in enumerate(tensor_slice[1:]):\n if not vect[self.iob2_entities.index(f'I-{entity_name}')]:\n return tensor_slice[:j + 1]\n return tensor_slice\n\n longest_span, sequences_tags = 0, list()\n\n for sequence in y_remedy:\n sequence_tags = dict()\n for offset, logits in enumerate(sequence):\n for entity in self.entities[1:]:\n if logits[self.iob2_entities.index(f'B-{entity}')]:\n span = _recover_span(sequence[offset:], entity)\n if len(span) not in sequence_tags:\n sequence_tags[len(span)] = ['O' for _ in range(len(sequence) - (len(span) - 1))]\n if 'O' == sequence_tags[len(span)][offset]:\n sequence_tags[len(span)][offset] = f'B-{entity}'\n longest_span = max(len(span), longest_span)\n else:\n sequence_tags[len(span)][offset] = None\n # print(\n # f\"Tokens {span} have two different annotations: \"\n # f\"{sequence_tags[len(span)][2:]}, and {entity}. \"\n # f\"Due to this conflict, both annotations will be\"\n # f\" discarded.\"\n # )\n sequences_tags.append(sequence_tags)\n\n decoded_labels = list()\n for i in range(1, longest_span + 1):\n decoded_labels_for_order = list()\n for sequence, sequence_tags in zip(y_remedy, sequences_tags):\n sequence_length = max(0, len(sequence) - (i - 1))\n if i in sequence_tags:\n span = [iob2_tag or 'O' for iob2_tag in sequence_tags[i]]\n else:\n span = ['O' for _ in range(sequence_length)]\n decoded_labels_for_order.append(span)\n decoded_labels.append(decoded_labels_for_order)\n\n return decoded_labels\n\n def _inverse_layer_transform(self, y_layer):\n sequences_tags = list()\n for sequence in y_layer:\n tags = [f'B-{self.entities[index]}' if index else 'O' for index in sequence]\n sequences_tags.append(tags)\n return sequences_tags\n" ]
[ [ "torch.stack", "torch.nn.utils.rnn.pad_sequence", "torch.tensor" ] ]
mwtoews/pandas
[ "896622165ce73f1c0fdf64e085fa80a3227bb51d" ]
[ "pandas/tests/extension/test_boolean.py" ]
[ "\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import _np_version_under1p14\n\nimport pandas as pd\nfrom pandas.core.arrays.boolean import BooleanDtype\nfrom pandas.tests.extension import base\nimport pandas.util.testing as tm\n\n\ndef make_data():\n return [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False]\n\n\n@pytest.fixture\ndef dtype():\n return BooleanDtype()\n\n\n@pytest.fixture\ndef data(dtype):\n return pd.array(make_data(), dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_twos(dtype):\n return pd.array(np.ones(100), dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing(dtype):\n return pd.array([np.nan, True], dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_sorting(dtype):\n return pd.array([True, True, False], dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing_for_sorting(dtype):\n return pd.array([True, np.nan, False], dtype=dtype)\n\n\n@pytest.fixture\ndef na_cmp():\n # we are np.nan\n return lambda x, y: np.isnan(x) and np.isnan(y)\n\n\n@pytest.fixture\ndef na_value():\n return np.nan\n\n\n@pytest.fixture\ndef data_for_grouping(dtype):\n b = True\n a = False\n na = np.nan\n return pd.array([b, b, na, na, a, a, b], dtype=dtype)\n\n\nclass TestDtype(base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(base.BaseInterfaceTests):\n pass\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n pass\n\n\nclass TestGetitem(base.BaseGetitemTests):\n pass\n\n\nclass TestSetitem(base.BaseSetitemTests):\n pass\n\n\nclass TestMissing(base.BaseMissingTests):\n pass\n\n\nclass TestArithmeticOps(base.BaseArithmeticOpsTests):\n def check_opname(self, s, op_name, other, exc=None):\n # overwriting to indicate ops don't raise an error\n super().check_opname(s, op_name, other, exc=None)\n\n def _check_op(self, s, op, other, op_name, exc=NotImplementedError):\n if exc is None:\n if op_name in (\"__sub__\", \"__rsub__\"):\n # subtraction for bools raises TypeError (but not yet in 1.13)\n if _np_version_under1p14:\n pytest.skip(\"__sub__ does not yet raise in numpy 1.13\")\n with pytest.raises(TypeError):\n op(s, other)\n\n return\n\n result = op(s, other)\n expected = s.combine(other, op)\n\n if op_name in (\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__pow__\",\n \"__rpow__\",\n \"__mod__\",\n \"__rmod__\",\n ):\n # combine keeps boolean type\n expected = expected.astype(\"Int8\")\n elif op_name in (\"__truediv__\", \"__rtruediv__\"):\n # combine with bools does not generate the correct result\n # (numpy behaviour for div is to regard the bools as numeric)\n expected = s.astype(float).combine(other, op)\n if op_name == \"__rpow__\":\n # for rpow, combine does not propagate NaN\n expected[result.isna()] = np.nan\n self.assert_series_equal(result, expected)\n else:\n with pytest.raises(exc):\n op(s, other)\n\n def _check_divmod_op(self, s, op, other, exc=None):\n # override to not raise an error\n 
super()._check_divmod_op(s, op, other, None)\n\n @pytest.mark.skip(reason=\"BooleanArray does not error on ops\")\n def test_error(self, data, all_arithmetic_operators):\n # other specific errors tested in the boolean array specific tests\n pass\n\n\nclass TestComparisonOps(base.BaseComparisonOpsTests):\n def check_opname(self, s, op_name, other, exc=None):\n # overwriting to indicate ops don't raise an error\n super().check_opname(s, op_name, other, exc=None)\n\n def _compare_other(self, s, data, op_name, other):\n self.check_opname(s, op_name, other)\n\n\nclass TestReshaping(base.BaseReshapingTests):\n pass\n\n\nclass TestMethods(base.BaseMethodsTests):\n @pytest.mark.parametrize(\"na_sentinel\", [-1, -2])\n def test_factorize(self, data_for_grouping, na_sentinel):\n # override because we only have 2 unique values\n labels, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)\n expected_labels = np.array(\n [0, 0, na_sentinel, na_sentinel, 1, 1, 0], dtype=np.intp\n )\n expected_uniques = data_for_grouping.take([0, 4])\n\n tm.assert_numpy_array_equal(labels, expected_labels)\n self.assert_extension_array_equal(uniques, expected_uniques)\n\n def test_combine_le(self, data_repeated):\n # override because expected needs to be boolean instead of bool dtype\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 <= x2)\n expected = pd.Series(\n [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],\n dtype=\"boolean\",\n )\n self.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 <= x2)\n expected = pd.Series([a <= val for a in list(orig_data1)], dtype=\"boolean\")\n self.assert_series_equal(result, expected)\n\n def test_searchsorted(self, data_for_sorting, as_series):\n # override because we only have 2 unique values\n data_for_sorting = pd.array([True, False], dtype=\"boolean\")\n b, a = data_for_sorting\n arr = type(data_for_sorting)._from_sequence([a, b])\n\n if as_series:\n arr = pd.Series(arr)\n assert arr.searchsorted(a) == 0\n assert arr.searchsorted(a, side=\"right\") == 1\n\n assert arr.searchsorted(b) == 1\n assert arr.searchsorted(b, side=\"right\") == 2\n\n result = arr.searchsorted(arr.take([0, 1]))\n expected = np.array([0, 1], dtype=np.intp)\n\n tm.assert_numpy_array_equal(result, expected)\n\n # sorter\n sorter = np.array([1, 0])\n assert data_for_sorting.searchsorted(a, sorter=sorter) == 0\n\n\nclass TestCasting(base.BaseCastingTests):\n pass\n\n\nclass TestGroupby(base.BaseGroupbyTests):\n \"\"\"\n Groupby-specific tests are overridden because boolean only has 2\n unique values, base tests uses 3 groups.\n \"\"\"\n\n def test_grouping_grouper(self, data_for_grouping):\n df = pd.DataFrame(\n {\"A\": [\"B\", \"B\", None, None, \"A\", \"A\", \"B\"], \"B\": data_for_grouping}\n )\n gr1 = df.groupby(\"A\").grouper.groupings[0]\n gr2 = df.groupby(\"B\").grouper.groupings[0]\n\n tm.assert_numpy_array_equal(gr1.grouper, df.A.values)\n tm.assert_extension_array_equal(gr2.grouper, data_for_grouping)\n\n @pytest.mark.parametrize(\"as_index\", [True, False])\n def test_groupby_extension_agg(self, as_index, data_for_grouping):\n df = pd.DataFrame({\"A\": [1, 1, 2, 2, 3, 3, 1], \"B\": data_for_grouping})\n result = df.groupby(\"B\", as_index=as_index).A.mean()\n _, index = pd.factorize(data_for_grouping, sort=True)\n\n index = pd.Index(index, name=\"B\")\n expected = pd.Series([3, 1], index=index, name=\"A\")\n if 
as_index:\n self.assert_series_equal(result, expected)\n else:\n expected = expected.reset_index()\n self.assert_frame_equal(result, expected)\n\n def test_groupby_extension_no_sort(self, data_for_grouping):\n df = pd.DataFrame({\"A\": [1, 1, 2, 2, 3, 3, 1], \"B\": data_for_grouping})\n result = df.groupby(\"B\", sort=False).A.mean()\n _, index = pd.factorize(data_for_grouping, sort=False)\n\n index = pd.Index(index, name=\"B\")\n expected = pd.Series([1, 3], index=index, name=\"A\")\n self.assert_series_equal(result, expected)\n\n def test_groupby_extension_transform(self, data_for_grouping):\n valid = data_for_grouping[~data_for_grouping.isna()]\n df = pd.DataFrame({\"A\": [1, 1, 3, 3, 1], \"B\": valid})\n\n result = df.groupby(\"B\").A.transform(len)\n expected = pd.Series([3, 3, 2, 2, 3], name=\"A\")\n\n self.assert_series_equal(result, expected)\n\n def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):\n df = pd.DataFrame({\"A\": [1, 1, 2, 2, 3, 3, 1], \"B\": data_for_grouping})\n df.groupby(\"B\").apply(groupby_apply_op)\n df.groupby(\"B\").A.apply(groupby_apply_op)\n df.groupby(\"A\").apply(groupby_apply_op)\n df.groupby(\"A\").B.apply(groupby_apply_op)\n\n def test_groupby_apply_identity(self, data_for_grouping):\n df = pd.DataFrame({\"A\": [1, 1, 2, 2, 3, 3, 1], \"B\": data_for_grouping})\n result = df.groupby(\"A\").B.apply(lambda x: x.array)\n expected = pd.Series(\n [\n df.B.iloc[[0, 1, 6]].array,\n df.B.iloc[[2, 3]].array,\n df.B.iloc[[4, 5]].array,\n ],\n index=pd.Index([1, 2, 3], name=\"A\"),\n name=\"B\",\n )\n self.assert_series_equal(result, expected)\n\n def test_in_numeric_groupby(self, data_for_grouping):\n df = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2, 3, 3, 1],\n \"B\": data_for_grouping,\n \"C\": [1, 1, 1, 1, 1, 1, 1],\n }\n )\n result = df.groupby(\"A\").sum().columns\n\n if data_for_grouping.dtype._is_numeric:\n expected = pd.Index([\"B\", \"C\"])\n else:\n expected = pd.Index([\"C\"])\n\n tm.assert_index_equal(result, expected)\n\n\nclass TestNumericReduce(base.BaseNumericReduceTests):\n def check_reduce(self, s, op_name, skipna):\n result = getattr(s, op_name)(skipna=skipna)\n expected = getattr(s.astype(\"float64\"), op_name)(skipna=skipna)\n # override parent function to cast to bool for min/max\n if op_name in (\"min\", \"max\") and not pd.isna(expected):\n expected = bool(expected)\n tm.assert_almost_equal(result, expected)\n\n\nclass TestBooleanReduce(base.BaseBooleanReduceTests):\n pass\n\n\nclass TestPrinting(base.BasePrintingTests):\n pass\n\n\n# TODO parsing not yet supported\n# class TestParsing(base.BaseParsingTests):\n# pass\n" ]
[ [ "pandas.util.testing.assert_extension_array_equal", "pandas.util.testing.assert_numpy_array_equal", "pandas.Series", "numpy.isnan", "pandas.factorize", "pandas.array", "pandas.Index", "pandas.DataFrame", "numpy.ones", "pandas.util.testing.assert_almost_equal", "pandas.util.testing.assert_index_equal", "pandas.core.arrays.boolean.BooleanDtype", "pandas.isna", "numpy.array" ] ]
timeeehd/modern-ai-course
[ "78d1dd8e0150b7455848991119bf60ff9a448274" ]
[ "lecture-09/exercise_DL_pcg/exercise_DL_pcg/example_sampling_random_levels.py" ]
[ "\"\"\"\nIn this example I show how to load the network,\nsample 4 levels at random from the latent space\nand then plot them using matplotlib.\n\"\"\"\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom vae_mario import VAEMario\nfrom plotting_utilities import plot_decoded_level\n\n# Loading the model\nmodel_name = \"mario_vae_zdim_2_overfitted\"\nz_dim = 2\nvae = VAEMario(z_dim=z_dim)\nvae.load_state_dict(torch.load(f\"./models/{model_name}.pt\"))\n\n# Sampling random zs\nzs = 2.5 * torch.randn((4, z_dim))\n\n# Getting levels from them using the decoder\nlevels = vae.decode(zs)\n# print(levels.shape)\nlevel_imgs = [plot_decoded_level(level) for level in levels]\n\n# Plotting\n_, axes = plt.subplots(1, 4, figsize=(7 * 4, 7))\nfor level_img, ax in zip(level_imgs, axes):\n ax.imshow(level_img)\n ax.axis(\"off\")\n\nplt.tight_layout()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "torch.load", "torch.randn", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
youngeun1209/beetlCompetiton
[ "8d3b0669cbc5809cdcee55f828789aa4e5b375a2" ]
[ "task1_sleep/train_sleep_DSN.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 28 00:10:41 2021\n\n@author: yelee\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 23 17:17:58 2021\n\n@author: yelee\n\"\"\"\n\nfrom braindecode.util import set_random_seeds\nfrom braindecode.util import np_to_var, var_to_np\nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom numpy.random import RandomState\nimport os.path as osp\nimport pickle\nimport time\nimport torch\nfrom torch import nn\nimport torch.backends.cudnn as cudnn\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport argparse\nimport sys\nimport os\n__file__ = '/exSSD/projects/beetlCompetition/code'\nsys.path.append(__file__)\nfrom util.dsn import DeepSleepNet\nfrom util.utilfunc import get_balanced_batches\nfrom util.preproc import plot_confusion_matrix\nfrom util.focalloss import FocalLoss as FocalLoss\n\nimport tensorflow as tf\n# from EEGModels import EEGNet, ShallowConvNet, DeepConvNet\n\ncuda = torch.cuda.is_available()\nprint('gpu: ', cuda)\ndevice = 'cuda' if cuda else 'cpu'\n\n\nseed=42\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\nnp.random.seed(seed)\ntorch.backends.cudnn.deterministic = True\nrng = RandomState(seed)\nset_random_seeds(seed=seed, cuda=cuda)\n\n# %% pytorch GPU allocation\nGPU_NUM = 0 # 원하는 GPU 번호 입력\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\ntorch.cuda.set_device(device) # change allocation of current GPU\nprint ('Current cuda device ', torch.cuda.current_device()) # check\n\n\n# %% Load data\n# Directly download data and indicate their location:\nsavebase = '/exSSD/projects/beetlCompetition/data/'\n\n# source\nsourcebase = savebase + 'SleepSource/'\n\nwith open(osp.join(sourcebase, \"headerInfo.npy\"), 'rb') as f:\n info_source = pickle.load(f)\n\nX_source, y_source = [], []\ny_sub = []\nsubN = 0\n\nfor i_sub in range(39):\n for i_record in [1,2]:\n with open(osp.join(sourcebase, \"training_s%dr%dX.npy\" %(i_sub,i_record)), 'rb') as f:\n X = pickle.load(f)\n X_source.append(X)\n with open(osp.join(sourcebase, \"training_s%dr%dy.npy\" %(i_sub,i_record)), 'rb') as f:\n y = pickle.load(f)\n y_source.append(y)\n y_sub.append(np.tile(i_sub,len(y)))\nX_source = np.concatenate(X_source)\ny_source = np.concatenate(y_source)\ny_sub = np.concatenate(y_sub)\nprint(\"Source: there are {} trials with {} electrodes and {} time samples\".format(*X_source.shape))\n\n# %% phase 1 target data use\n# target - s0r1, s0r2, s1r1, s1r2, s2r1, s2r2, s3r1, s3r2, s4r1, s4r2, s5r1, s5r2\ntargetbase = savebase + 'leaderboardSleep/sleep_target/'\n\nwith open(osp.join(targetbase, \"headerInfo.npy\"), 'rb') as f:\n info_target = pickle.load(f)\n \nX_target, y_target = [], []\nfor i_sub in [0,1,4,5]:\n for i_record in [1]:\n with open(osp.join(targetbase, \"leaderboard_s%dr%dX.npy\" %(i_sub,i_record)), 'rb') as f:\n X_target.append(pickle.load(f))\n with open(osp.join(targetbase, \"leaderboard_s%dr%dy.npy\" %(i_sub,i_record)), 'rb') as f:\n y_target.append(pickle.load(f))\nfor i_sub in [2,3]:\n for i_record in [1,2]:\n with open(osp.join(targetbase, \"leaderboard_s%dr%dX.npy\" %(i_sub,i_record)), 'rb') as f:\n X_target.append(pickle.load(f))\n with open(osp.join(targetbase, \"leaderboard_s%dr%dy.npy\" %(i_sub,i_record)), 'rb') as f:\n y_target.append(pickle.load(f))\nX_target = np.concatenate(X_target)\ny_target = np.concatenate(y_target)\nprint(\"Target: there are {} trials with {} electrodes 
and {} time samples\".format(*X_target.shape))\n\nprint(\"Combining source and target for training:\")\n\nX_val, y_val = [], []\nfor i_sub in [4,5]:\n for i_record in [2]:\n with open(osp.join(targetbase, \"leaderboard_s%dr%dX.npy\" %(i_sub,i_record)), 'rb') as f:\n X_val.append(pickle.load(f))\n with open(osp.join(targetbase, \"leaderboard_s%dr%dy.npy\" %(i_sub,i_record)), 'rb') as f:\n y_val.append(pickle.load(f))\nX_val = np.concatenate(X_val)\ny_val = np.concatenate(y_val)\nprint(\"\\nValidation: there are {} trials with {} electrodes and {} time samples\".format(*X_val.shape))\n\nX_test, y_test = [], []\nfor i_sub in [0,1]:\n for i_record in [2]:\n with open(osp.join(targetbase, \"leaderboard_s%dr%dX.npy\" %(i_sub,i_record)), 'rb') as f:\n X_test.append(pickle.load(f))\n with open(osp.join(targetbase, \"leaderboard_s%dr%dy.npy\" %(i_sub,i_record)), 'rb') as f:\n y_test.append(pickle.load(f))\nX_test = np.concatenate(X_test)\ny_test = np.concatenate(y_test)\nprint(\"\\nTest: there are {} trials with {} electrodes and {} time samples\".format(*X_test.shape))\n\n# %% combine\nX_train = np.concatenate([X_source, X_target])\ny_train = np.concatenate([y_source, y_target])\nprint(\"Combining source and target for training:\")\nprint(\"Train: there are {} trials with {} electrodes and {} time samples\".format(*X_train.shape))\n\n\n# % the number of training samples\nimport collections\nprint(collections.Counter(y_train))\nprint(collections.Counter(y_val))\nprint(collections.Counter(y_test))\n\n# %%\nclass TrainObject(object):\n def __init__(self, X, y):\n assert len(X) == len(y)\n mean = np.mean(X, axis=2, keepdims=True)\n # Here normalise across the window, when channel size is not large enough\n # In motor imagery kit, we put axis = 1, across channel as an example\n std = np.std(X, axis=2, keepdims=True)\n X = (X - mean) / std\n # we scale it to 1000 as a better training scale of the shallow CNN\n # according to the orignal work of the paper referenced above\n self.X = X.astype(np.float32)*1e3\n self.y = y.astype(np.int64)\n\ntrain_set = TrainObject(X_train, y=y_train)\nvalid_set = TrainObject(X_val, y=y_val)\ntest_set = TrainObject(X_test, y=y_test)\n\n\n\n# %%\ninput_time_length = X_train.shape[2]\nin_chans = X_train.shape[1]\nn_classes = len(np.unique(y_train))\n\n# %% DeepSleepNet\n\nmodel = DeepSleepNet(Fs = 100, ch=in_chans, nclass=n_classes)\nmodel.cuda()\n# loss\nparser = argparse.ArgumentParser(description=\"Feature Mearusement\")\nparser.add_argument(\"--lr\",default=0.05, type=float, help=\"learning rate\")\n# parser.add_argument(\"--resume\", \"-r\", action=\"store_true\", help=\"resume from checkpoint\")\nargs = parser.parse_args()\n\ncriterion = nn.CrossEntropyLoss()\n# criterion = seq_cel if args.loss == 'ce' else gdl\n\noptimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=0.5*1e-3)\n# optimizer = optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.5*1e-3)\n\nlr_scheduler = StepLR(optimizer, step_size=100, gamma=0.1)\n\n\n\n# %%\nsavebase_log='/exSSD/projects/beetlCompetition/learning_logs/task1_Sleep/check_final3/'\n\nsavename = savebase_log + 'DSN_model_sleep.pth'\n\ntotal_epoch = -1\nTlosses, Taccuracies = [], []\nVlosses, Vaccuracies = [], []\nhighest_acc = 0\n\nstart = time.time()\nbatch_size = 2000\nn_epoch = 2000\n\nfor i_epoch in range(n_epoch):\n total_epoch += 1\n # Randomize batches ids and get iterater 'i_trials_in_batch'\n i_trials_in_batch = get_balanced_batches(len(train_set.X), rng, shuffle=True, batch_size=batch_size)\n # Set 
model to training mode\n model.train()\n for i_trials in i_trials_in_batch:\n # Have to add empty fourth dimension to X for training\n batch_X = train_set.X[i_trials]\n batch_y = train_set.y[i_trials]\n # convert from ndarray to torch tensor\n net_in = np_to_var(batch_X)\n net_target = np_to_var(F.one_hot(torch.tensor(batch_y), num_classes=n_classes))\n if cuda:\n net_in = net_in.cuda()\n net_target = net_target.cuda()\n # Remove gradients of last backward pass from all parameters\n optimizer.zero_grad()\n # Compute outputs of the network\n outputs = model(net_in)\n # Compute the loss\n loss = criterion(outputs.cpu(), torch.from_numpy(batch_y))\n # Do the backpropagation\n loss.backward()\n # Update parameters with the optimizer\n optimizer.step()\n # Set model to evaluation mode\n model.eval()\n print(\"Epoch {:d}\".format(total_epoch))\n average_acc, average_loss = [], []\n \n # here we compute training accuracy and validation accuracy of current model\n for setname, dataset in (('Train', train_set), ('Valid', valid_set)):\n i_trials_in_batch = get_balanced_batches(len(dataset.X), rng, shuffle=False, batch_size=60)\n outputs=None\n for i_trials in i_trials_in_batch:\n batch_X = dataset.X[i_trials]\n batch_y = dataset.y[i_trials]\n net_in = np_to_var(batch_X)\n if cuda:\n net_in = net_in.cuda()\n toutputs = model(net_in)\n if outputs is None:\n temp=toutputs.cpu()\n outputs=temp.detach().numpy()\n else:\n temp=toutputs.cpu()\n outputs=np.concatenate((outputs,temp.detach().numpy()))\n loss = criterion(torch.tensor(outputs), torch.tensor(dataset.y))\n print(\"{:6s} Loss: {:.5f}\".format(setname, float(var_to_np(loss))))\n predicted_labels = np.argmax((outputs), axis=1)\n accuracy = np.mean(dataset.y == predicted_labels)\n print(\"{:6s} Accuracy: {:.1f}%\".format(setname, accuracy * 100))\n \n if setname == 'Train':\n Tlosses.append(loss)\n Taccuracies.append(accuracy)\n current_Tacc=accuracy\n elif setname == 'Valid':\n Vlosses.append(loss)\n Vaccuracies.append(accuracy)\n if accuracy>=highest_acc:\n torch.save({\n 'in_chans': in_chans,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'n_classes': 6,\n 'input_time_length':input_time_length\n }, savename)\n\n highest_acc=accuracy\n plot_confusion_matrix(dataset.y, predicted_labels, classes=['0', '1', '2', '3', '4', '5'], \n normalize=True, title='Validation confusion matrix')\n plt.show()\n else:\n average_acc.append(accuracy)\n average_loss.append(accuracy)\nend=time.time()\n\nprint('time is {}'.format(end-start))\n\n# %%\nt = np.arange(0., len(Tlosses), 1)+1\nplt.plot(t, Tlosses, 'r', t, Vlosses, 'y')\nplt.legend(('training loss', 'validation loss'))\nplt.savefig(savebase_log+'Training_test_loss_acc{:.3f}.png'.format(accuracy), dpi=300)\nplt.show()\n\nplt.figure()\nplt.plot(t, Taccuracies, 'r', t, Vaccuracies, 'y')\nplt.legend(('training accuracy', 'validation accuracy'))\nplt.savefig(savebase_log+'Training_test_accuracy_acc{:.3f}.png'.format(accuracy), dpi=300)\nplt.show()\n\n\n# %%\ninput_time_length = X_train.shape[2]\nin_chans = X_train.shape[1]\n\nif cuda:\n model.cuda()\ncheckpoint = torch.load(savename)\nmodel.load_state_dict(checkpoint['model_state_dict'])\nmodel.eval()\n\naverage_acc, average_loss = [], []\nsetname = 'testset'\ndataset = test_set\n\ni_trials_in_batch = get_balanced_batches(len(dataset.X), rng, shuffle=False, batch_size=30)\noutputs=None\nfor i_trials in i_trials_in_batch:\n # Have to add empty fourth dimension to X\n batch_X = dataset.X[i_trials]\n batch_y = 
dataset.y[i_trials]\n net_in = np_to_var(batch_X)\n if cuda:\n net_in = net_in.cuda()\n toutputs = model(net_in)\n if outputs is None:\n temp=toutputs.cpu()\n outputs=temp.detach().numpy()\n else:\n temp=toutputs.cpu()\n outputs=np.concatenate((outputs,temp.detach().numpy()))\n\nloss = criterion(torch.tensor(outputs), torch.tensor(dataset.y))\n \nprint(\"{:6s} Loss: {:.5f}\".format(\n setname, float(var_to_np(loss))))\npredicted_labels = np.argmax((outputs), axis=1)\naccuracy = np.mean(dataset.y == predicted_labels)\n\nprint(\"{:6s} Accuracy: {:.1f}%\".format(setname, accuracy * 100))\nplot_confusion_matrix(dataset.y, predicted_labels, classes=['0', '1', '2', '3', '4', '5'], \n normalize=True, title='Validation confusion matrix')\n\nplt.savefig(savebase_log+'Training_test_accuracy{:.3f}.png'.format(accuracy), dpi=300)\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "torch.load", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.mean", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "numpy.unique", "torch.from_numpy", "torch.tensor", "numpy.std", "numpy.argmax", "matplotlib.pyplot.figure", "torch.optim.lr_scheduler.StepLR", "torch.cuda.current_device", "numpy.random.RandomState", "matplotlib.pyplot.show", "numpy.random.seed", "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.manual_seed" ] ]
commoncdp2021/Gun-Gaja-Gun
[ "95295f4ad97500d424b90c270bba6360f455844a" ]
[ "Plotting/list_comp.py" ]
[ "#! /usr/bin/python\n\nimport numpy as np\n\ndef main():\n x = [5,10,15,20,25]\n\n # declare y as an empty list\n y = []\n\n # The not so good way\n for counter in x:\n y.append(counter / 5)\n\n print(\"\\nOld fashioned way: x = {} y = {} \\n\".format(x, y))\n\n\n # The Pythonic way\n # Using list comprehensions\n z = [n/5 for n in x]\n print(\"List Comprehensions: x = {} z = {} \\n\".format(x, z))\n\n # Finally, numpy\n try:\n a = x / 5\n except:\n print(\"No, you can't do that with regular Python lists\\n\")\n\n a = np.array(x)\n b = a / 5\n\n print(\"With Numpy: a = {} b = {} \\n\".format(a, b))\n\n return \"With Numpy: a = {} b = {} \\n\".format(a, b)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array" ] ]
bobo0810/classification
[ "b27397308c5294dcc30a5aaddab4692becfc45d3" ]
[ "Models/Backbone/mynet_metric.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Any\nimport timm\nfrom timm.models import register_model\n\n\nclass MyNet_Metric(nn.Module):\n \"\"\"\n 特征提取网络 输出feature\n \"\"\"\n\n def __init__(self, pretrained, model_name, embedding_size):\n super(MyNet_Metric, self).__init__()\n # !!!该属性必须保留,以区分是否为度量学习!!!\n self.embedding_size = embedding_size\n\n # 特征提取器\n self.features = timm.create_model(\n model_name,\n pretrained=pretrained,\n num_classes=embedding_size, # 修改输出维度\n )\n self.bn = nn.BatchNorm1d(embedding_size)\n\n def forward(self, imgs):\n features = self.features(imgs)\n features = self.bn(features) # 规范化,正则化\n features = F.normalize(features, p=2, dim=1) # 特征归一化,即模长为1\n return features\n\n\n\"\"\"\n注意:\n1. @register_model注册为timm模型\n2. 命名尽量避免与timm模型重名\n\"\"\"\n\n\n@register_model\ndef mynet_metric(\n pretrained, num_classes, model_name=\"efficientnet_b0\", embedding_size=128\n):\n \"\"\"\n pretrained: 是否加载ImageNet预训练参数(接收timm.create_model传参)\n num_classes: 类别数(接收timm.create_model传参)\n\n model_name: timm主干网络名\n embedding_size: 特征维度\n \"\"\"\n print(\"Backbone_Metric come from user-defined\")\n model = MyNet_Metric(pretrained, model_name, embedding_size)\n return model\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.BatchNorm1d" ] ]
rub-ksv/-lrs_avsr1_local-
[ "c743803d5d09461f72ab7dbaf0af73a7077f3c0e", "c743803d5d09461f72ab7dbaf0af73a7077f3c0e" ]
[ "training/trainaudio/asr_train_audio.py", "training/finetuneav/nets_utils.py" ]
[ "#!/usr/bin/env python3\n# encoding: utf-8\n\n# Copyright 2017 Tomoki Hayashi (Nagoya University)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Automatic speech recognition model training script.\"\"\"\n\nimport configargparse\nimport logging\nimport multiprocessing as mp\nimport numpy as np\nimport os\nimport random\nimport subprocess\nimport sys\nfrom espnet.utils.cli_utils import strtobool\nfrom espnet.utils.training.batchfy import BATCH_COUNT_CHOICES\n\n\n# NOTE: you need this func to generate our sphinx doc\ndef get_parser(parser=None, required=True):\n if parser is None:\n parser = configargparse.ArgumentParser(\n description=\"Train an automatic speech recognition (ASR) model on one \\\n CPU, one or multiple GPUs\",\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter,\n )\n # general configuration\n ##############################################################################\n parser.add(\n \"--config\",\n type=str,\n required=True,\n is_config_file=True,\n help=\"config file path\",\n )\n # parser.add('--config', is_config_file=True, help='config file path')\n ##############################################################################\n parser.add(\n \"--config2\",\n is_config_file=True,\n help=\"second config file path that overwrites the settings in `--config`.\",\n )\n parser.add(\n \"--config3\",\n is_config_file=True,\n help=\"third config file path that overwrites the settings \\\n in `--config` and `--config2`.\",\n )\n ##############################################################################\n parser.add_argument(\n \"--ngpu\",\n default=0,\n type=int,\n help=\"Number of GPUs. If not given, use all visible devices\",\n )\n ##############################################################################\n parser.add_argument(\n \"--train-dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\", \"O0\", \"O1\", \"O2\", \"O3\"],\n help=\"Data type for training (only pytorch backend). \"\n \"O0,O1,.. flags require apex. 
See \\\n https://nvidia.github.io/apex/amp.html#opt-levels\",\n )\n ##############################################################################\n parser.add_argument(\n \"--backend\",\n default=\"pytorch\",\n type=str,\n choices=[\"chainer\", \"pytorch\"],\n help=\"Backend library\",\n )\n\n parser.add_argument(\"--outdir\", required=True, type=str, help=\"Output directory\")\n\n parser.add_argument(\"--debugmode\", default=1, type=int, help=\"Debugmode\")\n\n parser.add_argument(\"--dict\", required=True, type=str, help=\"Dictionary\")\n\n parser.add_argument(\"--seed\", default=1, type=int, help=\"Random seed\")\n\n parser.add_argument(\n \"--debugdir\", required=True, type=str, help=\"Output directory for debugging\"\n )\n\n parser.add_argument(\n \"--resume\",\n \"-r\",\n default=\"\",\n nargs=\"?\",\n help=\"Resume the training from snapshot\",\n )\n\n parser.add_argument(\n \"--minibatches\",\n \"-N\",\n type=int,\n default=\"0\",\n help=\"Process only N minibatches (for debug)\",\n )\n\n parser.add_argument(\"--verbose\", \"-V\", default=0, type=int, help=\"Verbose option\")\n\n parser.add_argument(\n \"--tensorboard-dir\",\n required=True,\n type=str,\n nargs=\"?\",\n help=\"Tensorboard log dir path\",\n )\n ##############################################################################\n parser.add_argument(\n \"--report-interval-iters\",\n default=100,\n type=int,\n help=\"Report interval iterations\",\n )\n ##############################################################################\n # task related\n parser.add_argument(\n \"--train-json\",\n required=True,\n type=str,\n help=\"Filename of train label data (json)\",\n )\n parser.add_argument(\n \"--valid-json\",\n required=True,\n type=str,\n help=\"Filename of validation label data (json)\",\n )\n ##############################################################################\n # network architecture\n parser.add_argument(\n \"--model-module\",\n type=str,\n default=None,\n help=\"model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)\",\n )\n # encoder\n parser.add_argument(\n \"--num-encs\", default=1, type=int, help=\"Number of encoders in the model.\"\n )\n # loss related\n parser.add_argument(\n \"--ctc_type\",\n default=\"warpctc\",\n type=str,\n choices=[\"builtin\", \"warpctc\"],\n help=\"Type of CTC implementation to calculate loss.\",\n )\n parser.add_argument(\n \"--mtlalpha\",\n default=0.5,\n type=float,\n help=\"Multitask learning coefficient, \\\n alpha: alpha*ctc_loss + (1-alpha)*att_loss \",\n )\n parser.add_argument(\n \"--lsm-type\",\n const=\"\",\n default=\"\",\n type=str,\n nargs=\"?\",\n choices=[\"\", \"unigram\"],\n help=\"Apply label smoothing with a specified distribution type\",\n )\n parser.add_argument(\n \"--lsm-weight\", default=0.0, type=float, help=\"Label smoothing weight\"\n )\n # recognition options to compute CER/WER\n parser.add_argument(\n \"--report-cer\",\n default=False,\n action=\"store_true\",\n help=\"Compute CER on development set\",\n )\n parser.add_argument(\n \"--report-wer\",\n default=False,\n action=\"store_true\",\n help=\"Compute WER on development set\",\n )\n parser.add_argument(\"--nbest\", type=int, default=1, help=\"Output N-best hypotheses\")\n parser.add_argument(\"--beam-size\", type=int, default=4, help=\"Beam size\")\n parser.add_argument(\"--penalty\", default=0.0, type=float, help=\"Incertion penalty\")\n parser.add_argument(\n \"--maxlenratio\",\n default=0.0,\n type=float,\n help=\"\"\"Input length ratio to obtain max output length.\n If 
maxlenratio=0.0 (default), it uses a end-detect function\n to automatically find maximum hypothesis lengths\"\"\",\n )\n parser.add_argument(\n \"--minlenratio\",\n default=0.0,\n type=float,\n help=\"Input length ratio to obtain min output length\",\n )\n parser.add_argument(\n \"--ctc-weight\", default=0.3, type=float, help=\"CTC weight in joint decoding\"\n )\n parser.add_argument(\n \"--rnnlm\", type=str, default=None, help=\"RNNLM model file to read\"\n )\n parser.add_argument(\n \"--rnnlm-conf\", type=str, default=None, help=\"RNNLM model config file to read\"\n )\n parser.add_argument(\"--lm-weight\", default=0.1, type=float, help=\"RNNLM weight.\")\n parser.add_argument(\"--sym-space\", default=\"<space>\", type=str, help=\"Space symbol\")\n parser.add_argument(\"--sym-blank\", default=\"<blank>\", type=str, help=\"Blank symbol\")\n # minibatch related\n parser.add_argument(\n \"--sortagrad\",\n default=0,\n type=int,\n nargs=\"?\",\n help=\"How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs\",\n )\n parser.add_argument(\n \"--batch-count\",\n default=\"auto\",\n choices=BATCH_COUNT_CHOICES,\n help=\"How to count batch_size. The default (auto) will find \\\n how to count by args.\",\n )\n parser.add_argument(\n \"--batch-size\",\n \"--batch-seqs\",\n \"-b\",\n default=0,\n type=int,\n help=\"Maximum seqs in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-bins\",\n default=0,\n type=int,\n help=\"Maximum bins in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-in\",\n default=0,\n type=int,\n help=\"Maximum input frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-out\",\n default=0,\n type=int,\n help=\"Maximum output frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--batch-frames-inout\",\n default=0,\n type=int,\n help=\"Maximum input+output frames in a minibatch (0 to disable)\",\n )\n parser.add_argument(\n \"--maxlen-in\",\n \"--batch-seq-maxlen-in\",\n default=800,\n type=int,\n metavar=\"ML\",\n help=\"When --batch-count=seq, batch size is reduced if the input sequence \\\n length > ML.\",\n )\n parser.add_argument(\n \"--maxlen-out\",\n \"--batch-seq-maxlen-out\",\n default=150,\n type=int,\n metavar=\"ML\",\n help=\"When --batch-count=seq, batch size is reduced if the output sequence \\\n length > ML\",\n )\n parser.add_argument(\n \"--n-iter-processes\",\n default=0,\n type=int,\n help=\"Number of processes of iterator\",\n )\n parser.add_argument(\n \"--preprocess-conf\",\n type=str,\n required=True,\n nargs=\"?\",\n help=\"The configuration file for the pre-processing\",\n )\n # optimization related\n parser.add_argument(\n \"--opt\",\n default=\"adadelta\",\n type=str,\n choices=[\"adadelta\", \"adam\", \"noam\"],\n help=\"Optimizer\",\n )\n parser.add_argument(\n \"--accum-grad\", default=1, type=int, help=\"Number of gradient accumuration\"\n )\n parser.add_argument(\n \"--eps\", default=1e-8, type=float, help=\"Epsilon constant for optimizer\"\n )\n parser.add_argument(\n \"--eps-decay\", default=0.01, type=float, help=\"Decaying ratio of epsilon\"\n )\n parser.add_argument(\n \"--weight-decay\", default=0.0, type=float, help=\"Weight decay ratio\"\n )\n parser.add_argument(\n \"--criterion\",\n default=\"acc\",\n type=str,\n choices=[\"loss\", \"acc\"],\n help=\"Criterion to perform epsilon decay\",\n )\n parser.add_argument(\n \"--threshold\", default=1e-4, type=float, help=\"Threshold to stop iteration\"\n )\n parser.add_argument(\n 
\"--epochs\", \"-e\", default=30, type=int, help=\"Maximum number of epochs\"\n )\n parser.add_argument(\n \"--early-stop-criterion\",\n default=\"validation/main/acc\",\n type=str,\n nargs=\"?\",\n help=\"Value to monitor to trigger an early stopping of the training\",\n )\n parser.add_argument(\n \"--patience\",\n default=3,\n type=int,\n nargs=\"?\",\n help=\"Number of epochs to wait without improvement before stopping \\\n the training\",\n )\n parser.add_argument(\n \"--grad-clip\", default=5, type=float, help=\"Gradient norm threshold to clip\"\n )\n parser.add_argument(\n \"--num-save-attention\",\n default=3,\n type=int,\n help=\"Number of samples of attention to be saved\",\n )\n parser.add_argument(\n \"--grad-noise\",\n type=strtobool,\n default=False,\n help=\"The flag to switch to use noise injection to gradients during training\",\n )\n # asr_mix related\n parser.add_argument(\n \"--num-spkrs\",\n default=1,\n type=int,\n choices=[1, 2],\n help=\"Maximum number of speakers in the speech for multi-speaker \\\n speech recognition task.\",\n )\n # speech translation related\n parser.add_argument(\n \"--context-residual\",\n default=False,\n type=strtobool,\n nargs=\"?\",\n help=\"The flag to switch to use context vector residual in the decoder network\",\n )\n parser.add_argument(\n \"--replace-sos\",\n default=False,\n nargs=\"?\",\n help=\"Replace <sos> in the decoder with a target language ID \\\n (the first token in the target sequence)\",\n )\n # finetuning related\n parser.add_argument(\n \"--enc-init\",\n default=None,\n type=str,\n help=\"Pre-trained ASR model to initialize encoder.\",\n )\n parser.add_argument(\n \"--enc-init-mods\",\n default=\"enc.enc.\",\n type=lambda s: [str(mod) for mod in s.split(\",\") if s != \"\"],\n help=\"List of encoder modules to initialize, separated by a comma.\",\n )\n parser.add_argument(\n \"--dec-init\",\n default=None,\n type=str,\n help=\"Pre-trained ASR, MT or LM model to initialize decoder.\",\n )\n parser.add_argument(\n \"--dec-init-mods\",\n default=\"att., dec.\",\n type=lambda s: [str(mod) for mod in s.split(\",\") if s != \"\"],\n help=\"List of decoder modules to initialize, separated by a comma.\",\n )\n # front end related\n parser.add_argument(\n \"--use-frontend\",\n type=strtobool,\n default=False,\n help=\"The flag to switch to use frontend system.\",\n )\n\n # WPE related\n parser.add_argument(\n \"--use-wpe\",\n type=strtobool,\n default=False,\n help=\"Apply Weighted Prediction Error\",\n )\n parser.add_argument(\n \"--wtype\",\n default=\"blstmp\",\n type=str,\n choices=[\n \"lstm\",\n \"blstm\",\n \"lstmp\",\n \"blstmp\",\n \"vgglstmp\",\n \"vggblstmp\",\n \"vgglstm\",\n \"vggblstm\",\n \"gru\",\n \"bgru\",\n \"grup\",\n \"bgrup\",\n \"vgggrup\",\n \"vggbgrup\",\n \"vgggru\",\n \"vggbgru\",\n ],\n help=\"Type of encoder network architecture \"\n \"of the mask estimator for WPE. \"\n \"\",\n )\n parser.add_argument(\"--wlayers\", type=int, default=2, help=\"\")\n parser.add_argument(\"--wunits\", type=int, default=300, help=\"\")\n parser.add_argument(\"--wprojs\", type=int, default=300, help=\"\")\n parser.add_argument(\"--wdropout-rate\", type=float, default=0.0, help=\"\")\n parser.add_argument(\"--wpe-taps\", type=int, default=5, help=\"\")\n parser.add_argument(\"--wpe-delay\", type=int, default=3, help=\"\")\n parser.add_argument(\n \"--use-dnn-mask-for-wpe\",\n type=strtobool,\n default=False,\n help=\"Use DNN to estimate the power spectrogram. 
\"\n \"This option is experimental.\",\n )\n # Beamformer related\n parser.add_argument(\"--use-beamformer\", type=strtobool, default=True, help=\"\")\n parser.add_argument(\n \"--btype\",\n default=\"blstmp\",\n type=str,\n choices=[\n \"lstm\",\n \"blstm\",\n \"lstmp\",\n \"blstmp\",\n \"vgglstmp\",\n \"vggblstmp\",\n \"vgglstm\",\n \"vggblstm\",\n \"gru\",\n \"bgru\",\n \"grup\",\n \"bgrup\",\n \"vgggrup\",\n \"vggbgrup\",\n \"vgggru\",\n \"vggbgru\",\n ],\n help=\"Type of encoder network architecture \"\n \"of the mask estimator for Beamformer.\",\n )\n parser.add_argument(\"--blayers\", type=int, default=2, help=\"\")\n parser.add_argument(\"--bunits\", type=int, default=300, help=\"\")\n parser.add_argument(\"--bprojs\", type=int, default=300, help=\"\")\n parser.add_argument(\"--badim\", type=int, default=320, help=\"\")\n parser.add_argument(\n \"--bnmask\",\n type=int,\n default=2,\n help=\"Number of beamforming masks, \" \"default is 2 for [speech, noise].\",\n )\n parser.add_argument(\n \"--ref-channel\",\n type=int,\n default=-1,\n help=\"The reference channel used for beamformer. \"\n \"By default, the channel is estimated by DNN.\",\n )\n parser.add_argument(\"--bdropout-rate\", type=float, default=0.0, help=\"\")\n # Feature transform: Normalization\n parser.add_argument(\n \"--stats-file\",\n type=str,\n default=None,\n help=\"The stats file for the feature normalization\",\n )\n parser.add_argument(\n \"--apply-uttmvn\",\n type=strtobool,\n default=True,\n help=\"Apply utterance level mean \" \"variance normalization.\",\n )\n parser.add_argument(\"--uttmvn-norm-means\", type=strtobool, default=True, help=\"\")\n parser.add_argument(\"--uttmvn-norm-vars\", type=strtobool, default=False, help=\"\")\n # Feature transform: Fbank\n parser.add_argument(\n \"--fbank-fs\",\n type=int,\n default=16000,\n help=\"The sample frequency used for \" \"the mel-fbank creation.\",\n )\n parser.add_argument(\n \"--n-mels\", type=int, default=80, help=\"The number of mel-frequency bins.\"\n )\n parser.add_argument(\"--fbank-fmin\", type=float, default=0.0, help=\"\")\n parser.add_argument(\"--fbank-fmax\", type=float, default=None, help=\"\")\n\n return parser\n\n\ndef main(cmd_args):\n parser = get_parser()\n args, _ = parser.parse_known_args(cmd_args)\n if args.backend == \"chainer\" and args.train_dtype != \"float32\":\n raise NotImplementedError(\n f\"chainer backend does not support --train-dtype {args.train_dtype}.\"\n \"Use --dtype float32.\"\n )\n if args.ngpu == 0 and args.train_dtype in (\"O0\", \"O1\", \"O2\", \"O3\", \"float16\"):\n raise ValueError(\n f\"--train-dtype {args.train_dtype} does not support the CPU backend.\"\n )\n\n from espnet.utils.dynamic_import import dynamic_import\n\n # specify audio only, video only, hybrid mode\n\n if args.model_module is None:\n model_module = \"espnet.nets.\" + args.backend + \"_backend.e2e_asr:E2E\"\n else:\n model_module = args.model_module\n model_class = dynamic_import(model_module)\n model_class.add_arguments(parser)\n args = parser.parse_args(cmd_args)\n\n args.model_module = model_module\n if \"chainer_backend\" in args.model_module:\n args.backend = \"chainer\"\n if \"pytorch_backend\" in args.model_module:\n args.backend = \"pytorch\"\n\n # logging info\n if args.verbose > 0:\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n level=logging.WARN,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n 
)\n logging.warning(\"Skip DEBUG/INFO messages\")\n\n # If --ngpu is not given,\n # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices\n # 2. if nvidia-smi exists, use all devices\n # 3. else ngpu=0\n if args.ngpu is None:\n cvd = os.environ.get(\"CUDA_VISIBLE_DEVICES\")\n if cvd is not None:\n ngpu = len(cvd.split(\",\"))\n else:\n logging.warning(\"CUDA_VISIBLE_DEVICES is not set.\")\n try:\n p = subprocess.run(\n [\"nvidia-smi\", \"-L\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n except (subprocess.CalledProcessError, FileNotFoundError):\n ngpu = 0\n else:\n ngpu = len(p.stderr.decode().split(\"\\n\")) - 1\n else:\n ngpu = args.ngpu\n logging.info(f\"ngpu: {ngpu}\")\n\n # display PYTHONPATH\n logging.info(\"python path = \" + os.environ.get(\"PYTHONPATH\", \"(None)\"))\n\n # set random seed\n logging.info(\"random seed = %d\" % args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n # load dictionary for debug log\n if args.dict is not None:\n with open(args.dict, \"rb\") as f:\n dictionary = f.readlines()\n char_list = [entry.decode(\"utf-8\").split(\" \")[0] for entry in dictionary]\n char_list.insert(0, \"<blank>\")\n char_list.append(\"<eos>\")\n args.char_list = char_list\n else:\n args.char_list = None\n\n # train\n logging.info(\"backend = \" + args.backend)\n\n if args.num_spkrs == 1:\n if args.backend == \"chainer\":\n from espnet.asr.chainer_backend.asr import train\n\n train(args)\n elif args.backend == \"pytorch\":\n\n from espnet.trainaudio.asr import train\n\n train(args)\n else:\n raise ValueError(\"Only chainer and pytorch are supported.\")\n else:\n # FIXME(kamo): Support --model-module\n if args.backend == \"pytorch\":\n from espnet.asr.pytorch_backend.asr_mix import train\n\n train(args)\n else:\n raise ValueError(\"Only pytorch is supported.\")\n\n\nif __name__ == \"__main__\":\n # NOTE(kan-bayashi): setting multiple times causes RuntimeError\n # See also https://github.com/pytorch/pytorch/issues/3492\n try:\n mp.set_start_method(\"spawn\")\n except RuntimeError:\n pass\n main(sys.argv[1:])\n", "# -*- coding: utf-8 -*-\n\n\"\"\"Network related utility tools.\"\"\"\n\nimport numpy as np\nimport torch\n\n\ndef to_device(m, x):\n \"\"\"Send tensor into the device of the module.\n\n Args:\n m (torch.nn.Module): Torch module.\n x (Tensor): Torch tensor.\n\n Returns:\n Tensor: Torch tensor located in the same place as torch module.\n\n \"\"\"\n assert isinstance(m, torch.nn.Module)\n device = next(m.parameters()).device\n return x.to(device)\n\n\ndef pad_list(xs, pad_value):\n \"\"\"Perform padding for the list of tensors.\n\n Args:\n xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].\n pad_value (float): Value for padding.\n\n Returns:\n Tensor: Padded tensor (B, Tmax, `*`).\n\n Examples:\n >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]\n >>> x\n [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]\n >>> pad_list(x, 0)\n tensor([[1., 1., 1., 1.],\n [1., 1., 0., 0.],\n [1., 0., 0., 0.]])\n\n \"\"\"\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, : xs[i].size(0)] = xs[i]\n\n return pad\n\n\ndef make_pad_mask(lengths, xs=None, length_dim=-1):\n \"\"\"Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor. 
If set, masks will be\n the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n \"\"\"\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask\n\n\ndef make_non_pad_mask(lengths, xs=None, length_dim=-1):\n \"\"\"Make mask tensor containing indices of non-padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor. 
If set, masks will be the\n same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n ByteTensor: mask tensor containing indices of padded part.\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[1, 1, 1, 1 ,1],\n [1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1],\n [1, 1, 1, 1]],\n [[1, 1, 1, 0],\n [1, 1, 1, 0]],\n [[1, 1, 0, 0],\n [1, 1, 0, 0]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_non_pad_mask(lengths, xs, 1)\n tensor([[[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)\n >>> make_non_pad_mask(lengths, xs, 2)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n\n \"\"\"\n return ~make_pad_mask(lengths, xs, length_dim)\n\n\ndef mask_by_length(xs, lengths, fill=0):\n \"\"\"Mask tensor according to length.\n\n Args:\n xs (Tensor): Batch of input tensor (B, `*`).\n lengths (LongTensor or List): Batch of lengths (B,).\n fill (int or float): Value to fill masked part.\n\n Returns:\n Tensor: Batch of masked input tensor (B, `*`).\n\n Examples:\n >>> x = torch.arange(5).repeat(3, 1) + 1\n >>> x\n tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5]])\n >>> lengths = [5, 3, 2]\n >>> mask_by_length(x, lengths)\n tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 0, 0],\n [1, 2, 0, 0, 0]])\n\n \"\"\"\n assert xs.size(0) == len(lengths)\n ret = xs.data.new(*xs.size()).fill_(fill)\n for i, l in enumerate(lengths):\n ret[i, :l] = xs[i, :l]\n return ret\n\n\ndef th_accuracy(pad_outputs, pad_targets, ignore_label):\n \"\"\"Calculate accuracy.\n\n Args:\n pad_outputs (Tensor): Prediction tensors (B * Lmax, D).\n pad_targets (LongTensor): Target label tensors (B, Lmax, D).\n ignore_label (int): Ignore label id.\n\n Returns:\n float: Accuracy value (0.0 - 1.0).\n\n \"\"\"\n pad_pred = pad_outputs.view(\n pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)\n ).argmax(2)\n mask = pad_targets != ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)\n\n\ndef to_torch_tensor(x):\n \"\"\"Change to torch.Tensor or ComplexTensor from numpy.ndarray.\n\n Args:\n x: Inputs. 
It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.\n\n Returns:\n Tensor or ComplexTensor: Type converted inputs.\n\n Examples:\n >>> xs = np.ones(3, dtype=np.float32)\n >>> xs = to_torch_tensor(xs)\n tensor([1., 1., 1.])\n >>> xs = torch.ones(3, 4, 5)\n >>> assert to_torch_tensor(xs) is xs\n >>> xs = {'real': xs, 'imag': xs}\n >>> to_torch_tensor(xs)\n ComplexTensor(\n Real:\n tensor([1., 1., 1.])\n Imag;\n tensor([1., 1., 1.])\n )\n\n \"\"\"\n # If numpy, change to torch tensor\n if isinstance(x, np.ndarray):\n if x.dtype.kind == \"c\":\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n return ComplexTensor(x)\n else:\n return torch.from_numpy(x)\n\n # If {'real': ..., 'imag': ...}, convert to ComplexTensor\n elif isinstance(x, dict):\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n if \"real\" not in x or \"imag\" not in x:\n raise ValueError(\"has 'real' and 'imag' keys: {}\".format(list(x)))\n # Relative importing because of using python3 syntax\n return ComplexTensor(x[\"real\"], x[\"imag\"])\n\n # If torch.Tensor, as it is\n elif isinstance(x, torch.Tensor):\n return x\n\n else:\n error = (\n \"x must be numpy.ndarray, torch.Tensor or a dict like \"\n \"{{'real': torch.Tensor, 'imag': torch.Tensor}}, \"\n \"but got {}\".format(type(x))\n )\n try:\n from torch_complex.tensor import ComplexTensor\n except Exception:\n # If PY2\n raise ValueError(error)\n else:\n # If PY3\n if isinstance(x, ComplexTensor):\n return x\n else:\n raise ValueError(error)\n" ]
[ [ "numpy.random.seed" ], [ "torch.sum", "torch.from_numpy", "torch.arange" ] ]
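Illustrative sketch of the padding utilities documented in nets_utils.py above, written with plain torch so it runs without ESPnet: pad a ragged batch and build the corresponding non-pad mask, mirroring what pad_list and make_non_pad_mask compute.

import torch

xs = [torch.ones(4), torch.ones(2), torch.ones(1)]
lengths = [x.size(0) for x in xs]

# pad_list equivalent: (B, Tmax) with zeros in the padded positions
padded = torch.nn.utils.rnn.pad_sequence(xs, batch_first=True, padding_value=0.0)
print(padded)
# tensor([[1., 1., 1., 1.],
#         [1., 1., 0., 0.],
#         [1., 0., 0., 0.]])

# make_non_pad_mask equivalent: True where a real (non-padded) frame exists
maxlen = padded.size(1)
mask = torch.arange(maxlen).unsqueeze(0) < torch.tensor(lengths).unsqueeze(1)
print(mask)
# tensor([[ True,  True,  True,  True],
#         [ True,  True, False, False],
#         [ True, False, False, False]])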
HxJi/google-research
[ "3f76493a1e194198f1d6a48e4e1c4381b2433170" ]
[ "state_of_sparsity/sparse_rn50/imagenet_train_eval.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script trains a ResNet model that implements various pruning methods.\n\nCode partially branched out from\nthird_party/cloud_tpu/models/resnet/resnet_main.py.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\nimport sys\nsys.path.append('/home/hxji1997/google-research')\n\nfrom tensorflow.python.estimator import estimator\nfrom state_of_sparsity.sparse_rn50 import imagenet_input\nfrom state_of_sparsity.sparse_rn50 import resnet_model\nfrom state_of_sparsity.sparse_rn50 import utils\nfrom tensorflow.contrib.model_pruning.python import pruning\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\nfrom tensorflow.contrib.tpu.python.tpu import tpu_estimator\nfrom tensorflow.contrib.training.python.training import evaluation\n\nflags.DEFINE_string(\n 'precision',\n default='float32',\n help=('Precision to use; one of: {bfloat16, float32}'))\nflags.DEFINE_integer('num_workers', 1, 'Number of training workers.')\nflags.DEFINE_float(\n 'base_learning_rate',\n default=0.1,\n help=('Base learning rate when train batch size is 256.'))\n\nflags.DEFINE_float(\n 'momentum',\n default=0.9,\n help=('Momentum parameter used in the MomentumOptimizer.'))\nflags.DEFINE_integer('ps_task', 0,\n 'Task id of the replica running the training.')\nflags.DEFINE_float(\n 'weight_decay',\n default=1e-4,\n help=('Weight decay coefficiant for l2 regularization.'))\nflags.DEFINE_string('master', '', 'Master job.')\nflags.DEFINE_string('tpu_job_name', None, 'For complicated TensorFlowFlock')\nflags.DEFINE_integer(\n 'steps_per_checkpoint',\n default=1000,\n help=('Controls how often checkpoints are generated. More steps per '\n 'checkpoint = higher utilization of TPU and generally higher '\n 'steps/sec'))\nflags.DEFINE_integer(\n 'keep_checkpoint_max', default=0, help=('Number of checkpoints to hold.'))\nflags.DEFINE_string(\n 'data_directory',\n None,\n 'The location of the sstable used for training.')\nflags.DEFINE_string(\n 'data_format',\n default='channels_last',\n help=('A flag to override the data format used in the model. The value'\n ' is either channels_first or channels_last. To run the network on'\n ' CPU or TPU, channels_last should be used. For GPU, channels_first'\n ' will improve performance.'))\nflags.DEFINE_bool(\n 'transpose_input',\n default=False,\n help='Use TPU double transpose optimization')\nflags.DEFINE_integer(\n 'resnet_depth',\n default=50,\n help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'\n ' 200}. ResNet-18 and 34 use the pre-activation residual blocks'\n ' without bottleneck layers. The other models use pre-activation'\n ' bottleneck layers. 
Deeper models require more training time and'\n ' more memory and may require reducing --train_batch_size to prevent'\n ' running out of memory.'))\nflags.DEFINE_float('label_smoothing', 0.1,\n 'Relax confidence in the labels by (1-label_smoothing).')\nflags.DEFINE_integer(\n 'train_steps',\n default=2,\n help=('The number of steps to use for training. Default is 112590 steps'\n ' which is approximately 90 epochs at batch size 1024. This flag'\n ' should be adjusted according to the --train_batch_size flag.'))\nflags.DEFINE_integer(\n 'train_batch_size', default=1024, help='Batch size for training.')\nflags.DEFINE_integer(\n 'eval_batch_size', default=1000, help='Batch size for evaluation.')\nflags.DEFINE_integer(\n 'num_train_images', default=1281167, help='Size of training data set.')\nflags.DEFINE_integer(\n 'num_eval_images', default=50000, help='Size of evaluation data set.')\nflags.DEFINE_integer(\n 'num_label_classes', default=1000, help='Number of classes, at least 2')\nflags.DEFINE_integer(\n 'steps_per_eval',\n default=1251,\n help=('Controls how often evaluation is performed. Since evaluation is'\n ' fairly expensive, it is advised to evaluate as infrequently as'\n ' possible (i.e. up to --train_steps, which evaluates the model only'\n ' after finishing the entire training regime).'))\nflags.DEFINE_bool(\n 'use_tpu',\n default=False,\n help=('Use TPU to execute the model for training and evaluation. If'\n ' --use_tpu=false, will use whatever devices are available to'\n ' TensorFlow by default (e.g. CPU and GPU)'))\nflags.DEFINE_integer(\n 'iterations_per_loop',\n default=1251,\n help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'\n ' If the number of iterations in the loop would exceed the number of'\n ' train steps, the loop will exit before reaching'\n ' --iterations_per_loop. The larger this value is, the higher the'\n ' utilization on the TPU.'))\nflags.DEFINE_integer(\n 'num_parallel_calls',\n default=64,\n help=('Number of parallel threads in CPU for the input pipeline'))\nflags.DEFINE_integer(\n 'num_cores',\n default=8,\n help=('Number of TPU cores. For a single TPU device, this is 8 because each'\n ' TPU has 4 chips each with 2 cores.'))\nflags.DEFINE_string('output_dir', '/tmp/imagenet/',\n 'Directory where to write event logs and checkpoint.')\nflags.DEFINE_integer(\n 'checkpoint_step',\n 128000,\n 'Checkpoint step to evaluate for mode=\\'eval_once\\'')\nflags.DEFINE_string(\n 'mode',\n default='train',\n help='One of {\"eval_once\", \"train_and_eval\", \"train\", \"eval\"}.')\nflags.DEFINE_integer('export_model_freq', 2502,\n 'The rate at which estimator exports the model.')\n\n# pruning flags\nflags.DEFINE_float('end_sparsity', 0.9,\n 'Target sparsity desired by end of training.')\nflags.DEFINE_integer('sparsity_begin_step', 5000, 'Step to begin pruning at.')\nflags.DEFINE_integer('sparsity_end_step', 8000, 'Step to end pruning at.')\nflags.DEFINE_integer('pruning_frequency', 2000,\n 'Step interval between pruning.')\nflags.DEFINE_enum(\n 'pruning_method', 'baseline',\n ('baseline', 'threshold', 'variational_dropout', 'l0_regularization'),\n 'Method used for pruning. baseline means no pruning is used.')\nflags.DEFINE_enum(\n 'init_method', 'baseline', ('baseline', 'sparse'),\n 'Method for initialization. 
If sparse and pruning_method=scratch, then'\n ' use initializers that take into account starting sparsity.')\nflags.DEFINE_float('reg_scalar', 0., 'Weight placed on variational dropout'\n 'regularizer.')\nflags.DEFINE_float('clip_log_alpha', 8.0, 'Threshold for clipping log alpha.')\nflags.DEFINE_float('log_alpha_threshold', 3.0,\n 'Threshold for thresholding log alpha during eval.')\nflags.DEFINE_bool(\n 'is_warm_up',\n default=True,\n help=('Boolean for whether to scale weight of regularizer.'))\nflags.DEFINE_float(\n 'width', -1., 'Multiplier for the number of channels in each layer.')\n# first and last layer are somewhat special. First layer has almost no\n# parameters, but 3% of the total flops. Last layer has only .05% of the total\n# flops but 10% of the total parameters. Depending on whether the goal is max\n# compression or max acceleration, pruning goals will be different.\nflags.DEFINE_bool('prune_first_layer', True,\n 'Whether or not to apply sparsification to the first layer')\nflags.DEFINE_bool('prune_last_layer', True,\n 'Whether or not to apply sparsification to the last layer')\nflags.DEFINE_float(\n 'first_layer_sparsity', -1.,\n 'Sparsity to use for the first layer. Overrides default of end_sparsity.')\nflags.DEFINE_float(\n 'last_layer_sparsity', -1.,\n 'Sparsity to use for the last layer. Overrides default of end_sparsity.')\nflags.DEFINE_string(\n 'load_mask_dir', '',\n 'Directory of a trained model from which to load only the mask')\nflags.DEFINE_string(\n 'initial_value_checkpoint', '',\n 'Directory of a model from which to load only the parameters')\n\nFLAGS = flags.FLAGS\n\n# Learning rate schedule (multiplier, epoch to start) tuples\nLR_SCHEDULE = [(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)]\n\n# The input tensor is in the range of [0, 255], we need to scale them to the\n# range of [0, 1]\nMEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\nSTDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n\n# TODO(shooker): verify hyperparameter defaults once code is stabilized.\n\n\ndef lr_schedule(current_epoch):\n \"\"\"Computes learning rate schedule.\"\"\"\n scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)\n\n decay_rate = (\n scaled_lr * LR_SCHEDULE[0][0] * current_epoch / LR_SCHEDULE[0][1])\n for mult, start_epoch in LR_SCHEDULE:\n decay_rate = tf.where(current_epoch < start_epoch, decay_rate,\n scaled_lr * mult)\n return decay_rate\n\n\ndef train_function(pruning_method, loss, output_dir, use_tpu):\n \"\"\"Training script for resnet model.\n\n Args:\n pruning_method: string indicating pruning method used to compress model.\n loss: tensor float32 of the cross entropy + regularization losses.\n output_dir: string tensor indicating the directory to save summaries.\n use_tpu: boolean indicating whether to run script on a tpu.\n\n Returns:\n host_call: summary tensors to be computed at each training step.\n train_op: the optimization term.\n \"\"\"\n\n global_step = tf.train.get_global_step()\n\n steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size\n current_epoch = (tf.cast(global_step, tf.float32) / steps_per_epoch)\n learning_rate = lr_schedule(current_epoch)\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=learning_rate, momentum=FLAGS.momentum, use_nesterov=True)\n\n if use_tpu:\n # use CrossShardOptimizer when using TPU.\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n # UPDATE_OPS needs to be added as a dependency due to batch norm\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with 
tf.control_dependencies(update_ops), tf.name_scope('train'):\n train_op = optimizer.minimize(loss, global_step)\n\n if not use_tpu:\n if FLAGS.num_workers > 0:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer,\n replicas_to_aggregate=FLAGS.num_workers,\n total_num_replicas=FLAGS.num_workers)\n optimizer.make_session_run_hook(True)\n\n metrics = {\n 'global_step': tf.train.get_or_create_global_step(),\n 'loss': loss,\n 'learning_rate': learning_rate,\n 'current_epoch': current_epoch\n }\n\n if pruning_method == 'threshold':\n # construct the necessary hparams string from the FLAGS\n hparams_string = ('begin_pruning_step={0},'\n 'sparsity_function_begin_step={0},'\n 'end_pruning_step={1},'\n 'sparsity_function_end_step={1},'\n 'target_sparsity={2},'\n 'pruning_frequency={3},'\n 'threshold_decay=0,'\n 'use_tpu={4}'.format(\n FLAGS.sparsity_begin_step,\n FLAGS.sparsity_end_step,\n FLAGS.end_sparsity,\n FLAGS.pruning_frequency,\n FLAGS.use_tpu,\n ))\n\n # Parse pruning hyperparameters\n pruning_hparams = pruning.get_pruning_hparams().parse(hparams_string)\n\n # The first layer has so few parameters, we don't need to prune it, and\n # pruning it a higher sparsity levels has very negative effects.\n if FLAGS.prune_first_layer and FLAGS.first_layer_sparsity >= 0.:\n pruning_hparams.set_hparam(\n 'weight_sparsity_map',\n ['resnet_model/initial_conv:%f' % FLAGS.first_layer_sparsity])\n if FLAGS.prune_last_layer and FLAGS.last_layer_sparsity >= 0:\n pruning_hparams.set_hparam(\n 'weight_sparsity_map',\n ['resnet_model/final_dense:%f' % FLAGS.last_layer_sparsity])\n\n # Create a pruning object using the pruning hyperparameters\n pruning_obj = pruning.Pruning(pruning_hparams, global_step=global_step)\n\n # We override the train op to also update the mask.\n with tf.control_dependencies([train_op]):\n train_op = pruning_obj.conditional_mask_update_op()\n\n masks = pruning.get_masks()\n metrics.update(utils.mask_summaries(masks))\n elif pruning_method == 'scratch':\n masks = pruning.get_masks()\n # make sure the masks have the sparsity we expect and that it doesn't change\n metrics.update(utils.mask_summaries(masks))\n elif pruning_method == 'variational_dropout':\n masks = utils.add_vd_pruning_summaries(threshold=FLAGS.log_alpha_threshold)\n metrics.update(masks)\n elif pruning_method == 'l0_regularization':\n summaries = utils.add_l0_summaries()\n metrics.update(summaries)\n elif pruning_method == 'baseline':\n pass\n else:\n raise ValueError('Unsupported pruning method', FLAGS.pruning_method)\n\n host_call = (functools.partial(utils.host_call_fn, output_dir),\n utils.format_tensors(metrics))\n\n return host_call, train_op\n\n\ndef resnet_model_fn_w_pruning(features, labels, mode, params):\n \"\"\"The model_fn for ResNet-50 with pruning.\n\n Args:\n features: A float32 batch of images.\n labels: A int32 batch of labels.\n mode: Specifies whether training or evaluation.\n params: Dictionary of parameters passed to the model.\n\n Returns:\n A TPUEstimatorSpec for the model\n \"\"\"\n\n width = 1. 
if FLAGS.width <= 0 else FLAGS.width\n if isinstance(features, dict):\n features = features['feature']\n\n if FLAGS.data_format == 'channels_first':\n assert not FLAGS.transpose_input # channels_first only for GPU\n features = tf.transpose(features, [0, 3, 1, 2])\n\n if FLAGS.transpose_input and mode != tf.estimator.ModeKeys.PREDICT:\n features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC\n\n # Normalize the image to zero mean and unit variance.\n features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)\n features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)\n\n pruning_method = params['pruning_method']\n use_tpu = params['use_tpu']\n log_alpha_threshold = params['log_alpha_threshold']\n\n def build_network():\n \"\"\"Construct the network in the graph.\"\"\"\n model_pruning_method = pruning_method\n if pruning_method == 'scratch':\n model_pruning_method = 'threshold'\n\n network = resnet_model.resnet_v1_(\n resnet_depth=FLAGS.resnet_depth,\n num_classes=FLAGS.num_label_classes,\n # we need to construct the model with the pruning masks, but they won't\n # be updated if we're doing scratch training\n pruning_method=model_pruning_method,\n init_method=FLAGS.init_method,\n width=width,\n prune_first_layer=FLAGS.prune_first_layer,\n prune_last_layer=FLAGS.prune_last_layer,\n data_format=FLAGS.data_format,\n end_sparsity=FLAGS.end_sparsity,\n clip_log_alpha=FLAGS.clip_log_alpha,\n log_alpha_threshold=log_alpha_threshold,\n weight_decay=FLAGS.weight_decay)\n return network(\n inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))\n\n if FLAGS.precision == 'bfloat16':\n with tf.contrib.tpu.bfloat16_scope():\n logits = build_network()\n logits = tf.cast(logits, tf.float32)\n elif FLAGS.precision == 'float32':\n logits = build_network()\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'classes': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs={\n 'classify': tf.estimator.export.PredictOutput(predictions)\n })\n\n output_dir = params['output_dir'] # pylint: disable=unused-variable\n\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)\n\n # make sure we reuse the same label smoothing parameter is we're doing\n # scratch / lottery ticket experiments.\n label_smoothing = FLAGS.label_smoothing\n if FLAGS.pruning_method == 'scratch':\n label_smoothing = float(FLAGS.load_mask_dir.split('/')[15])\n loss = tf.losses.softmax_cross_entropy(\n logits=logits,\n onehot_labels=one_hot_labels,\n label_smoothing=label_smoothing)\n # Add regularization loss term\n loss += tf.losses.get_regularization_loss()\n\n if pruning_method == 'variational_dropout':\n reg_loss = utils.variational_dropout_dkl_loss(\n reg_scalar=FLAGS.reg_scalar,\n start_reg_ramp_up=FLAGS.sparsity_begin_step,\n end_reg_ramp_up=FLAGS.sparsity_end_step,\n warm_up=FLAGS.is_warm_up,\n use_tpu=use_tpu)\n loss += reg_loss\n tf.losses.add_loss(reg_loss, loss_collection=tf.GraphKeys.LOSSES)\n elif pruning_method == 'l0_regularization':\n reg_loss = utils.l0_regularization_loss(\n reg_scalar=FLAGS.reg_scalar,\n start_reg_ramp_up=FLAGS.sparsity_begin_step,\n end_reg_ramp_up=FLAGS.sparsity_end_step,\n warm_up=FLAGS.is_warm_up,\n use_tpu=use_tpu)\n loss += reg_loss\n tf.losses.add_loss(reg_loss, loss_collection=tf.GraphKeys.LOSSES)\n\n host_call = None\n 
if mode == tf.estimator.ModeKeys.TRAIN:\n host_call, train_op = train_function(pruning_method, loss, output_dir,\n use_tpu)\n\n else:\n train_op = None\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(labels, logits):\n \"\"\"Calculate eval metrics.\"\"\"\n logging.info('In metric function')\n eval_metrics = {}\n predictions = tf.cast(tf.argmax(logits, axis=1), tf.int32)\n in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)\n eval_metrics['top_5_eval_accuracy'] = tf.metrics.mean(in_top_5)\n eval_metrics['eval_accuracy'] = tf.metrics.accuracy(\n labels=labels, predictions=predictions)\n\n return eval_metrics\n\n def vd_metric_fn(labels, logits, global_sparsity):\n eval_metrics = metric_fn(labels, logits)\n eval_metrics['global_sparsity'] = tf.metrics.mean(global_sparsity)\n return eval_metrics\n\n tensors = [labels, logits]\n metric_function = metric_fn\n\n if FLAGS.pruning_method == 'variational_dropout':\n batch_size = labels.shape[0]\n ones = tf.ones([batch_size, 1])\n mask_metrics = utils.add_vd_pruning_summaries(\n threshold=FLAGS.log_alpha_threshold)\n tensors.append(mask_metrics['global_sparsity'] * ones)\n metric_function = vd_metric_fn\n\n eval_metrics = (metric_function, tensors)\n\n # define a custom scaffold function to enable initializing the mask from an\n # already trained checkpoint.\n def initialize_mask_from_ckpt(ckpt_path):\n \"\"\"Load mask from an existing checkpoint.\"\"\"\n model_dir = FLAGS.output_dir\n already_has_ckpt = model_dir and tf.train.latest_checkpoint(\n model_dir) is not None\n if already_has_ckpt:\n tf.logging.info(\n 'Training already started on this model, not loading masks from'\n 'previously trained model')\n return\n\n reader = tf.train.NewCheckpointReader(ckpt_path)\n mask_names = reader.get_variable_to_shape_map().keys()\n mask_names = [x for x in mask_names if x.endswith('mask')]\n\n variable_map = {}\n for var in tf.global_variables():\n var_name = var.name.split(':')[0]\n if var_name in mask_names:\n tf.logging.info('Loading mask variable from checkpoint: %s', var_name)\n variable_map[var_name] = var\n elif 'mask' in var_name:\n tf.logging.info('Cannot find mask variable in checkpoint, skipping: %s',\n var_name)\n tf.train.init_from_checkpoint(ckpt_path, variable_map)\n\n def initialize_parameters_from_ckpt(ckpt_path):\n \"\"\"Load parameters from an existing checkpoint.\"\"\"\n model_dir = FLAGS.output_dir\n already_has_ckpt = model_dir and tf.train.latest_checkpoint(\n model_dir) is not None\n if already_has_ckpt:\n tf.logging.info(\n 'Training already started on this model, not loading masks from'\n 'previously trained model')\n return\n\n reader = tf.train.NewCheckpointReader(ckpt_path)\n param_names = reader.get_variable_to_shape_map().keys()\n param_names = [x for x in param_names if not x.endswith('mask')]\n\n variable_map = {}\n for var in tf.global_variables():\n var_name = var.name.split(':')[0]\n if var_name in param_names:\n tf.logging.info('Loading parameter variable from checkpoint: %s',\n var_name)\n variable_map[var_name] = var\n elif 'mask' not in var_name:\n tf.logging.info(\n 'Cannot find parameter variable in checkpoint, skipping: %s',\n var_name)\n tf.train.init_from_checkpoint(ckpt_path, variable_map)\n\n if FLAGS.pruning_method == 'scratch':\n if FLAGS.load_mask_dir:\n\n def scaffold_fn():\n initialize_mask_from_ckpt(FLAGS.load_mask_dir)\n if FLAGS.initial_value_checkpoint:\n initialize_parameters_from_ckpt(FLAGS.initial_value_checkpoint)\n return tf.train.Scaffold()\n 
else:\n raise ValueError('Must supply a mask directory to use scratch method')\n else:\n scaffold_fn = None\n\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n host_call=host_call,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n\n\nclass ExportModelHook(tf.train.SessionRunHook):\n \"\"\"Train hooks called after each session run for exporting the model.\"\"\"\n\n def __init__(self, classifier, export_dir):\n self.classifier = classifier\n self.global_step = None\n self.export_dir = export_dir\n self.last_export = 0\n self.supervised_input_receiver_fn = (\n tf.contrib.estimator.build_raw_supervised_input_receiver_fn({\n 'feature':\n tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])\n }, tf.placeholder(dtype=tf.int32, shape=[None])))\n\n def begin(self):\n self.global_step = tf.train.get_or_create_global_step()\n\n def after_run(self, run_context, run_values):\n # export saved model\n global_step = run_context.session.run(self.global_step)\n\n if global_step - self.last_export >= FLAGS.export_model_freq:\n tf.logging.info(\n 'Export model for prediction (step={}) ...'.format(global_step))\n\n self.last_export = global_step\n tf.contrib.estimator.export_all_saved_models(\n self.classifier,\n os.path.join(self.export_dir, str(global_step)), {\n tf.estimator.ModeKeys.EVAL:\n self.supervised_input_receiver_fn,\n tf.estimator.ModeKeys.PREDICT:\n imagenet_input.image_serving_input_fn\n })\n\n\ndef main(_):\n\n if FLAGS.pruning_method in ['threshold']:\n folder_stub = os.path.join(FLAGS.pruning_method, str(FLAGS.end_sparsity),\n str(FLAGS.sparsity_begin_step),\n str(FLAGS.sparsity_end_step),\n str(FLAGS.pruning_frequency),\n str(FLAGS.label_smoothing))\n elif FLAGS.pruning_method == 'variational_dropout':\n folder_stub = os.path.join(FLAGS.pruning_method,\n str(FLAGS.sparsity_begin_step),\n str(FLAGS.sparsity_end_step),\n str(FLAGS.reg_scalar),\n str(FLAGS.label_smoothing))\n elif FLAGS.pruning_method == 'l0_regularization':\n folder_stub = os.path.join(FLAGS.pruning_method,\n str(FLAGS.sparsity_begin_step),\n str(FLAGS.sparsity_end_step),\n str(FLAGS.reg_scalar),\n str(FLAGS.label_smoothing))\n elif FLAGS.pruning_method == 'baseline':\n folder_stub = os.path.join(FLAGS.pruning_method, str(0.0), str(0.0),\n str(0.0), str(0.0))\n elif FLAGS.pruning_method == 'scratch':\n run_info = FLAGS.load_mask_dir.split('/')\n run_type = run_info[10]\n run_sparsity = run_info[11]\n run_begin = run_info[12]\n run_end = run_info[13]\n run_freq = run_info[14]\n run_label_smoothing = run_info[15]\n folder_stub = os.path.join(FLAGS.pruning_method, run_type, run_sparsity,\n run_begin, run_end, run_freq,\n run_label_smoothing, FLAGS.init_method)\n else:\n raise ValueError('Pruning method is not known %s' % (FLAGS.pruning_method))\n\n output_dir = os.path.join(FLAGS.output_dir, folder_stub)\n\n export_dir = os.path.join(output_dir, 'export_dir')\n\n # we pass the updated eval and train string to the params dictionary.\n params = {}\n params['output_dir'] = output_dir\n params['pruning_method'] = FLAGS.pruning_method\n params['use_tpu'] = FLAGS.use_tpu\n params['log_alpha_threshold'] = FLAGS.log_alpha_threshold\n\n imagenet_train, imagenet_eval = [\n imagenet_input.ImageNetInput( # pylint: disable=g-complex-comprehension\n is_training=is_training,\n data_dir=FLAGS.data_directory,\n transpose_input=False,\n num_parallel_calls=FLAGS.num_parallel_calls,\n use_bfloat16=False) for is_training in [True, False]\n ]\n\n run_config = tpu_config.RunConfig(\n 
##master=FLAGS.master,\n master = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=[os.environ['TPU_NAME']]).get_master(),\n model_dir=output_dir,\n save_checkpoints_steps=FLAGS.steps_per_checkpoint,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False),\n tpu_config=tpu_config.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_cores,\n tpu_job_name=FLAGS.tpu_job_name))\n\n classifier = tpu_estimator.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=resnet_model_fn_w_pruning,\n params=params,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size)\n\n cpu_classifier = tpu_estimator.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=resnet_model_fn_w_pruning,\n params=params,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n export_to_tpu=False,\n eval_batch_size=FLAGS.eval_batch_size)\n\n if FLAGS.num_eval_images % FLAGS.eval_batch_size != 0:\n raise ValueError(\n 'eval_batch_size (%d) must evenly divide num_eval_images(%d)!' %\n (FLAGS.eval_batch_size, FLAGS.num_eval_images))\n\n eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size\n\n if FLAGS.mode == 'eval_once':\n ckpt = FLAGS.output_dir + 'model.ckpt-{}'.format(FLAGS.checkpoint_step)\n classifier.evaluate(\n input_fn=imagenet_eval.input_fn,\n steps=eval_steps,\n checkpoint_path=ckpt,\n name='{0}'.format(int(FLAGS.log_alpha_threshold * 10)))\n elif FLAGS.mode == 'eval':\n # Run evaluation when there's a new checkpoint\n for ckpt in evaluation.checkpoints_iterator(output_dir):\n print('Starting to evaluate.')\n try:\n classifier.evaluate(\n input_fn=imagenet_eval.input_fn,\n steps=eval_steps,\n checkpoint_path=ckpt,\n name='{0}'.format(int(FLAGS.log_alpha_threshold * 10)))\n # Terminate eval job when final checkpoint is reached\n global_step = int(os.path.basename(ckpt).split('-')[1])\n if global_step >= FLAGS.train_steps:\n print('Evaluation finished after training step %d' % global_step)\n break\n\n except tf.errors.NotFoundError:\n logging.info('Checkpoint no longer exists,skipping checkpoint.')\n\n else:\n global_step = estimator._load_global_step_from_checkpoint_dir(output_dir) # pylint: disable=protected-access,line-too-long\n # Session run hooks to export model for prediction\n export_hook = ExportModelHook(cpu_classifier, export_dir)\n hooks = [export_hook]\n\n if FLAGS.mode == 'train':\n print('start training...')\n classifier.train(\n input_fn=imagenet_train.input_fn,\n hooks=hooks,\n max_steps=FLAGS.train_steps)\n else:\n assert FLAGS.mode == 'train_and_eval'\n print('start training and eval...')\n while global_step < FLAGS.train_steps:\n next_checkpoint = min(global_step + FLAGS.steps_per_eval,\n FLAGS.train_steps)\n classifier.train(\n input_fn=imagenet_train.input_fn, max_steps=next_checkpoint)\n global_step = next_checkpoint\n logging.info('Completed training up to step :%d', global_step)\n classifier.evaluate(input_fn=imagenet_eval.input_fn, steps=eval_steps)\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.contrib.tpu.bfloat16_scope", "tensorflow.metrics.accuracy", "tensorflow.control_dependencies", "tensorflow.contrib.training.python.training.evaluation.checkpoints_iterator", "tensorflow.cast", "tensorflow.global_variables", "tensorflow.train.init_from_checkpoint", "tensorflow.contrib.tpu.CrossShardOptimizer", "tensorflow.contrib.model_pruning.python.pruning.get_masks", "tensorflow.where", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.get_collection", "tensorflow.estimator.export.PredictOutput", "tensorflow.train.get_global_step", "tensorflow.train.get_or_create_global_step", "tensorflow.losses.softmax_cross_entropy", "tensorflow.ConfigProto", "tensorflow.train.MomentumOptimizer", "tensorflow.name_scope", "tensorflow.nn.in_top_k", "tensorflow.argmax", "tensorflow.metrics.mean", "tensorflow.contrib.model_pruning.python.pruning.Pruning", "tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimator", "tensorflow.placeholder", "tensorflow.train.NewCheckpointReader", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.python.tpu.tpu_config.TPUConfig", "tensorflow.contrib.model_pruning.python.pruning.get_pruning_hparams", "tensorflow.losses.add_loss", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.transpose", "tensorflow.train.latest_checkpoint", "tensorflow.train.Scaffold", "tensorflow.losses.get_regularization_loss", "tensorflow.ones", "tensorflow.train.SyncReplicasOptimizer", "tensorflow.python.estimator.estimator._load_global_step_from_checkpoint_dir" ] ]
Magnushhoie/mlops_MH
[ "2875ecee4a7a935f517eddda56c8bc7b424880fb" ]
[ "src/visualization/visualize.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom torch import nn, optim\nfrom torch.autograd import Variable\n\n\ndef plot_metric(value_list, value_string, dataset=\"Training\"):\n plt.figure()\n epoch_list = list(range(len(value_list)))\n sns.lineplot(x=epoch_list, y=value_list)\n plt.xlabel(\"Epochs\")\n plt.ylabel(value_string)\n plt.title(f\"{dataset} {value_string.lower()}\")\n\n\ndef test_network(net, trainloader):\n\n criterion = nn.MSELoss()\n optimizer = optim.Adam(net.parameters(), lr=0.001)\n\n dataiter = iter(trainloader)\n images, labels = dataiter.next()\n\n # Create Variables for the inputs and targets\n inputs = Variable(images)\n targets = Variable(images)\n\n # Clear the gradients from all Variables\n optimizer.zero_grad()\n\n # Forward pass, then backward pass, then update weights\n output = net.forward(inputs)\n loss = criterion(output, targets)\n loss.backward()\n optimizer.step()\n\n return True\n\n\ndef imshow(image, ax=None, title=None, normalize=True):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n image = image.numpy().transpose((1, 2, 0))\n\n if normalize:\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n image = np.clip(image, 0, 1)\n\n ax.imshow(image)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.tick_params(axis=\"both\", length=0)\n ax.set_xticklabels(\"\")\n ax.set_yticklabels(\"\")\n\n return ax\n\n\ndef view_recon(img, recon):\n \"\"\"Function for displaying an image (as a PyTorch Tensor) and its\n reconstruction also a PyTorch Tensor\n \"\"\"\n\n fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)\n axes[0].imshow(img.numpy().squeeze())\n axes[1].imshow(recon.data.numpy().squeeze())\n for ax in axes:\n ax.axis(\"off\")\n ax.set_adjustable(\"box-forced\")\n\n\ndef view_classify(img, ps):\n \"\"\"Function for viewing an image and it's predicted classes.\"\"\"\n ps = ps.data.numpy().squeeze()\n\n fig, (ax1, ax2) = plt.subplots(figsize=(6, 9), ncols=2)\n ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())\n ax1.axis(\"off\")\n ax2.barh(np.arange(10), ps)\n ax2.set_aspect(0.1)\n ax2.set_yticks(np.arange(10))\n ax2.set_yticklabels(np.arange(10))\n ax2.set_title(\"Class Probability\")\n ax2.set_xlim(0, 1.1)\n\n plt.tight_layout()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.clip", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.array", "torch.nn.MSELoss", "torch.autograd.Variable" ] ]
vnmabus/scikit-datasets
[ "ef6f4f9dda7f6c929e438d186806553ba04c2809" ]
[ "tests/utils/test_scores.py" ]
[ "\"\"\"\n@author: David Diaz Vico\n@license: MIT\n\"\"\"\n\nimport numpy as np\n\nfrom skdatasets.utils.scores import scores_table, hypotheses_table\n\n\ndatasets = ['a4a', 'a8a', 'combined', 'dna', 'ijcnn1', 'letter', 'pendigits',\n 'satimage', 'shuttle', 'usps', 'w7a', 'w8a']\nestimators = ['LogisticRegression', 'MLPClassifier0', 'MLPClassifier1',\n 'MLPClassifier2', 'MLPClassifier3', 'MLPClassifier4',\n 'MLPClassifier5']\nscores = np.asarray(((89.79, 89.78, 89.76, 89.88, 89.85, 89.91, 89.93),\n (90.73, 90.73, 90.73, 90.85, 90.83, 90.81, 90.80),\n (92.36, 92.31, 94.58, 94.82, 94.84, 94.92, 94.89),\n (99.28, 99.27, 99.28, 99.26, 99.27, 99.25, 99.25),\n (91.34, 91.34, 99.29, 99.33, 99.34, 99.53, 99.54),\n (98.07, 98.04, 99.94, 99.95, 99.96, 99.96, 99.95),\n (99.17, 99.08, 99.87, 99.87, 99.88, 99.90, 99.89),\n (96.67, 96.28, 98.84, 98.87, 98.90, 98.87, 98.92),\n (95.85, 92.83, 99.88, 99.93, 99.96, 99.98, 99.99),\n (99.12, 99.11, 99.65, 99.58, 99.58, 99.65, 99.60),\n (95.93, 95.40, 94.58, 96.31, 96.34, 96.58, 96.50),\n (95.80, 95.99, 95.35, 96.20, 96.22, 96.36, 96.71)))\n\n\ndef test_scores_table():\n \"\"\"Tests scores table.\"\"\"\n scores_table(datasets, estimators, scores)\n scores_table(datasets, estimators, scores, stds=scores/10.0)\n\n\ndef test_hypotheses_table():\n \"\"\"Tests hypotheses table.\"\"\"\n for multitest in ('kruskal', 'friedmanchisquare', None):\n for test in ('mannwhitneyu', 'wilcoxon'):\n hypotheses_table(scores, estimators, multitest=multitest, test=test)\n for correction in ('bonferroni', 'sidak', 'holm-sidak', 'holm',\n 'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by',\n 'fdr_tsbh', 'fdr_tsbky'):\n hypotheses_table(scores, estimators, multitest=multitest,\n test=test, correction=correction)\n" ]
[ [ "numpy.asarray" ] ]
YiranK/pytorch-yolo2
[ "08772feb4702b886ad0ac29cbf04cb58623e502b" ]
[ "image.py" ]
[ "#!/usr/bin/python\n# encoding: utf-8\nimport random\nimport os\nfrom PIL import Image\nimport numpy as np\n\n\ndef scale_image_channel(im, c, v):\n cs = list(im.split())\n cs[c] = cs[c].point(lambda i: i * v)\n out = Image.merge(im.mode, tuple(cs))\n return out\n\ndef distort_image(im, hue, sat, val):\n im = im.convert('HSV')\n cs = list(im.split())\n cs[1] = cs[1].point(lambda i: i * sat)\n cs[2] = cs[2].point(lambda i: i * val)\n \n def change_hue(x):\n x += hue*255\n if x > 255:\n x -= 255\n if x < 0:\n x += 255\n return x\n cs[0] = cs[0].point(change_hue)\n im = Image.merge(im.mode, tuple(cs))\n\n im = im.convert('RGB')\n #constrain_image(im)\n return im\n\ndef rand_scale(s):\n scale = random.uniform(1, s)\n if(random.randint(1,10000)%2): \n return scale\n return 1./scale\n\ndef random_distort_image(im, hue, saturation, exposure):\n dhue = random.uniform(-hue, hue)\n dsat = rand_scale(saturation)\n dexp = rand_scale(exposure)\n res = distort_image(im, dhue, dsat, dexp)\n return res\n\ndef data_augmentation(img, shape, jitter, hue, saturation, exposure):\n oh = img.height \n ow = img.width\n \n dw =int(ow*jitter)\n dh =int(oh*jitter)\n\n pleft = random.randint(-dw, dw)\n pright = random.randint(-dw, dw)\n ptop = random.randint(-dh, dh)\n pbot = random.randint(-dh, dh)\n\n swidth = ow - pleft - pright\n sheight = oh - ptop - pbot\n\n sx = float(swidth) / ow\n sy = float(sheight) / oh\n \n flip = random.randint(1,10000)%2\n cropped = img.crop( (pleft, ptop, pleft + swidth - 1, ptop + sheight - 1))\n\n dx = (float(pleft)/ow)/sx\n dy = (float(ptop) /oh)/sy\n\n sized = cropped.resize(shape)\n\n if flip: \n sized = sized.transpose(Image.FLIP_LEFT_RIGHT)\n img = random_distort_image(sized, hue, saturation, exposure)\n \n return img, flip, dx,dy,sx,sy \n\ndef fill_truth_detection(labpath, w, h, flip, dx, dy, sx, sy):\n max_boxes = 50\n label = np.zeros((max_boxes,1+4+723)) # one value indicating 609 class, 4 box, 723 attribute, 20180509\n if os.path.getsize(labpath):\n bs = np.load(labpath) # for '.npy', 20180509\n # print labpath\n if bs is None:\n return label\n #bs = np.reshape(bs, (-1, 6)) # no use, 20180509\n\n cc = 0\n for i in range(50):\n if bs[i][0] != 1:\n break\n # get only one class index of bs[i], 20180510\n cls_idx = bs[i][2].nonzero()[0][0]\n\n # if i < 3:\n # print (i,' origin bbox:', bs[i][1])\n\n x_center_float = bs[i][1][0]*(1./w) #x\n y_center_float = bs[i][1][1]*(1./h) #y\n w_float = bs[i][1][2]*(1./w) #w\n h_float = bs[i][1][3]*(1./h) #h\n bbox = np.array([cls_idx,x_center_float,y_center_float,w_float,h_float])\n\n # if i < 3:\n # print (i,' class:', bs[i][2].nonzero())\n # print (i,' /wh bbox:', bs[i][1])\n # print (i,' cls concat /wh box:', bbox)\n\n # origin YOLOv2 handle PASCAL VOC label file\n # x1 = bbox[1] - bbox[3]/2\n # y1 = bbox[2] - bbox[4]/2\n # x2 = bbox[1] + bbox[3]/2\n # y2 = bbox[2] + bbox[4]/2\n\n # bbox:[x1,y1,w,h] -> [x1,y1,x2,y2], 20180511\n x1 = bbox[1]\n y1 = bbox[2]\n x2 = bbox[1] + bbox[3] # x1+w\n y2 = bbox[2] + bbox[4] # y1+h\n \n x1 = min(0.999, max(0, x1 * sx - dx)) \n y1 = min(0.999, max(0, y1 * sy - dy)) \n x2 = min(0.999, max(0, x2 * sx - dx))\n y2 = min(0.999, max(0, y2 * sy - dy))\n\n # if i < 3:\n # print(i,' (x1,y1,x2,y2):',x1,y1,x2,y2)\n \n bbox[1] = (x1 + x2)/2\n bbox[2] = (y1 + y2)/2\n bbox[3] = (x2 - x1)\n bbox[4] = (y2 - y1)\n\n # if i<3:\n # print (i,' after cropped (x,y,w,h):', bbox)\n\n if flip:\n bbox[1] = 0.999 - bbox[1]\n \n if bbox[3] < 0.001 or bbox[4] < 0.001:\n continue\n\n label[cc] = np.concatenate((bbox,bs[i][3]))\n\n 
# if i<3:\n # print (i,' label:',label[cc])\n\n cc += 1\n if cc >= 50:\n break\n label = np.reshape(label, (-1))\n return label\n\ndef load_data_detection(imgpath, shape, jitter, hue, saturation, exposure):\n # for vg_train.txt has the img id but not img path, modify the labpath and imgpath, 20180509\n labpath = os.path.join('/mnt/lustre/kangyiran2/zero-shot-detection/dataset/zsd_anno', imgpath + '.npy')\n imgpath = os.path.join('/mnt/lustre/kangyiran2/zero-shot-detection/dataset/imgs/VG_100K', imgpath + '.jpg')\n #labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.npy').replace('.png','.npy')\n\n ## data augmentation\n img = Image.open(imgpath).convert('RGB')\n img,flip,dx,dy,sx,sy = data_augmentation(img, shape, jitter, hue, saturation, exposure)\n label = fill_truth_detection(labpath, img.width, img.height, flip, dx, dy, 1./sx, 1./sy)\n return img,label\n" ]
[ [ "numpy.reshape", "numpy.concatenate", "numpy.load", "numpy.array", "numpy.zeros" ] ]
Accern/accern-xyme
[ "887536144539eb93a798830f312aaebf09c4afc9" ]
[ "packages/python/accern_xyme/util.py" ]
[ "from typing import (\n Any,\n Callable,\n cast,\n Dict,\n IO,\n Iterable,\n List,\n Optional,\n Tuple,\n TypeVar,\n Union,\n)\nimport io\nimport json\nimport shutil\nimport time\nimport threading\nfrom io import BytesIO, TextIOWrapper\nimport pandas as pd\nfrom scipy import sparse\nimport torch\nfrom .types import MinimalQueueStatsResponse, QueueStatus\n\nVERBOSE = False\nFILE_UPLOAD_CHUNK_SIZE = 100 * 1024 # 100kb\nFILE_HASH_CHUNK_SIZE = FILE_UPLOAD_CHUNK_SIZE\nMAX_RETRY = 20\nRETRY_SLEEP = 5.0\n\n\nRT = TypeVar('RT')\n\n\nByteResponse = Union[pd.DataFrame, dict, IO[bytes], List[dict]]\n\n\ndef set_verbose() -> None:\n global VERBOSE\n\n import logging\n import http.client as http_client\n\n http_client.HTTPConnection.debuglevel = 1 # type: ignore\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n VERBOSE = True\n\n\ndef is_verbose() -> bool:\n return VERBOSE\n\n\ndef to_bool(value: Union[bool, float, int, str]) -> bool:\n value = f\"{value}\".lower()\n if value == \"true\":\n return True\n if value == \"false\":\n return False\n try:\n return bool(int(float(value)))\n except ValueError:\n pass\n raise ValueError(f\"{value} cannot be interpreted as bool\")\n\n\nMINUTE = 60.0\nHOUR = 60.0 * MINUTE\nDAY = 24.0 * HOUR\nWEEK = 7.0 * DAY\nYEAR = 365.0 * DAY\n\n\ndef get_age(cur_time: float, other_time: Optional[float]) -> str:\n if other_time is None:\n return \"never\"\n diff = cur_time - other_time\n if diff < 0.0:\n return \"soon\"\n if diff < 0.1:\n return \"now\"\n if diff < 1.0:\n return \"<1s\"\n if diff < MINUTE:\n return \"<1m\"\n if diff < HOUR:\n return f\"{diff // MINUTE:.0f}m\"\n if diff < DAY:\n return f\"{diff // HOUR:.0f}h\"\n if diff < WEEK:\n return f\"{diff // DAY:.0f}d\"\n if diff < YEAR:\n return f\"{diff // WEEK:.0f}w\"\n return f\"{diff // YEAR:.0f}y\"\n\n\ndef safe_opt_num(num: Optional[float]) -> Tuple[bool, float]:\n if num is None:\n return (False, 0.0)\n return (True, num)\n\n\ndef set_file_upload_chunk_size(size: int) -> None:\n global FILE_UPLOAD_CHUNK_SIZE\n\n FILE_UPLOAD_CHUNK_SIZE = size\n\n\ndef get_file_upload_chunk_size() -> int:\n return FILE_UPLOAD_CHUNK_SIZE\n\n\ndef set_file_hash_chunk_size(size: int) -> None:\n global FILE_HASH_CHUNK_SIZE\n\n FILE_HASH_CHUNK_SIZE = size\n\n\ndef get_file_hash_chunk_size() -> int:\n return FILE_HASH_CHUNK_SIZE\n\n\ndef get_max_retry() -> int:\n \"\"\"Returns the maximum number of retries on connection errors.\n\n Returns:\n int -- The number of times a connection tries to be established.\n \"\"\"\n return MAX_RETRY\n\n\ndef get_retry_sleep() -> float:\n return RETRY_SLEEP\n\n\ndef maybe_timestamp(timestamp: Optional[str]) -> Optional[pd.Timestamp]:\n return None if timestamp is None else pd.Timestamp(timestamp)\n\n\ndef content_to_csv_bytes(content: Union[bytes, str, pd.DataFrame]) -> BytesIO:\n bio = BytesIO()\n wrap = TextIOWrapper(bio, encoding=\"utf-8\", write_through=True)\n if isinstance(content, pd.DataFrame):\n content.to_csv(wrap, index=False)\n elif isinstance(content, bytes):\n wrap.write(content.decode(\"utf-8\"))\n else:\n wrap.write(content)\n wrap.detach()\n bio.seek(0)\n return bio\n\n\ndef df_to_csv_bytes(df: pd.DataFrame) -> BytesIO:\n bio = BytesIO()\n wrap = TextIOWrapper(bio, encoding=\"utf-8\", write_through=True)\n df.to_csv(wrap, index=False)\n wrap.detach()\n bio.seek(0)\n return bio\n\n\nMPL_SETUP = False\n\n\ndef setup_matplotlib() -> 
None:\n global MPL_SETUP\n\n if MPL_SETUP:\n return\n from pandas.plotting import register_matplotlib_converters\n\n register_matplotlib_converters()\n MPL_SETUP = True\n\n\nIS_JUPYTER: Optional[bool] = None\n\n\ndef is_jupyter() -> bool:\n global IS_JUPYTER\n\n if IS_JUPYTER is not None:\n return IS_JUPYTER\n\n try:\n from IPython import get_ipython\n\n IS_JUPYTER = get_ipython() is not None\n except (NameError, ModuleNotFoundError) as _:\n IS_JUPYTER = False\n return IS_JUPYTER\n\n\nHAS_GRAPH_EASY: Optional[bool] = None\n\n\ndef has_graph_easy() -> bool:\n global HAS_GRAPH_EASY\n\n if HAS_GRAPH_EASY is not None:\n return HAS_GRAPH_EASY\n\n try:\n import subprocess\n subprocess.Popen([\"graph-easy\", \"--help\"])\n HAS_GRAPH_EASY = True\n except FileNotFoundError:\n # pylint: disable=line-too-long\n print(\n \"Warning: Graph:Easy module not found. Use the \"\n \"whalebrew to install graph-easy. \\n\"\n \"https://stackoverflow.com/questions/3211801/graphviz-and-ascii-output/55403011#55403011\") # nopep8, line too long\n HAS_GRAPH_EASY = False\n return HAS_GRAPH_EASY\n\n\nHAS_DVC: Optional[bool] = None\n\n\ndef has_dvc() -> bool:\n global HAS_DVC\n\n if HAS_DVC is not None:\n return HAS_DVC\n try:\n # pylint: disable=unused-import\n import dvc.api\n HAS_DVC = True\n except (NameError, ModuleNotFoundError) as _:\n HAS_DVC = False\n return HAS_DVC\n\n\ndef get_progress_bar(out: Optional[IO[Any]]) -> Callable[[float, bool], None]:\n # pylint: disable=unused-argument\n\n def no_bar(progress: float, final: bool) -> None:\n return\n\n if out is None:\n return no_bar\n\n io_out: IO[Any] = out\n\n if is_jupyter():\n from IPython.display import ProgressBar\n\n mul = 1000\n bar = ProgressBar(mul)\n bar.display()\n\n def jupyter_bar(progress: float, final: bool) -> None:\n bar.progress = int(progress * mul)\n end = \"\\n\" if final else \"\\r\"\n io_out.write(f\"{progress * 100.0:.2f}%{end}\")\n\n return jupyter_bar\n\n cols, _ = shutil.get_terminal_size((80, 20))\n max_len = len(\" 100.00%\")\n border = \"|\"\n\n def stdout_bar(progress: float, final: bool) -> None:\n pstr = f\" {progress * 100.0:.2f}%\"\n cur_len = len(pstr)\n if cur_len < max_len:\n pstr = f\"{' ' * (max_len - cur_len)}{pstr}\"\n end = \"\\n\" if final else \"\\r\"\n full_len = len(border) * 2 + len(pstr) + len(end)\n bar = \"█\" * int(progress * (cols - full_len))\n mid = ' ' * max(0, cols - full_len - len(bar))\n io_out.write(f\"{border}{bar}{mid}{border}{pstr}{end}\")\n\n return stdout_bar\n\n\ndef get_file_hash(buff: IO[bytes]) -> str:\n \"\"\"Return sha224 hash of data files\n\n Args:\n buff (IO[bytes]): Data used to generate the hash.\n\n Returns:\n str: A sha224 hashed string.\n \"\"\"\n import hashlib\n\n sha = hashlib.sha224()\n chunk_size = FILE_HASH_CHUNK_SIZE\n init_pos = buff.seek(0, io.SEEK_CUR)\n while True:\n chunk = buff.read(chunk_size)\n if not chunk:\n break\n sha.update(chunk)\n buff.seek(init_pos, io.SEEK_SET)\n return sha.hexdigest()\n\n\ndef interpret_ctype(data: IO[bytes], ctype: str) -> ByteResponse:\n if ctype == \"application/json\":\n return json.load(data)\n if ctype == \"application/problem+json\":\n res = json.load(data)\n raise ServerSideError(res[\"errMessage\"])\n if ctype == \"application/parquet\":\n return pd.read_parquet(data)\n if ctype == \"application/torch\":\n return torch.load(data)\n if ctype == \"application/npz\":\n return sparse.load_npz(data)\n if ctype == \"application/jsonl\":\n return [\n json.load(BytesIO(line))\n for line in data\n ]\n content = BytesIO(data.read())\n if ctype 
== \"application/octet-stream\":\n return content\n # NOTE: try best guess...\n try:\n return pd.read_parquet(content)\n except OSError:\n pass\n content.seek(0)\n try:\n return json.load(content)\n except json.decoder.JSONDecodeError:\n pass\n except UnicodeDecodeError:\n pass\n content.seek(0)\n try:\n return [\n json.load(BytesIO(line))\n for line in content\n ]\n except json.decoder.JSONDecodeError:\n pass\n except UnicodeDecodeError:\n pass\n content.seek(0)\n return content\n\n\ndef merge_ctype(datas: List[ByteResponse], ctype: str) -> ByteResponse:\n if ctype == \"application/json\":\n return cast(ByteResponse, datas)\n if ctype == \"application/parquet\":\n return pd.concat(datas)\n if ctype == \"application/torch\":\n return torch.cat(datas, dim=0) # pylint: disable=no-member\n if ctype == \"application/npz\":\n return sparse.vstack(datas)\n if ctype == \"application/jsonl\":\n return [\n cast(Any, obj)\n for arr in datas\n for obj in arr\n ]\n return cast(ByteResponse, datas)\n\n\ndef async_compute(\n arr: List[Any],\n start: Callable[[List[Any]], List[RT]],\n get: Callable[[RT], ByteResponse],\n check_queue: Callable[[], MinimalQueueStatsResponse],\n get_status: Callable[[List[RT]], Dict[RT, QueueStatus]],\n max_buff: int,\n block_size: int,\n num_threads: int) -> Iterable[ByteResponse]:\n assert max_buff > 0\n assert block_size > 0\n assert num_threads > 0\n arr = list(arr)\n done: List[bool] = [False]\n end_produce: List[bool] = [False]\n exc: List[Optional[BaseException]] = [None]\n cond = threading.Condition()\n ids: Dict[RT, int] = {}\n res: Dict[int, ByteResponse] = {}\n min_size_th = 20\n main_threads = 3\n\n def get_waiting_count(remote_queue: MinimalQueueStatsResponse) -> int:\n return remote_queue[\"total\"] - remote_queue[\"active\"]\n\n def can_push_more() -> bool:\n if exc[0] is not None:\n return True\n if len(ids) < max_buff:\n return True\n try:\n waiting_count = get_waiting_count(check_queue())\n except BaseException as e: # pylint: disable=broad-except\n if exc[0] is None:\n exc[0] = e\n return True\n return waiting_count < max_buff\n\n def push(cur: List[Any], start_pos: int) -> None:\n if len(cur) <= min_size_th * block_size:\n try:\n for block_ix in range(0, len(cur), block_size):\n ids.update({\n cur_id: cur_ix + start_pos + block_ix\n for (cur_ix, cur_id) in enumerate(\n start(cur[block_ix:block_ix + block_size]))\n })\n except BaseException as e: # pylint: disable=broad-except\n if exc[0] is None:\n exc[0] = e\n else:\n half_ix: int = len(cur) // 2\n args = (cur[half_ix:], start_pos + half_ix)\n push_th = threading.Thread(target=push, args=args)\n push_th.start()\n push(cur[:half_ix], start_pos)\n push_th.join()\n\n def produce() -> None:\n try:\n pos = 0\n while pos < len(arr):\n with cond:\n while not cond.wait_for(can_push_more, timeout=0.1):\n pass\n if exc[0] is not None:\n break\n start_pos = pos\n try:\n remote_queue = check_queue()\n except BaseException as e: # pylint: disable=broad-except\n if exc[0] is None:\n exc[0] = e\n continue\n waiting_count = get_waiting_count(remote_queue)\n add_more = max(\n max_buff - len(ids),\n max_buff - waiting_count)\n if add_more > 0:\n cur = arr[pos:pos + add_more]\n pos += len(cur)\n push(cur, start_pos)\n with cond:\n cond.notify_all()\n finally:\n end_produce[0] = True\n with cond:\n cond.notify_all()\n\n def get_one(t_ix: int, t_id: RT) -> None:\n try:\n res[t_ix] = get(t_id)\n except KeyError:\n pass\n except BaseException as e: # pylint: disable=broad-except\n if exc[0] is None:\n exc[0] = e\n\n def 
consume() -> None:\n while not done[0]:\n with cond:\n while not cond.wait_for(\n lambda: exc[0] is not None or done[0] or len(ids) > 0,\n timeout=0.1):\n pass\n do_wait = False\n while ids:\n do_wait = True\n sorted_ids = sorted(ids.items(), key=lambda v: v[1])\n lookahead = main_threads * num_threads\n check_ids = [v[0] for v in sorted_ids[0:lookahead]]\n if not check_ids:\n continue\n status = get_status(check_ids)\n ths: List[threading.Thread] = []\n for (t_id, t_status) in status.items():\n if t_status in (\"waiting\", \"running\"):\n continue\n do_wait = False\n try:\n t_ix = ids.pop(t_id)\n args = (t_ix, t_id)\n r_th = threading.Thread(target=get_one, args=args)\n r_th.start()\n ths.append(r_th)\n except KeyError:\n pass\n for r_th in ths:\n r_th.join()\n if do_wait:\n time.sleep(0.1)\n else:\n with cond:\n cond.notify_all()\n\n try:\n prod_th = threading.Thread(target=produce)\n prod_th.start()\n consume_ths = [\n threading.Thread(target=consume)\n for _ in range(main_threads)\n ]\n for th in consume_ths:\n th.start()\n with cond:\n cond.notify_all()\n yield_ix = 0\n while yield_ix < len(arr):\n with cond:\n while not cond.wait_for(\n lambda: exc[0] is not None or bool(res), timeout=0.1):\n pass\n if exc[0] is not None:\n break\n try:\n while res:\n yield res.pop(yield_ix)\n yield_ix += 1\n except KeyError:\n pass\n if exc[0] is not None:\n with cond:\n cond.wait_for(lambda: end_produce[0])\n finally:\n done[0] = True\n with cond:\n cond.notify_all()\n prod_th.join()\n for th in consume_ths:\n th.join()\n raise_e = exc[0]\n if isinstance(raise_e, BaseException):\n raise raise_e # pylint: disable=raising-bad-type\n\n\nclass ServerSideError(Exception):\n def __init__(self, message: str) -> None:\n self._message = message\n super().__init__(self._message)\n\n def __str__(self) -> str:\n return f\"Error from xyme backend: \\n{self._message}\"\n\n\ndef escape_str(value: str) -> str:\n return value.encode().decode(\"unicode-escape\").strip('\"\"')\n\n\ndef report_json_error(err: json.JSONDecodeError) -> None:\n print(f\"JSON parse error ({err.lineno}:{err.colno}): {repr(err.doc)}\")\n\n\ndef json_loads(value: str) -> Any:\n try:\n return json.loads(escape_str(value))\n except json.JSONDecodeError as e:\n report_json_error(e)\n raise e\n\n\ndef maybe_json_loads(value: str) -> Any:\n try:\n return json.loads(escape_str(value))\n except json.JSONDecodeError:\n return None\n" ]
[ [ "pandas.concat", "torch.load", "torch.cat", "scipy.sparse.load_npz", "pandas.read_parquet", "pandas.plotting.register_matplotlib_converters", "scipy.sparse.vstack", "pandas.Timestamp" ] ]
HazyResearch/torchhalp
[ "58dbfc5bd2997660ded3ea7a27f6df686d251a66" ]
[ "examples/regression/utils.py" ]
[ "import torch\nimport torch.utils.data as data\n\nclass SynthDataset(data.Dataset):\n def __init__(self, data, labels):\n self.data = data\n self.labels = labels\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n return self.data[idx], self.labels[idx]\n\ndef build_model(input_dim, output_dim=1, initial_value=None):\n model = torch.nn.Sequential()\n module = torch.nn.Linear(input_dim, output_dim, bias=False)\n if initial_value is not None:\n module.weight.data = torch.from_numpy(initial_value).type(torch.FloatTensor)\n model.add_module(\"linear\", module)\n else:\n model.add_module(\"linear\", torch.nn.Linear(input_dim, output_dim, bias=False))\n return model" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "torch.from_numpy" ] ]
MollsAndHersh/AdaptiveCards
[ "f43104dd7bf4d4451cb4b7cb76dc4911feb84dc5" ]
[ "source/pic2card/mystique/utils.py" ]
[ "import time\nimport io\nimport re\nfrom typing import Optional, Dict\nimport glob\nimport xml.etree.ElementTree as Et\nfrom contextlib import contextmanager\nfrom importlib import import_module\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom mystique import config\n\n# Colro map used for the plotting.\nCOLORS = [\n [0.000, 0.447, 0.888],\n [0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],\n [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]\n]\n\n\n@contextmanager\ndef timeit(name=\"code-block\"):\n \"\"\"\n Execute the codeblock and measure the time.\n\n >> with timeit('name') as f:\n >> # Your code block\n \"\"\"\n try:\n start = time.time()\n yield\n finally:\n # Execution is over.\n end = time.time() - start\n print(f\"Execution block: {name} finishes in : {end} sec.\")\n\n\ndef xml_to_csv(labelmg_dir: str) -> pd.DataFrame:\n \"\"\"\n Maps the xml labels of each object\n to the image file\n\n @param labelmg_dir: Folder with labelmg exported image and tags.\n\n @return: xml dataframe\n \"\"\"\n xml_list = []\n for xml_file in glob.glob(labelmg_dir + \"/*.xml\"):\n tree = Et.parse(xml_file)\n root = tree.getroot()\n for member in root.findall(\"object\"):\n value = (root.find(\"filename\").text,\n int(root.find(\"size\")[0].text),\n int(root.find(\"size\")[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = [\"filename\", \"width\", \"height\", \"class\", \"xmin\",\n \"ymin\", \"xmax\", \"ymax\"]\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n\n return xml_df\n\n\ndef id_to_label(label_id: int) -> Optional[str]:\n return config.ID_TO_LABEL.get(label_id)\n\n\ndef plot_results(pil_img: Image,\n classes: np.array,\n scores: np.array,\n boxes: np.array,\n label_map: Dict = None,\n score_threshold=0.8) -> io.BytesIO:\n \"\"\"\n Generic bounding box plotting, inspired from detr implementation.\n\n Returns binary representation of the image with bounding box drawn, Use\n `Image.open` to render the image.\n \"\"\"\n label_map = label_map or config.ID_TO_LABEL\n plt.imshow(pil_img)\n plt.margins(0, 0)\n plt.axis('off')\n ax = plt.gca()\n\n keep = scores >= score_threshold\n scores = scores[keep]\n boxes = boxes[keep]\n classes = classes[keep]\n\n for cl_id, score, (xmin, ymin, xmax, ymax), c in zip(classes,\n scores,\n boxes.tolist(),\n COLORS * 100):\n\n ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,\n fill=False, color=c, linewidth=1))\n text = f'{label_map[cl_id]}: {score:0.2f}'\n ax.text(xmin, ymin, text, fontsize=8,\n bbox=dict(facecolor='yellow', alpha=0.5))\n\n img_buf = io.BytesIO()\n plt.savefig(img_buf, format=\"png\", bbox_inches='tight', pad_inches=0)\n img_buf.seek(0)\n plt.close()\n return img_buf\n\n\ndef load_od_instance():\n \"\"\"\n Load the object detection instance from class_path\n \"\"\"\n class_path = config.MODEL_REGISTRY[config.ACTIVE_MODEL_NAME]\n p_split = class_path.split(\".\")\n module_path, class_name = \".\".join(p_split[:-1]), p_split[-1]\n module = import_module(module_path)\n od_obj = getattr(module, class_name)()\n return od_obj\n\n\ndef text_size_processing(text, height):\n \"\"\"\n Reduces the extra pixels to normalize the height of text boxes\n @param text: input extraced text from pytesseract\n @param height: input height of the extracted text\n @return: height int\n \"\"\"\n extra_pixel_char = r\"y|g|j|p|q\"\n 
match = re.search(extra_pixel_char, text)\n if (match or text[0].isupper()):\n height -= 2\n return height\n" ]
[ [ "matplotlib.pyplot.Rectangle", "matplotlib.pyplot.gca", "matplotlib.pyplot.imshow", "matplotlib.pyplot.margins", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.axis" ] ]
3sky/mlflow-example
[ "9ff864ad68f0e65dee494d1d73a6b3923a399cfc" ]
[ "train.py" ]
[ "# The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality\n# P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.\n# Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.\n\nimport os\nimport warnings\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\n\nimport mlflow\nimport mlflow.sklearn\n\n\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n np.random.seed(40)\n\n # Read the wine-quality csv file (make sure you're running this from the root of MLflow!)\n wine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"skoda.csv\")\n data = pd.read_csv(wine_path, usecols = ['year','price', 'mileage', 'tax', 'mpg', 'engineSize'])\n\n # Split the data into training and test sets. (0.75, 0.25) split.\n train, test = train_test_split(data)\n\n # The predicted column is \"price\" which is a scalar from [7000, 30000]\n train_x = train.drop([\"price\"], axis=1)\n test_x = test.drop([\"price\"], axis=1)\n train_y = train[[\"price\"]]\n test_y = test[[\"price\"]]\n\n alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5\n l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5\n\n with mlflow.start_run():\n lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n lr.fit(train_x, train_y)\n\n predicted_qualities = lr.predict(test_x)\n\n (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n\n print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n print(\" RMSE: %s\" % rmse)\n print(\" MAE: %s\" % mae)\n print(\" R2: %s\" % r2)\n\n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"l1_ratio\", l1_ratio)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"r2\", r2)\n mlflow.log_metric(\"mae\", mae)\n\n mlflow.sklearn.log_model(lr, \"model\")\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "numpy.random.seed", "sklearn.linear_model.ElasticNet", "sklearn.metrics.mean_absolute_error", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error" ] ]
eli-osherovich/datasets
[ "4da1816088ea9e72b5761efc2534a4d032a2a438" ]
[ "tensorflow_datasets/core/features/class_label_feature.py" ]
[ "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"ClassLabel feature.\"\"\"\n\nimport os\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.core.features import tensor_feature\nfrom tensorflow_datasets.core.utils import type_utils\n\nJson = type_utils.Json\n\n\nclass ClassLabel(tensor_feature.Tensor):\n \"\"\"`FeatureConnector` for integer class labels.\"\"\"\n\n def __init__(self, *, num_classes=None, names=None, names_file=None):\n \"\"\"Constructs a ClassLabel FeatureConnector.\n\n There are 3 ways to define a ClassLabel, which correspond to the 3\n arguments:\n\n * `num_classes`: create 0 to (num_classes-1) labels\n * `names`: a list of label strings\n * `names_file`: a file containing the list of labels.\n\n Note: On python2, the strings are encoded as utf-8.\n\n Args:\n num_classes: `int`, number of classes. All labels must be < num_classes.\n names: `list<str>`, string names for the integer classes. The order in\n which the names are provided is kept.\n names_file: `str`, path to a file with names for the integer classes, one\n per line.\n \"\"\"\n super(ClassLabel, self).__init__(shape=(), dtype=tf.int64)\n\n self._num_classes = None\n self._str2int = None\n self._int2str = None\n\n # The label is explicitly set as undefined (no label defined)\n if all(a is None for a in (num_classes, names, names_file)):\n return\n\n if sum(a is not None for a in (num_classes, names, names_file)) != 1:\n raise ValueError(\n \"Only a single argument of ClassLabel() should be provided.\")\n\n if num_classes is not None:\n self._num_classes = num_classes\n elif names is not None:\n self.names = names\n else:\n self.names = _load_names_from_file(names_file)\n\n @property\n def num_classes(self):\n return self._num_classes\n\n @property\n def names(self):\n if not self._int2str:\n return [tf.compat.as_text(str(i)) for i in range(self._num_classes)]\n return list(self._int2str)\n\n @names.setter\n def names(self, new_names):\n int2str = [tf.compat.as_text(name) for name in new_names]\n # Names can only be defined once\n if self._int2str is not None and self._int2str != int2str:\n raise ValueError(\n \"Trying to overwrite already defined ClassLabel names. Previous: {} \"\n \", new: {}\".format(self._int2str, int2str))\n\n # Set-up [new] names\n self._int2str = int2str\n self._str2int = {name: i for i, name in enumerate(self._int2str)}\n if len(self._int2str) != len(self._str2int):\n raise ValueError(\n \"Some label names are duplicated. Each label name should be unique.\")\n\n # If num_classes has been defined, ensure that num_classes and names match\n num_classes = len(self._str2int)\n if self._num_classes is None:\n self._num_classes = num_classes\n elif self._num_classes != num_classes:\n raise ValueError(\n \"ClassLabel number of names do not match the defined num_classes. 
\"\n \"Got {} names VS {} num_classes\".format(num_classes,\n self._num_classes))\n\n def str2int(self, str_value):\n \"\"\"Conversion class name string => integer.\"\"\"\n str_value = tf.compat.as_text(str_value)\n if self._str2int:\n return self._str2int[str_value]\n\n # No names provided, try to integerize\n failed_parse = False\n try:\n int_value = int(str_value)\n except ValueError:\n failed_parse = True\n if failed_parse or not 0 <= int_value < self._num_classes:\n raise ValueError(\"Invalid string class label %s\" % str_value)\n return int_value\n\n def int2str(self, int_value):\n \"\"\"Conversion integer => class name string.\"\"\"\n if self._int2str:\n # Maybe should support batched np array/eager tensors, to allow things\n # like\n # out_ids = model(inputs)\n # labels = cifar10.info.features['label'].int2str(out_ids)\n return self._int2str[int_value]\n\n # No names provided, return str(int)\n if not 0 <= int_value < self._num_classes:\n raise ValueError(\"Invalid integer class label %d\" % int_value)\n return tf.compat.as_text(str(int_value))\n\n def encode_example(self, example_data):\n if self._num_classes is None:\n raise ValueError(\n \"Trying to use ClassLabel feature with undefined number of class. \"\n \"Please set ClassLabel.names or num_classes.\")\n\n # If a string is given, convert to associated integer\n if isinstance(example_data, str):\n example_data = self.str2int(example_data)\n elif isinstance(example_data, bytes):\n # Accept bytes if user yield `tensor.numpy()`\n # Python 3 doesn't interpret byte strings as strings directly.\n example_data = self.str2int(example_data.decode(\"utf-8\"))\n\n # Allowing -1 to mean no label.\n if not -1 <= example_data < self._num_classes:\n raise ValueError(\"Class label %d greater than configured num_classes %d\" %\n (example_data, self._num_classes))\n return example_data\n\n def save_metadata(self, data_dir, feature_name=None):\n \"\"\"See base class for details.\"\"\"\n # Save names if defined\n if self._str2int is not None:\n names_filepath = _get_names_filepath(data_dir, feature_name)\n _write_names_to_file(names_filepath, self.names)\n\n def load_metadata(self, data_dir, feature_name=None):\n \"\"\"See base class for details.\"\"\"\n # Restore names if defined\n names_filepath = _get_names_filepath(data_dir, feature_name)\n if tf.io.gfile.exists(names_filepath):\n self.names = _load_names_from_file(names_filepath)\n\n def _additional_repr_info(self):\n return {\"num_classes\": self.num_classes}\n\n def repr_html(self, ex: int) -> str:\n \"\"\"Class labels are displayed with their name.\"\"\"\n if ex == -1:\n return \"-\"\n elif not self._int2str:\n return str(ex)\n else:\n return f\"{ex} ({self.int2str(ex)})\"\n\n @classmethod\n def from_json_content(cls, value: Json) -> \"ClassLabel\":\n return cls(**value)\n\n def to_json_content(self) -> Json:\n return {\"num_classes\": self.num_classes}\n\n\ndef _get_names_filepath(data_dir, feature_name):\n return os.path.join(data_dir, \"{}.labels.txt\".format(feature_name))\n\n\ndef _load_names_from_file(names_filepath):\n names_filepath = os.fspath(names_filepath)\n with tf.io.gfile.GFile(names_filepath, \"r\") as f:\n return [\n name.strip()\n for name in tf.compat.as_text(f.read()).split(\"\\n\")\n if name.strip() # Filter empty names\n ]\n\n\ndef _write_names_to_file(names_filepath, names):\n with tf.io.gfile.GFile(names_filepath, \"w\") as f:\n f.write(\"\\n\".join(names) + \"\\n\")\n" ]
[ [ "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.compat.as_text" ] ]
MichelBartels/haystack
[ "b63669d1bc60b6c773b8b89d631afdd0ebbf4c4c" ]
[ "haystack/modeling/model/adaptive_model.py" ]
[ "import copy\nimport json\nimport logging\nimport multiprocessing\nimport os\nfrom pathlib import Path\nfrom typing import Iterable, Dict, Union, List, Optional, Callable\n\nimport numpy\nimport torch\nfrom torch import nn\nfrom transformers import AutoConfig\nfrom transformers.convert_graph_to_onnx import convert, quantize as quantize_model\n\nimport haystack.modeling.conversion.transformers as conv\nfrom haystack.modeling.data_handler.processor import Processor\nfrom haystack.modeling.model.language_model import LanguageModel\nfrom haystack.modeling.model.prediction_head import PredictionHead\nfrom haystack.modeling.logger import MLFlowLogger as MlLogger\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseAdaptiveModel:\n \"\"\"\n Base Class for implementing AdaptiveModel with frameworks like PyTorch and ONNX.\n \"\"\"\n language_model: LanguageModel\n subclasses = {} # type: Dict\n\n def __init_subclass__(cls, **kwargs):\n \"\"\" \n This automatically keeps track of all available subclasses.\n Enables generic load() for all specific AdaptiveModel implementation.\n \"\"\"\n super().__init_subclass__(**kwargs)\n cls.subclasses[cls.__name__] = cls\n\n def __init__(self, prediction_heads: Union[List[PredictionHead], nn.ModuleList]):\n self.prediction_heads = prediction_heads\n\n @classmethod\n def load(cls, **kwargs):\n \"\"\"\n Load corresponding AdaptiveModel Class(AdaptiveModel/ONNXAdaptiveModel) based on the\n files in the load_dir.\n\n :param kwargs: Arguments to pass for loading the model.\n :return: Instance of a model.\n \"\"\"\n if (Path(kwargs[\"load_dir\"]) / \"model.onnx\").is_file():\n model = cls.subclasses[\"ONNXAdaptiveModel\"].load(**kwargs)\n else:\n model = cls.subclasses[\"AdaptiveModel\"].load(**kwargs)\n return model\n\n def logits_to_preds(self, logits: torch.Tensor, **kwargs):\n \"\"\"\n Get predictions from all prediction heads.\n\n :param logits: Logits that can vary in shape and type, depending on task.\n :return: A list of all predictions from all prediction heads.\n \"\"\"\n all_preds = []\n # collect preds from all heads\n for head, logits_for_head in zip(self.prediction_heads, logits):\n preds = head.logits_to_preds(logits=logits_for_head, **kwargs)\n all_preds.append(preds)\n return all_preds\n\n def formatted_preds(self, logits: torch.Tensor, **kwargs):\n \"\"\"\n Format predictions for inference.\n\n :param logits: Model logits.\n :return: Predictions in the right format.\n \"\"\"\n n_heads = len(self.prediction_heads)\n\n if n_heads == 0:\n # just return LM output (e.g. 
useful for extracting embeddings at inference time)\n preds_final = self.language_model.formatted_preds(logits=logits, **kwargs)\n\n elif n_heads == 1:\n preds_final = []\n # This try catch is to deal with the fact that sometimes we collect preds before passing it to\n # formatted_preds (see Inferencer._get_predictions_and_aggregate()) and sometimes we don't\n # (see Inferencer._get_predictions())\n try:\n preds = kwargs[\"preds\"]\n temp = [y[0] for y in preds]\n preds_flat = [item for sublist in temp for item in sublist]\n kwargs[\"preds\"] = preds_flat\n except KeyError:\n kwargs[\"preds\"] = None\n head = self.prediction_heads[0]\n logits_for_head = logits[0]\n preds = head.formatted_preds(logits=logits_for_head, **kwargs)\n # TODO This is very messy - we need better definition of what the output should look like\n if type(preds) == list:\n preds_final += preds\n elif type(preds) == dict and \"predictions\" in preds:\n preds_final.append(preds)\n\n return preds_final\n\n def connect_heads_with_processor(self, tasks: Dict, require_labels: bool = True):\n \"\"\"\n Populates prediction head with information coming from tasks.\n\n :param tasks: A dictionary where the keys are the names of the tasks and\n the values are the details of the task (e.g. label_list, metric,\n tensor name).\n :param require_labels: If True, an error will be thrown when a task is\n not supplied with labels.\n :return: None\n \"\"\"\n for head in self.prediction_heads:\n head.label_tensor_name = tasks[head.task_name][\"label_tensor_name\"]\n label_list = tasks[head.task_name][\"label_list\"]\n if not label_list and require_labels:\n raise Exception(f\"The task \\'{head.task_name}\\' is missing a valid set of labels\")\n label_list = tasks[head.task_name][\"label_list\"]\n head.label_list = label_list\n head.metric = tasks[head.task_name][\"metric\"]\n\n @classmethod\n def _get_prediction_head_files(cls, load_dir: Union[str, Path], strict: bool = True):\n load_dir = Path(load_dir)\n files = os.listdir(load_dir)\n model_files = [\n load_dir / f\n for f in files\n if \".bin\" in f and \"prediction_head\" in f\n ]\n config_files = [\n load_dir / f\n for f in files\n if \"config.json\" in f and \"prediction_head\" in f\n ]\n # sort them to get correct order in case of multiple prediction heads\n model_files.sort()\n config_files.sort()\n\n if strict:\n error_str = (\n f\"There is a mismatch in number of model files ({len(model_files)}) and config files ({len(config_files)}).\"\n \"This might be because the Language Model Prediction Head \"\n \"does not currently support saving and loading\"\n )\n assert len(model_files) == len(config_files), error_str\n logger.info(f\"Found files for loading {len(model_files)} prediction heads\")\n\n return model_files, config_files\n\ndef loss_per_head_sum(loss_per_head: Iterable, global_step: Optional[int] = None, batch: Optional[Dict] = None):\n \"\"\"\n Sums up the loss of each prediction head.\n\n :param loss_per_head: List of losses.\n \"\"\"\n return sum(loss_per_head)\n\n\nclass AdaptiveModel(nn.Module, BaseAdaptiveModel):\n \"\"\" \n PyTorch implementation containing all the modelling needed for your NLP task. Combines a language\n model and a prediction head. 
Allows for gradient flow back to the language model component.\n \"\"\"\n def __init__(\n self,\n language_model: LanguageModel,\n prediction_heads: List[PredictionHead],\n embeds_dropout_prob: float,\n lm_output_types: Union[str, List[str]],\n device: str,\n loss_aggregation_fn: Optional[Callable] = None,\n ):\n \"\"\"\n :param language_model: Any model that turns token ids into vector representations.\n :param prediction_heads: A list of models that take embeddings and return logits for a given task.\n :param embeds_dropout_prob: The probability that a value in the embeddings returned by the\n language model will be zeroed.\n :param lm_output_types: How to extract the embeddings from the final layer of the language model. When set\n to \"per_token\", one embedding will be extracted per input token. If set to\n \"per_sequence\", a single embedding will be extracted to represent the full\n input sequence. Can either be a single string, or a list of strings,\n one for each prediction head.\n :param device: The device on which this model will operate. Either \"cpu\" or \"cuda\".\n :param loss_aggregation_fn: Function to aggregate the loss of multiple prediction heads.\n Input: loss_per_head (list of tensors), global_step (int), batch (dict)\n Output: aggregated loss (tensor)\n Default is a simple sum:\n `lambda loss_per_head, global_step=None, batch=None: sum(tensors)`\n However, you can pass more complex functions that depend on the\n current step (e.g. for round-robin style multitask learning) or the actual\n content of the batch (e.g. certain labels)\n Note: The loss at this stage is per sample, i.e one tensor of\n shape (batchsize) per prediction head.\n \"\"\"\n super(AdaptiveModel, self).__init__() # type: ignore\n self.device = device\n self.language_model = language_model.to(device)\n self.lm_output_dims = language_model.get_output_dims()\n self.prediction_heads = nn.ModuleList([ph.to(device) for ph in prediction_heads])\n self.fit_heads_to_lm()\n self.dropout = nn.Dropout(embeds_dropout_prob)\n self.lm_output_types = (\n [lm_output_types] if isinstance(lm_output_types, str) else lm_output_types\n )\n self.log_params()\n # default loss aggregation function is a simple sum (without using any of the optional params)\n if not loss_aggregation_fn:\n loss_aggregation_fn = loss_per_head_sum\n self.loss_aggregation_fn = loss_aggregation_fn\n\n def fit_heads_to_lm(self):\n \"\"\"\n This iterates over each prediction head and ensures that its input\n dimensionality matches the output dimensionality of the language model.\n If it doesn't, it is resized so it does fit.\n \"\"\"\n for ph in self.prediction_heads:\n ph.resize_input(self.lm_output_dims)\n ph.to(self.device)\n\n def bypass_ph(self):\n \"\"\"\n Replaces methods in the prediction heads with dummy functions.\n Used for benchmarking where we want to isolate the LanguageModel run time\n from the PredictionHead run time.\n \"\"\"\n # TODO convert inner functions into lambdas\n\n def fake_forward(x):\n \"\"\"\n Slices lm vector outputs of shape (batch_size, max_seq_len, dims) --> (batch_size, max_seq_len, 2)\n \"\"\"\n return x.narrow(2, 0, 2)\n\n def fake_logits_to_preds(logits, **kwargs):\n batch_size = logits.shape[0]\n return [None, None] * batch_size\n\n def fake_formatted_preds(**kwargs):\n return None\n\n for ph in self.prediction_heads:\n ph.forward = fake_forward\n ph.logits_to_preds = fake_logits_to_preds\n ph.formatted_preds = fake_formatted_preds\n\n def save(self, save_dir: Union[str, Path]):\n \"\"\"\n Saves the language 
model and prediction heads. This will generate a config file\n and model weights for each.\n\n :param save_dir: Path to save the AdaptiveModel to.\n \"\"\"\n os.makedirs(save_dir, exist_ok=True)\n self.language_model.save(save_dir)\n for i, ph in enumerate(self.prediction_heads):\n ph.save(save_dir, i)\n # Need to save config and pipeline\n\n @classmethod\n def load(cls, load_dir: Union[str, Path], device: str, strict: bool = True, lm_name: Optional[str] = None, # type: ignore\n processor: Optional[Processor] = None):\n \"\"\"\n Loads an AdaptiveModel from a directory. The directory must contain:\n\n * language_model.bin\n * language_model_config.json\n * prediction_head_X.bin multiple PH possible\n * prediction_head_X_config.json\n * processor_config.json config for transforming input\n * vocab.txt vocab file for language model, turning text to Wordpiece Tokens\n\n :param load_dir: Location where the AdaptiveModel is stored.\n :param device: To which device we want to sent the model, either cpu or cuda.\n :param lm_name: The name to assign to the loaded language model.\n :param strict: Whether to strictly enforce that the keys loaded from saved model match the ones in\n the PredictionHead (see torch.nn.module.load_state_dict()).\n :param processor: Processor to populate prediction head with information coming from tasks.\n \"\"\"\n # Language Model\n if lm_name:\n language_model = LanguageModel.load(load_dir, haystack_lm_name=lm_name)\n else:\n language_model = LanguageModel.load(load_dir)\n\n # Prediction heads\n _, ph_config_files = cls._get_prediction_head_files(load_dir)\n prediction_heads = []\n ph_output_type = []\n for config_file in ph_config_files:\n head = PredictionHead.load(config_file, strict=strict)\n prediction_heads.append(head)\n ph_output_type.append(head.ph_output_type)\n\n model = cls(language_model, prediction_heads, 0.1, ph_output_type, device)\n if processor:\n model.connect_heads_with_processor(processor.tasks)\n\n return model\n\n def logits_to_loss_per_head(self, logits: torch.Tensor, **kwargs):\n \"\"\"\n Collect losses from each prediction head.\n\n :param logits: Logits, can vary in shape and type, depending on task.\n :return: The per sample per prediciton head loss whose first two dimensions\n have length n_pred_heads, batch_size.\n \"\"\"\n all_losses = []\n for head, logits_for_one_head in zip(self.prediction_heads, logits):\n # check if PredictionHead connected to Processor\n assert hasattr(head, \"label_tensor_name\"), \\\n (f\"Label_tensor_names are missing inside the {head.task_name} Prediction Head. 
Did you connect the model\"\n \" with the processor through either 'model.connect_heads_with_processor(processor.tasks)'\"\n \" or by passing the processor to the Adaptive Model?\")\n all_losses.append(head.logits_to_loss(logits=logits_for_one_head, **kwargs))\n return all_losses\n\n def logits_to_loss(self, logits: torch.Tensor, global_step: Optional[int] = None, **kwargs):\n \"\"\"\n Get losses from all prediction heads & reduce to single loss *per sample*.\n\n :param logits: Logits, can vary in shape and type, depending on task.\n :param global_step: Number of current training step.\n :param kwargs: Placeholder for passing generic parameters.\n Note: Contains the batch (as dict of tensors), when called from Trainer.train().\n :return: torch.tensor that is the per sample loss (len: batch_size)\n \"\"\"\n all_losses = self.logits_to_loss_per_head(logits, **kwargs)\n # This aggregates the loss per sample across multiple prediction heads\n # Default is sum(), but you can configure any fn that takes [Tensor, Tensor ...] and returns [Tensor]\n loss = self.loss_aggregation_fn(all_losses, global_step=global_step, batch=kwargs)\n return loss\n\n def prepare_labels(self, **kwargs):\n \"\"\"\n Label conversion to original label space, per prediction head.\n\n :param label_maps: dictionary for mapping ids to label strings\n :type label_maps: dict[int:str]\n :return: labels in the right format\n \"\"\"\n all_labels = []\n # for head, label_map_one_head in zip(self.prediction_heads):\n # labels = head.prepare_labels(label_map=label_map_one_head, **kwargs)\n # all_labels.append(labels)\n for head in self.prediction_heads:\n labels = head.prepare_labels(**kwargs)\n all_labels.append(labels)\n return all_labels\n\n def forward(self, **kwargs):\n \"\"\"\n Push data through the whole model and returns logits. The data will\n propagate through the language model and each of the attached prediction heads.\n\n :param kwargs: Holds all arguments that need to be passed to the language model\n and prediction head(s).\n :return: All logits as torch.tensor or multiple tensors.\n \"\"\"\n # Run forward pass of language model\n sequence_output, pooled_output = self.forward_lm(**kwargs)\n\n # Run forward pass of (multiple) prediction heads using the output from above\n all_logits = []\n if len(self.prediction_heads) > 0:\n for head, lm_out in zip(self.prediction_heads, self.lm_output_types):\n # Choose relevant vectors from LM as output and perform dropout\n if lm_out == \"per_token\":\n output = self.dropout(sequence_output)\n elif lm_out == \"per_sequence\" or lm_out == \"per_sequence_continuous\":\n output = self.dropout(pooled_output)\n elif (\n lm_out == \"per_token_squad\"\n ): # we need a per_token_squad because of variable metric computation later on...\n output = self.dropout(sequence_output)\n else:\n raise ValueError(\n \"Unknown extraction strategy from language model: {}\".format(lm_out)\n )\n\n # Do the actual forward pass of a single head\n all_logits.append(head(output))\n else:\n # just return LM output (e.g. 
useful for extracting embeddings at inference time)\n all_logits.append((sequence_output, pooled_output))\n\n return all_logits\n\n def forward_lm(self, **kwargs):\n \"\"\"\n Forward pass for the language model.\n\n :return: Tuple containing list of embeddings for each token and\n embedding for whole sequence.\n \"\"\"\n # Check if we have to extract from a special layer of the LM (default = last layer)\n try:\n extraction_layer = self.language_model.extraction_layer\n except:\n extraction_layer = -1\n\n # Run forward pass of language model\n if extraction_layer == -1:\n sequence_output, pooled_output = self.language_model(**kwargs, return_dict=False, output_all_encoded_layers=False)\n else:\n # get output from an earlier layer\n self.language_model.enable_hidden_states_output()\n sequence_output, pooled_output, all_hidden_states = self.language_model(**kwargs, return_dict=False)\n sequence_output = all_hidden_states[extraction_layer]\n pooled_output = None #not available in earlier layers\n self.language_model.disable_hidden_states_output()\n return sequence_output, pooled_output\n\n def log_params(self):\n \"\"\"\n Logs parameteres to generic logger MlLogger\n \"\"\"\n params = {\n \"lm_type\": self.language_model.__class__.__name__,\n \"lm_name\": self.language_model.name,\n \"prediction_heads\": \",\".join(\n [head.__class__.__name__ for head in self.prediction_heads]\n ),\n \"lm_output_types\": \",\".join(self.lm_output_types),\n }\n try:\n MlLogger.log_params(params)\n except Exception as e:\n logger.warning(f\"ML logging didn't work: {e}\")\n\n def verify_vocab_size(self, vocab_size: int):\n \"\"\"\n Verifies that the model fits to the tokenizer vocabulary.\n They could diverge in case of custom vocabulary added via tokenizer.add_tokens()\n \"\"\"\n model_vocab_len = self.language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings\n\n msg = f\"Vocab size of tokenizer {vocab_size} doesn't match with model {model_vocab_len}. \" \\\n \"If you added a custom vocabulary to the tokenizer, \" \\\n \"make sure to supply 'n_added_tokens' to LanguageModel.load() and BertStyleLM.load()\"\n assert vocab_size == model_vocab_len, msg\n\n for head in self.prediction_heads:\n if head.model_type == \"language_modelling\":\n ph_decoder_len = head.decoder.weight.shape[0]\n assert vocab_size == ph_decoder_len, msg\n\n def get_language(self):\n return self.language_model.language\n\n def convert_to_transformers(self):\n \"\"\"\n Convert an adaptive model to huggingface's transformers format. Returns a list containing one model for each\n prediction head.\n\n :return: List of huggingface transformers models.\n \"\"\"\n return conv.Converter.convert_to_transformers(self)\n\n @classmethod\n def convert_from_transformers(cls, model_name_or_path: Union[str, Path], device: str, revision: Optional[str] = None,\n task_type: Optional[str] = None, processor: Optional[Processor] = None, **kwargs):\n \"\"\"\n Load a (downstream) model from huggingface's transformers format. Use cases:\n - continue training in Haystack (e.g. 
take a squad QA model and fine-tune on your own data)\n - compare models without switching frameworks\n - use model directly for inference\n\n :param model_name_or_path: local path of a saved model or name of a public one.\n Exemplary public names:\n - distilbert-base-uncased-distilled-squad\n - deepset/bert-large-uncased-whole-word-masking-squad2\n\n See https://huggingface.co/models for full list\n :param revision: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.\n :param device: \"cpu\" or \"cuda\"\n :param task_type: One of :\n - 'question_answering'\n More tasks coming soon ...\n :param processor: Processor to populate prediction head with information coming from tasks.\n :type processor: Processor\n :return: AdaptiveModel\n \"\"\"\n return conv.Converter.convert_from_transformers(model_name_or_path,\n revision=revision,\n device=device,\n task_type=task_type,\n processor=processor,\n **kwargs)\n\n\n @classmethod\n def convert_to_onnx(cls, model_name: str, output_path: Path, task_type: str, convert_to_float16: bool = False,\n quantize: bool = False, opset_version: int = 11):\n \"\"\"\n Convert a PyTorch model from transformers hub to an ONNX Model.\n\n :param model_name: Transformers model name.\n :param output_path: Output Path to write the converted model to.\n :param task_type: Type of task for the model. Available options: \"question_answering\"\n :param convert_to_float16: By default, the model uses float32 precision. With half precision of float16, inference\n should be faster on Nvidia GPUs with Tensor core like T4 or V100. On older GPUs, float32\n might be more performant.\n :param quantize: Convert floating point number to integers\n :param opset_version: ONNX opset version.\n :return: None.\n \"\"\"\n language_model_class = LanguageModel.get_language_model_class(model_name)\n if language_model_class not in [\"Bert\", \"Roberta\", \"XLMRoberta\"]:\n raise Exception(\"The current ONNX conversion only support 'BERT', 'RoBERTa', and 'XLMRoberta' models.\")\n\n task_type_to_pipeline_map = {\n \"question_answering\": \"question-answering\",\n }\n\n convert(\n pipeline_name=task_type_to_pipeline_map[task_type],\n framework=\"pt\",\n model=model_name,\n output=output_path/\"model.onnx\",\n opset=opset_version,\n use_external_format=True if language_model_class==\"XLMRoberta\" else False\n )\n\n # save processor & model config files that are needed when loading the model with the Haystack.basics Inferencer\n processor = Processor.convert_from_transformers(\n tokenizer_name_or_path=model_name,\n task_type=task_type,\n max_seq_len=256,\n doc_stride=128,\n use_fast=True\n )\n processor.save(output_path)\n model = AdaptiveModel.convert_from_transformers(model_name, device=\"cpu\", task_type=task_type)\n model.save(output_path)\n os.remove(output_path / \"language_model.bin\") # remove the actual PyTorch model(only configs are required)\n\n onnx_model_config = {\n \"task_type\": task_type,\n \"onnx_opset_version\": opset_version,\n \"language_model_class\": language_model_class,\n \"language\": model.language_model.language\n }\n with open(output_path / \"onnx_model_config.json\", \"w\") as f:\n json.dump(onnx_model_config, f)\n\n if convert_to_float16:\n from onnxruntime_tools import optimizer\n config = AutoConfig.from_pretrained(model_name)\n optimized_model = optimizer.optimize_model(\n input=str(output_path/\"model.onnx\"),\n model_type='bert',\n num_heads=config.num_hidden_layers,\n hidden_size=config.hidden_size\n )\n 
optimized_model.convert_model_float32_to_float16()\n optimized_model.save_model_to_file(\"model.onnx\")\n\n if quantize:\n quantize_model(output_path/\"model.onnx\")\n\n\nclass ONNXAdaptiveModel(BaseAdaptiveModel):\n \"\"\"\n Implementation of ONNX Runtime for Inference of ONNX Models.\n\n Existing PyTorch based Haystack.basics AdaptiveModel can be converted to ONNX format using AdaptiveModel.convert_to_onnx().\n The conversion is currently only implemented for Question Answering Models.\n\n For inference, this class is compatible with the Haystack.basics Inferencer.\n \"\"\"\n # TODO validate usefulness\n def __init__(\n self,\n onnx_session, # TODO\n language_model_class: str,\n language: str,\n prediction_heads: List[PredictionHead],\n device: str\n ):\n \"\"\"\n :param onnx_session: ? # TODO\n :param language_model_class: Class of LanguageModel\n :param langauge: Language the model is trained for.\n :param prediction_heads: A list of models that take embeddings and return logits for a given task.\n :param device: The device on which this model will operate. Either \"cpu\" or \"cuda\".\n \"\"\"\n import onnxruntime\n if str(device) == \"cuda\" and onnxruntime.get_device() != \"GPU\":\n raise Exception(f\"Device {device} not available for Inference. For CPU, run pip install onnxruntime and\"\n f\"for GPU run pip install onnxruntime-gpu\")\n self.onnx_session = onnx_session\n self.language_model_class = language_model_class\n self.language = language\n self.prediction_heads = prediction_heads\n self.device = device\n\n @classmethod\n def load(cls, load_dir: Union[str, Path], device: str, **kwargs): # type: ignore\n \"\"\"\n Loads an ONNXAdaptiveModel from a directory.\n\n :param load_dir: Location where the ONNXAdaptiveModel is stored.\n :param device: The device on which this model will operate. Either \"cpu\" or \"cuda\".\n \"\"\"\n load_dir = Path(load_dir)\n import onnxruntime\n sess_options = onnxruntime.SessionOptions()\n # Set graph optimization level to ORT_ENABLE_EXTENDED to enable bert optimization.\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED\n # Use OpenMP optimizations. Only useful for CPU, has little impact for GPUs.\n sess_options.intra_op_num_threads = multiprocessing.cpu_count()\n onnx_session = onnxruntime.InferenceSession(str(load_dir / \"model.onnx\"), sess_options)\n\n # Prediction heads\n _, ph_config_files = cls._get_prediction_head_files(load_dir, strict=False)\n prediction_heads = []\n ph_output_type = []\n for config_file in ph_config_files:\n # ONNX Model doesn't need have a separate neural network for PredictionHead. 
It only uses the\n # instance methods of PredictionHead class, so, we load with the load_weights param as False.\n head = PredictionHead.load(config_file, load_weights=False)\n prediction_heads.append(head)\n ph_output_type.append(head.ph_output_type)\n\n with open(load_dir/\"onnx_model_config.json\") as f:\n model_config = json.load(f)\n language_model_class = model_config[\"language_model_class\"]\n language = model_config[\"language\"]\n\n return cls(onnx_session, language_model_class, language, prediction_heads, device)\n\n def forward(self, **kwargs):\n \"\"\"\n Perform forward pass on the model and return the logits.\n\n :param kwargs: All arguments that need to be passed on to the model.\n :return: All logits as torch.tensor or multiple tensors.\n \"\"\"\n with torch.no_grad():\n if self.language_model_class == \"Bert\":\n input_to_onnx = {\n 'input_ids': numpy.ascontiguousarray(kwargs['input_ids'].cpu().numpy()),\n 'attention_mask': numpy.ascontiguousarray(kwargs['padding_mask'].cpu().numpy()),\n 'token_type_ids': numpy.ascontiguousarray(kwargs['segment_ids'].cpu().numpy()),\n }\n elif self.language_model_class in [\"Roberta\", \"XLMRoberta\"]:\n input_to_onnx = {\n 'input_ids': numpy.ascontiguousarray(kwargs['input_ids'].cpu().numpy()),\n 'attention_mask': numpy.ascontiguousarray(kwargs['padding_mask'].cpu().numpy())\n }\n res = self.onnx_session.run(None, input_to_onnx)\n res = numpy.stack(res).transpose(1, 2, 0)\n logits = [torch.Tensor(res).to(self.device)]\n\n return logits\n\n def eval(self):\n \"\"\"\n Stub to make ONNXAdaptiveModel compatible with the PyTorch AdaptiveModel.\n \"\"\"\n return True\n\n def get_language(self):\n \"\"\"\n Get the language(s) the model was trained for.\n :return: str\n \"\"\"\n return self.language\n\n\nclass ONNXWrapper(AdaptiveModel):\n \"\"\"\n Wrapper Class for converting PyTorch models to ONNX.\n\n As of torch v1.4.0, torch.onnx.export only support passing positional arguments\n to the forward pass of the model. However, the AdaptiveModel's forward takes keyword arguments.\n This class circumvents the issue by converting positional arguments to keyword arguments.\n \"\"\"\n @classmethod\n def load_from_adaptive_model(cls, adaptive_model: AdaptiveModel):\n model = copy.deepcopy(adaptive_model)\n model.__class__ = ONNXWrapper\n return model\n\n def forward(self, *batch):\n return super().forward(input_ids=batch[0], padding_mask=batch[1], segment_ids=batch[2])\n" ]
[ [ "torch.nn.Dropout", "torch.no_grad", "torch.Tensor", "numpy.stack" ] ]
kerwenwwer/pensive-pytorch-py3
[ "76292fa92078a7d2294d9438503e5fd5187804e4" ]
[ "rl_test.py" ]
[ "import os\nimport sys\nimport torch\nimport load_trace\nimport numpy as np\nimport fixed_env as env\nfrom Network import ActorNetwork\nfrom torch.distributions import Categorical\n\n\nS_INFO = 6 # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end\nS_LEN = 8 # take how many frames in the past\nA_DIM = 6\nACTOR_LR_RATE = 0.0001\nCRITIC_LR_RATE = 0.001\n# VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] \nVIDEO_BIT_RATE = [500, 850, 1200, 1850, 0, 0] # Kbps\nBUFFER_NORM_FACTOR = 10.0\nCHUNK_TIL_VIDEO_END_CAP = 48.0\nM_IN_K = 1000.0\nREBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps\nSMOOTH_PENALTY = 0.02\nDEFAULT_QUALITY = 1 # default video quality without agent\nRANDOM_SEED = 42\nRAND_RANGE = 1000\nLOG_FILE = './test_results/log_sim_rl'\nTEST_TRACES = './dataset/network_trace/'\n# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward\nframe_time_len = 0.04\nACTOR_MODEL=sys.argv[1]\n\ndef main():\n torch.set_num_threads(1)\n\n np.random.seed(RANDOM_SEED)\n torch.manual_seed(RANDOM_SEED)\n\n assert len(VIDEO_BIT_RATE) == A_DIM\n\n all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(TEST_TRACES)\n\n net_env = env.Environment(all_cooked_time=all_cooked_time,\n all_cooked_bw=all_cooked_bw)\n\n log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]\n log_file = open(log_path, 'w')\n\n # all models have same actor network\n # so model_type can be anything\n net=ActorNetwork([S_INFO,S_LEN],A_DIM)\n\n # restore neural net parameters\n net.load_state_dict(torch.load(ACTOR_MODEL))\n print(\"Testing model restored.\")\n\n time_stamp = 0\n\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY\n\n video_count = 0\n state=torch.zeros((S_INFO,S_LEN))\n\n while True: # serve video forever\n # the action is from the last decision\n # this is to make the framework similar to the real\n delay, sleep_time, buffer_size, rebuf, \\\n video_chunk_size, next_video_chunk_sizes, \\\n end_of_video, video_chunk_remain = \\\n net_env.get_video_chunk(bit_rate)\n\n time_stamp += delay # in ms\n time_stamp += sleep_time # in ms\n\n # reward is video quality - rebuffer penalty - smoothness\n reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \\\n - REBUF_PENALTY * rebuf \\\n - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[bit_rate] -\n VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K\n\n last_bit_rate = bit_rate\n\n # log time_stamp, bit_rate, buffer_size, reward\n log_file.write(str(time_stamp / M_IN_K) + '\\t' +\n str(VIDEO_BIT_RATE[bit_rate]) + '\\t' +\n str(buffer_size) + '\\t' +\n str(rebuf) + '\\t' +\n str(video_chunk_size) + '\\t' +\n str(delay) + '\\t' +\n str(reward) + '\\n')\n log_file.flush()\n\n # retrieve previous state\n state = torch.roll(state,-1,dims=-1)\n\n # this should be S_INFO number of terms\n state[0, -1] = VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec\n state[4, :A_DIM] = torch.tensor(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = min(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n\n with torch.no_grad():\n probability=net.forward(state.unsqueeze(0))\n m=Categorical(probability)\n bit_rate=m.sample().item()\n # Note: we need to discretize the probability into 1/RAND_RANGE steps,\n # because there is an 
intrinsic discrepancy in passing single state and batch states\n\n\n if end_of_video:\n log_file.write('\\n')\n log_file.close()\n\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY # use the default action here\n\n state=torch.zeros((S_INFO,S_LEN))\n\n video_count += 1\n\n if video_count >= len(all_file_names):\n break\n\n log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]\n log_file = open(log_path, 'w')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.abs", "numpy.random.seed", "torch.load", "torch.zeros", "torch.manual_seed", "torch.tensor", "numpy.max", "torch.distributions.Categorical", "torch.set_num_threads", "torch.no_grad", "torch.roll" ] ]
gttm/eth-deep-learning
[ "6480e2ae794bc5f6e2e1af17923c5a02548a122d" ]
[ "dcpo/code/reinforced_epos/helpers/oop/Worker.py" ]
[ "from builtins import print\nimport os\nfrom reinforced_epos.helpers.oop.Network import Network as AC_Network\nimport tensorflow as tf\nfrom reinforced_epos.helpers.oop.helpers import *\nfrom reinforced_epos.helpers.oop.Environment import Environment\nfrom reinforced_epos.helpers.config import get_experiement_folder\nimport pickle\nimport shutil\n\nclass Worker():\n def __init__(self,name, dataset, env: Environment, total_actions, trainer, model_path, global_episodes):\n self.name = \"worker_\" + str(name)\n self.number = name\n self.trainer = trainer\n self.global_episodes = global_episodes\n self.increment = self.global_episodes.assign_add(1)\n self.episode_rewards = []\n self.episode_lengths = []\n self.episode_mean_values = []\n folder = get_experiement_folder()\n a3cfolder = os.path.join(folder, \"a3c\")\n self.model_path = a3cfolder\n\n\n worker_folder = os.path.join(folder, \"a3c\", \"train_\"+str(self.number))\n\n self.summary_writer = tf.summary.FileWriter(worker_folder)\n self.successful_episodes_path = os.path.join(worker_folder, \"successfull_episodes\")\n if(not os.path.exists(self.successful_episodes_path)):\n os.mkdir(self.successful_episodes_path)\n\n #Create the local copy of the network and the tensorflow op to copy global paramters to local network\n self.local_AC = AC_Network(dataset, total_actions, self.name, trainer)\n self.update_local_ops = update_target_graph('global',self.name)\n\n self.actions = None #here i did a major change that I don't know how it will work\n self.rewards_plus = None\n self.value_plus = None\n self.env = env\n # actions are used in the AC_network to determine the q-values, so they should be\n # agent x actions matrix\n\n\n #rollout became episode_memories, bootstrap is either 0.0 or a value from a feedforward of the net\n def train(self, episode_memories, sess, gamma, bootstrap_value):\n #print(\"training\")\n episode_memories = np.array(episode_memories)\n observed_states = episode_memories[:, 0] #instead of observations, batch x agent x states\n actions = episode_memories[:, 1] # batch x agent x acions\n rewards = episode_memories[:, 2] # batch x 1\n next_observations = episode_memories[:, 3] # unused?\n values = episode_memories[:, 5] # batch x 1\n\n # Here we take the rewards and values from the episode_memories, and use them to\n # generate the advantage and discounted returns.\n # The advantage function uses \"Generalized Advantage Estimation\"\n self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value]) #increase all rewards with bootstrap\n discounted_rewards = discount(self.rewards_plus, gamma)[:-1] #discount all rewards except the last\n self.value_plus = np.asarray(values.tolist() + [bootstrap_value]) #also adding bootstrap to values\n advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1] #advantages are calculated\n # as the sum of the rewards plus the discounted values (after first) minus all the values except the last\n advantages = discount(advantages, gamma) #batch x 1\n\n\n #TODO solve dim mess\n discounted_rewards = np.expand_dims(discounted_rewards, 1)\n advantages = np.expand_dims(advantages, 1)\n o_states = np.stack(observed_states, axis=0)\n actions = np.expand_dims(actions[0], 0)\n # print(actions)\n b1 = self.batch_rnn_state[0]\n b2 = self.batch_rnn_state[1]\n\n # print(np.shape(b1))\n\n # Update the global network using gradients from loss\n # Generate network statistics to periodically save\n feed_dict = {\n self.local_AC.target_v:discounted_rewards,\n self.local_AC.input:o_states, # a 
batch of observed states\n self.local_AC.actions:actions, # a batch of actions\n self.local_AC.advantages:advantages, # a batch of advantages\n self.local_AC.lstm_h_prev:b1[0],\n self.local_AC.lstm_c_prev:b2[0]\n }\n\n v_l,p_l,e_l,g_n,v_n, self.batch_rnn_state,_ = sess.run([self.local_AC.value_loss,\n self.local_AC.policy_loss,\n self.local_AC.entropy,\n self.local_AC.grad_norms,\n self.local_AC.var_norms,\n self.local_AC.state_out,\n self.local_AC.apply_grads],\n feed_dict={\n self.local_AC.target_v:discounted_rewards,\n self.local_AC.input:o_states, # a batch of observed states\n self.local_AC.actions:actions, # a batch of actions\n self.local_AC.advantages:advantages, # a batch of advantages\n self.local_AC.lstm_h_prev:np.expand_dims(b1[0], 0),\n self.local_AC.lstm_c_prev:np.expand_dims(b2[0], 0)\n })\n return v_l / len(episode_memories), p_l / len(episode_memories), e_l / len(episode_memories), g_n, v_n\n\n def work(self,max_episode_length,gamma,sess,coord,saver):\n #print(\"working\")\n episode_count = sess.run(self.global_episodes)\n total_steps = 0\n print (\"Starting worker \" + str(self.number))\n with sess.as_default(), sess.graph.as_default():\n while not coord.should_stop():\n sess.run(self.update_local_ops)\n episode_buffer = []\n episode_values = []\n episode_frames = []\n episode_reward = 0\n episode_variances = []\n episode_step_count = 0\n d = False\n\n #self.env.new_episode()\n s_indeces = self.env.current_state #indeces\n episode_frames.append(s_indeces) #append indeces\n s = self.env.get_plans_from_state(s_indeces) #plans\n\n rnn_state = self.local_AC.state_init\n self.batch_rnn_state = rnn_state\n\n var_prev = None\n self.env.reset(random=True)\n while self.env.is_episode_finished() == False:\n #Take an action using probabilities from policy network output.\n a_dist,v,rnn_state = sess.run([self.local_AC.policy,self.local_AC.value,self.local_AC.state_out],\n feed_dict={self.local_AC.input:[s],\n self.local_AC.state_in[0]:rnn_state[0],\n self.local_AC.state_in[1]:rnn_state[1]})\n\n\n agents = np.shape(self.env.dataset)[0]\n a = np.zeros([agents], dtype=np.int)\n indeces = np.arange(self.env.total_actions)\n for agent in range(agents):\n a[agent] = np.random.choice(indeces, p=a_dist[0, agent, :])\n\n # a = np.random.choice(a_dist[0],p=a_dist[0]) #choose the action based on the\n #probability density of the policy gradient\n # a = np.argmax(a_dist[0] == a, axis=1) # get indeces from probabilities\n # print(a)\n new_state, var, prev_best, d = self.env.step(a) #calcualtes new state\n\n var_prev = var_prev or var\n # print(var)\n\n #r = (self.env.best_var - var)#/ 100.0#self.actions[a])\n r = (prev_best - var)\n episode_variances.append(var)\n var_prev = var\n\n # d = self.env.is_episode_finished()\n\n if d == False:\n s1_indeces = self.env.current_state\n episode_frames.append(s1_indeces)\n s1 = self.env.get_plans_from_state(s1_indeces)\n else:\n s1 = s\n\n episode_buffer.append([s, a, r, s1, d, v])\n episode_values.append(v)#v[0,0]\n\n episode_reward += r\n s = s1\n total_steps += 1\n episode_step_count += 1\n\n if(episode_step_count > max_episode_length):\n break;\n\n # If the episode hasn't ended, but the experience buffer is full, then we\n # make an update step using that experience rollout.\n if len(episode_buffer) == 30 and d != True and episode_step_count < max_episode_length - 1:\n # Since we don't know what the true final return is, we \"bootstrap\" from our current\n # value estimation.\n v1 = sess.run(self.local_AC.value,\n feed_dict={self.local_AC.input:[s],\n 
self.local_AC.state_in[0]:rnn_state[0],\n self.local_AC.state_in[1]:rnn_state[1]})\n v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,v1)\n episode_buffer = []\n sess.run(self.update_local_ops)\n if d == True:\n print(\"success\")\n file = os.path.join(self.successful_episodes_path, \"episode_\"+str(episode_count))\n if(os.path.exists(file)):\n os.remove(file)\n with open(file, \"wb+\") as outfile:\n pickle.dump(episode_buffer, outfile)\n outfile.close()\n\n break\n\n print(self.name + \" \" + \"finished episode \" + str(episode_count))\n self.episode_rewards.append(episode_reward)\n self.episode_lengths.append(episode_step_count)\n self.episode_mean_values.append(np.mean(episode_values))\n\n # Update the network using the episode buffer at the end of the episode.\n if len(episode_buffer) != 0:\n v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,0.0)\n\n\n # Periodically save gifs of episodes, model parameters, and summary statistics.\n if episode_count % 5 == 0 and episode_count != 0:\n if self.name == 'worker_0' and episode_count % 25 == 0:\n state_transitions = np.array(episode_frames) #data_for_heatmap\n if episode_count % 250 == 0 and self.name == 'worker_0':\n saver.save(sess,self.model_path+'/model-'+str(episode_count)+'.cptk')\n print (\"Saved Model\")\n\n mean_reward = np.mean(self.episode_rewards[-5:])\n mean_length = np.mean(self.episode_lengths[-5:])\n mean_value = np.mean(self.episode_mean_values[-5:])\n mean_var = np.mean(episode_variances)\n summary = tf.Summary()\n summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))\n summary.value.add(tag='Perf/Length', simple_value=float(mean_length))\n summary.value.add(tag='Perf/Value', simple_value=float(mean_value))\n summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))\n summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))\n summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))\n summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))\n summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))\n summary.value.add(tag=\"Perf/Min Variance\", simple_value=float(Environment.bvar))\n summary.value.add(tag=\"Perf/Mean Variance\", simple_value=float(mean_var))\n\n self.summary_writer.add_summary(summary, episode_count)\n\n self.summary_writer.flush()\n if self.name == 'worker_0':\n sess.run(self.increment)\n episode_count += 1\n" ]
[ [ "tensorflow.Summary", "tensorflow.summary.FileWriter" ] ]
leabeusch/pysteps
[ "5f162d4b1155e4cfd894c9635eed3f0e823adedd", "5f162d4b1155e4cfd894c9635eed3f0e823adedd" ]
[ "pysteps/visualization/thunderstorms.py", "pysteps/utils/fft.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.visualization.tstorm\n============================\n\nMethods for plotting thunderstorm cells.\n\nCreated on Wed Nov 4 11:09:44 2020\n\n@author: mfeldman\n\n.. autosummary::\n :toctree: ../generated/\n\n plot_track\n plot_cart_contour\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n################################\n# track and contour plots zorder\n# - precipitation: 40\n\n\ndef plot_track(track_list, geodata=None, ref_shape=None):\n \"\"\"\n Plot storm tracks.\n\n .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes\n\n Parameters\n ----------\n track_list: list\n List of tracks provided by DATing.\n geodata: dictionary or None, optional\n Optional dictionary containing geographical information about\n the field. If not None, plots the contours in a georeferenced frame.\n ref_shape: (vertical, horizontal)\n Shape of the 2D precipitation field used to find the cells' contours.\n This is only needed only if `geodata=None`.\n\n IMPORTANT: If `geodata=None` it is assumed that the y-origin of the reference\n precipitation fields is the upper-left corner (yorigin=\"upper\").\n\n Returns\n -------\n ax: fig Axes_\n Figure axes.\n \"\"\"\n ax = plt.gca()\n pix2coord = _pix2coord_factory(geodata, ref_shape)\n\n color = iter(plt.cm.spring(np.linspace(0, 1, len(track_list))))\n for track in track_list:\n cen_x, cen_y = pix2coord(track.cen_x, track.cen_y)\n ax.plot(cen_x, cen_y, c=next(color), zorder=40)\n return ax\n\n\ndef plot_cart_contour(contours, geodata=None, ref_shape=None):\n \"\"\"\n Plots input image with identified cell contours.\n Also, this function can be user to add points of interest to a plot.\n\n .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes\n\n Parameters\n ----------\n contours: list or dataframe-element\n list of identified cell contours.\n geodata: dictionary or None, optional\n Optional dictionary containing geographical information about\n the field. If not None, plots the contours in a georeferenced frame.\n ref_shape: (vertical, horizontal)\n Shape of the 2D precipitation field used to find the cells' contours.\n This is only needed only if `geodata=None`.\n\n IMPORTANT: If `geodata=None` it is assumed that the y-origin of the reference\n precipitation fields is the upper-left corner (yorigin=\"upper\").\n\n Returns\n -------\n ax: fig Axes_\n Figure axes.\n \"\"\"\n ax = plt.gca()\n pix2coord = _pix2coord_factory(geodata, ref_shape)\n\n contours = list(contours)\n for contour in contours:\n for c in contour:\n x, y = pix2coord(c[:, 1], c[:, 0])\n ax.plot(x, y, color=\"black\", zorder=40)\n return ax\n\n\ndef _pix2coord_factory(geodata, ref_shape):\n \"\"\"\n Construct the pix2coord transformation function.\"\"\"\n if geodata is not None:\n\n def pix2coord(x_input, y_input):\n x = geodata[\"x1\"] + geodata[\"xpixelsize\"] * x_input\n if geodata[\"yorigin\"] == \"lower\":\n y = geodata[\"y1\"] + geodata[\"ypixelsize\"] * y_input\n else:\n y = geodata[\"y2\"] - geodata[\"ypixelsize\"] * y_input\n return x, y\n\n else:\n if ref_shape is None:\n raise ValueError(\"'ref_shape' can't be None when not geodata is available.\")\n\n # Default pix2coord function when no geographical information is present.\n def pix2coord(x_input, y_input):\n # yorigin is \"upper\" by default\n return x_input, ref_shape[0] - y_input\n\n return pix2coord\n", "# -*- coding: utf-8 -*-\n\"\"\"\npysteps.utils.fft\n=================\n\nInterface module for different FFT methods.\n\n.. 
autosummary::\n :toctree: ../generated/\n\n get_numpy\n get_scipy\n get_pyfftw\n\"\"\"\n\nfrom pysteps.exceptions import MissingOptionalDependency\nfrom types import SimpleNamespace\n\n\ndef get_numpy(shape, fftn_shape=None, **kwargs):\n import numpy.fft as numpy_fft\n\n f = {\n \"fft2\": numpy_fft.fft2,\n \"ifft2\": numpy_fft.ifft2,\n \"rfft2\": numpy_fft.rfft2,\n \"irfft2\": lambda X: numpy_fft.irfft2(X, s=shape),\n \"fftshift\": numpy_fft.fftshift,\n \"ifftshift\": numpy_fft.ifftshift,\n \"fftfreq\": numpy_fft.fftfreq,\n }\n if fftn_shape is not None:\n f[\"fftn\"] = numpy_fft.fftn\n fft = SimpleNamespace(**f)\n\n return fft\n\n\ndef get_scipy(shape, fftn_shape=None, **kwargs):\n import numpy.fft as numpy_fft\n import scipy.fftpack as scipy_fft\n\n # use numpy implementation of rfft2/irfft2 because they have not been\n # implemented in scipy.fftpack\n f = {\n \"fft2\": scipy_fft.fft2,\n \"ifft2\": scipy_fft.ifft2,\n \"rfft2\": numpy_fft.rfft2,\n \"irfft2\": lambda X: numpy_fft.irfft2(X, s=shape),\n \"fftshift\": scipy_fft.fftshift,\n \"ifftshift\": scipy_fft.ifftshift,\n \"fftfreq\": scipy_fft.fftfreq,\n }\n if fftn_shape is not None:\n f[\"fftn\"] = scipy_fft.fftn\n fft = SimpleNamespace(**f)\n\n return fft\n\n\ndef get_pyfftw(shape, fftn_shape=None, n_threads=1, **kwargs):\n try:\n import pyfftw.interfaces.numpy_fft as pyfftw_fft\n import pyfftw\n\n pyfftw.interfaces.cache.enable()\n except ImportError:\n raise MissingOptionalDependency(\"pyfftw is required but not installed\")\n\n X = pyfftw.empty_aligned(shape, dtype=\"complex128\")\n F = pyfftw.empty_aligned(shape, dtype=\"complex128\")\n\n fft_obj = pyfftw.FFTW(\n X,\n F,\n flags=[\"FFTW_ESTIMATE\"],\n direction=\"FFTW_FORWARD\",\n axes=(0, 1),\n threads=n_threads,\n )\n ifft_obj = pyfftw.FFTW(\n F,\n X,\n flags=[\"FFTW_ESTIMATE\"],\n direction=\"FFTW_BACKWARD\",\n axes=(0, 1),\n threads=n_threads,\n )\n\n if fftn_shape is not None:\n X = pyfftw.empty_aligned(fftn_shape, dtype=\"complex128\")\n F = pyfftw.empty_aligned(fftn_shape, dtype=\"complex128\")\n\n fftn_obj = pyfftw.FFTW(\n X,\n F,\n flags=[\"FFTW_ESTIMATE\"],\n direction=\"FFTW_FORWARD\",\n axes=list(range(len(fftn_shape))),\n threads=n_threads,\n )\n\n X = pyfftw.empty_aligned(shape, dtype=\"float64\")\n output_shape = list(shape[:-1])\n output_shape.append(int(shape[-1] / 2) + 1)\n output_shape = tuple(output_shape)\n F = pyfftw.empty_aligned(output_shape, dtype=\"complex128\")\n\n rfft_obj = pyfftw.FFTW(\n X,\n F,\n flags=[\"FFTW_ESTIMATE\"],\n direction=\"FFTW_FORWARD\",\n axes=(0, 1),\n threads=n_threads,\n )\n irfft_obj = pyfftw.FFTW(\n F,\n X,\n flags=[\"FFTW_ESTIMATE\"],\n direction=\"FFTW_BACKWARD\",\n axes=(0, 1),\n threads=n_threads,\n )\n\n f = {\n \"fft2\": lambda X: fft_obj(input_array=X.copy()).copy(),\n \"ifft2\": lambda X: ifft_obj(input_array=X.copy()).copy(),\n \"rfft2\": lambda X: rfft_obj(input_array=X.copy()).copy(),\n \"irfft2\": lambda X: irfft_obj(input_array=X.copy()).copy(),\n \"fftshift\": pyfftw_fft.fftshift,\n \"ifftshift\": pyfftw_fft.ifftshift,\n \"fftfreq\": pyfftw_fft.fftfreq,\n }\n\n if fftn_shape is not None:\n f[\"fftn\"] = lambda X: fftn_obj(input_array=X).copy()\n fft = SimpleNamespace(**f)\n\n return fft\n" ]
[ [ "matplotlib.pyplot.gca" ], [ "numpy.fft.irfft2" ] ]
andrewcistola/healthy-neighborhoods
[ "08bd0cd9dcb81b083a003943cd6679ca12237a1e" ]
[ "_archive/neville/neville_nhanes_crc_alpha.py" ]
[ "#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health\n### Neville Subproject: Using Random Forestes, Factor Analysis, and Logistic regression to Screen Variables for Imapcts on Public Health\n## NHANES 2015-2016: Detecting different factors between individuals with a BMI over 30 and are between 40 and 70 that have pre-diabates and have diabetes\n# The Python Programming Langauge Script by DrewC!\n\n#### Section A: Import Libraries, Import Dataset, Prepare for Classification\n\n### Step 1: Import Libraries and Import Dataset\n\n## Import Standard Libraries\nimport os # Inlcuded in every script DC!\nimport numpy as np # Inclduded in every code script DC!\nimport pandas as pd # Incldued in every code script for DC!\nimport scipy as sp # Incldued in every code script for DC!\n\n## Import Statistics Packages\nfrom sklearn.linear_model import LogisticRegression # Logisitc Regression in Sklean\nfrom sklearn.linear_model import LinearRegression # Logisitc Regression in Sklean\nfrom sklearn.decomposition import PCA # Principal compnents analysis from sklearn\nfrom sklearn.preprocessing import StandardScaler # Scaling for Principal components analysis\n\n## Import Machine Learning Libraries\nfrom sklearn.ensemble import RandomForestClassifier as rfc # Random Forest classification component\nfrom sklearn.feature_selection import RFECV as rfe # Recursive Feature selection component\nfrom sklearn.svm import SVR as svr # Linear model for RFE\nfrom sklearn.impute import SimpleImputer # Univariate imputation for missing data\n\n## Import Dataset\nos.chdir(\"C:/Users/drewc/GitHub/Healthy_Neighborhoods\") # Set wd to project repository\ndf_nh = pd.read_csv(\"_data/nhanes_0506_noRX_stage.csv\", encoding = \"ISO-8859-1\", low_memory= False) # Import dataset with outcome and ecological variable for each geographical id, all datasets in _data folder in repository\n\n## Tidy Data\ndf_nh = df_nh.select_dtypes(exclude = [\"object\"]) # Remove all objects from data frame\n\n## Verify\ndf_nh.info() # Get class, memory, and column info: names, data types, obs.\ndf_nh.head() # Print first 5 observations\ndf_nh.shape # Print dimensions of data frame\n\n### Step 2: Prepare Data for Full Analysis\n\n## Create outcome variable and remove variables used to calculate\ndf_nh[\"outcome\"] = np.where((df_nh[\"SSCAMIGA\"] >= 0.481) | (df_nh[\"SSCAMIGG\"] >= 0.229) | (df_nh[\"SSCAMIGM\"] >= 0.375), 1, 0) # Create New Column Based on Conditions with nested If then where DM = 2, PD = 1, Neither = 0\ndf_nh = df_nh.drop(columns = [\"SSCAMIGG\", \"SSCAMIGA\", \"SSCAMIGM\", \"SSSALIGG\", \"SSSALIGA\", \"SSSALIGM\"]) # Remove variables for Identification\n\n## Susbet for population at risk\ndf_nh = df_nh[(df_nh[\"RIDAGEYR\"] >= 50) & (df_nh[\"RIDAGEYR\"] <= 75)] # Subset data frame by USPSTF screening values\ndf_nh = df_nh.drop(columns = [\"RIDAGEYR\"]) # Drop columns for age and bmi\n\n## Remove ID, Sampling, and Weight Variables\ndf_nh = df_nh.drop(columns = [\"SEQN\", \"SDMVPSU\"]) # Remove Patient ID and Sampling Unit Variables\ndf_nh = df_nh.drop(columns = [\"WTMEC2YR\", \"WTAL2YR\", \"WTINT2YR\", \"WTSA2YR\", \"WTSAF2YR\", \"WTSB2YR\", \"WTSC2YR\", \"WTSOG2YR\", \"WTSPC2YR\", \"WTSC2YRA\"]) # Remove variables for Identification\ndf_nh = df_nh.drop(columns = [\"BMDSTATS\", \"DUAISC\", \"HCASCST1\", \"PSASCST1\", \"SXAISC\"]) # Remove intrerview status code variables\n\n## Identify missing values\ndf_NA = df_nh # Rename data for missing values\ndf_NA = df_NA.dropna(axis = 1, thresh = 
0.75*len(df_NA)) # Drop variables less than 70% non-NA count for all columns\nNAtot = df_NA.isnull().sum().sum() # Get count of all NA values\nNAnot = df_NA.count().sum().sum() # Get count of all nonNA values\nNAratio = NAtot / (NAtot + NAnot) # Percent of values with values\nNout = (df_NA[\"outcome\"] == 1).sum() # Get cout of outcome variable\nprint(NAratio) # Print value\nprint(Nout) # Print value\ndf_NA.info() # Get info\n\n## Impute Missing Values with Median\nimp = SimpleImputer(strategy = \"median\") # Build Imputer model. strategy = \"mean\" or \" median\" or \"most_frequent\" or \"constant\"\ndf_imp = pd.DataFrame(imp.fit_transform(df_NA)) # Impute missing data\ndf_imp.columns = df_NA.columns # Rename columns from new dataset\n\n## Rename Final Dataset\ndf_nev = df_imp # Rename Data\n\n## Verify\ndf_nev.info() # Get class, memory, and column info: names, data types, obs.\ndf_nev.head() # Print first 5 observations\ndf_nev.shape() # Print dimensions of data frame\n\n# Create Results Text File\ntext_1 = str(df_nh.shape)\ntext_2 = str(df_nev.shape)\ntext_3 = str(NAratio)\ntext_4 = str(Nout)\ntext_file = open(\"Neville/neville_nhanes_crc_results.txt\", \"w\") # Open text file and name with subproject, content, and result suffix\ntext_file.write(\"Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health\\n\") # Line of text with space after\ntext_file.write(\"Neville Subproject: Using Random Forestes, Principal Component Analysis, and Logistic regression to Screen Variables for Imapcts on Public Health\\n\") # Line of text with space after\ntext_file.write(\"NHANES 2005-2006: Campylobacter Anitbodies\\n\") # Line of text with space after\ntext_file.write(\"The Python Programming Langauge Script by DrewC!\\n\\n\") # Line of text with space after\ntext_file.write(\"Variables, Observations, Missing Values \\n\\n\") # Line of text with space after\ntext_file.write(\"Total Cohort\\n\") # Line of text with space after\ntext_file.write(text_1) # write string version of variable above\ntext_file.write(\"\\n\\nSubset Cohort\\n\") # Line of text with space after\ntext_file.write(text_2) # write string version of variable above\ntext_file.write(\"\\n\\nNA Ratio\\n\") # Line of text with space after\ntext_file.write(text_3) # write string version of variable above\ntext_file.write(\"\\n\\nN outcome\\n\") # Line of text with space after\ntext_file.write(text_4) # write string version of variable above\ntext_file.write(\"\\n\\n\") # Add two lines of blank text at end of every section text\ntext_file.close() # Close file\n\n#### Section B: Conduct Principal Component Analysis to Identify Latent Variables for Prediabetes SubCohort\n\n### Step 4: Prepare for Principal Component Analysis\n\n## Prepare Data for PCA\ndf_pca = df_nev\ndf_pca = df_pca.drop(columns = [\"outcome\"]) # Remove outcome variable\nx = df_pca.values # Save feature values as x\nx = StandardScaler().fit_transform(x) # While applying StandardScaler, each feature of your data should be normally distributed such that it will scale the distribution to a mean of zero and a standard deviation of one.\n\n## Verify\nx.shape # Verify that dimensions are same length\nnp.mean(x),np.std(x) # whether the normalized data has a mean of zero and a standard deviation of one.\n\n## Convert to Data frame\ndf_pcn = pd.DataFrame(x, columns = df_pca.columns) # convert the normalized features into a tabular format with the help of DataFrame.\n\n## Create PCA model to determine Components\npca = PCA(n_components = 100) # you will 
pass the number of components to make PCA model\npca.fit(df_pcn) # fit to data\ndf_pcs = pd.DataFrame(pca.explained_variance_) # Print explained variance of components\ndf_pcs = df_pcs[(df_pcs[0] > 10)]\ncomponents = len(df_pcs.index) # Save count of values for Variabel reduction\n\n### Step 5: Run PCA with principal component count\n\n## PCA to reduce variables\npca = PCA(n_components = components) # you will pass the number of components to make PCA model\npca.fit_transform(df_pcn) # finally call fit_transform on the aggregate data to create PCA results object\ndf_pcs2 = pd.DataFrame(pca.components_, columns = df_pcn.columns) # Export eigenvectors to data frame\n\n## Collect list important features\ndf_pcs3 = df_pcs2[(df_pcs2 > 0)] # Remove all values below or equal to 0\ndf_pc = pd.DataFrame(df_pcs3.max()) # select maximum value for each feature\ndf_pc = df_pc.reset_index() # Save index as first column named \"index\"\ndf_pc = df_pc.rename(columns = {\"index\": \"Features\", 0: \"Eigenvectors\"}) # Rename columns\ndf_pc = df_pc.sort_values(by = [\"Eigenvectors\"], ascending = False) # Sort Columns by Value\ndf_pc = df_pc[(df_pc[\"Eigenvectors\"] > df_pc[\"Eigenvectors\"].mean())] # Subset by Gini values higher than mean\ndf_pc = df_pc.dropna() # Drop all rows with NA values, 0 = rows, 1 = columns \n\n## Verify\ndf_pc.info() # Get class, memory, and column info: names, data types, obs.\ndf_pc.head() # Print first 5 observations\n\n## Write Summary to Text File\ntext_1 = df_pc.head(10).to_string() # Save variable as string value for input below\ntext_file = open(\"Neville/neville_nhanes_crc_results.txt\", \"a\") # Open text file and name with subproject, content, and result suffix\ntext_file.write(\"\\nPrincipal Components Analysis\\n\") # Title of section with double space after\ntext_file.write(\"\\nTop 10 Variables by Eigenvector\\n\") # Line of text with space after\ntext_file.write(text_1) # write string version of variable above\ntext_file.write(\"\\n\\n\") # Add two lines of blank text at end of every section text\ntext_file.close() # Close file\n\n#### Section C: Create a Random Forest to Rank Variables by Importance for Prediabetes\n\n### Step 6: Run Random Forest\n\n## Prepare for Classification\ndf_rf = df_nev.fillna(0).astype(np.float64) # Remove NA and change to float64 zeros for feature selection and save as Neville\nY = df_rf[\"outcome\"] # Isolate Outcome variable\nfeatures = df_rf.columns.drop([\"outcome\"]) # Drop outcome variable and Geo to isolate all predictor variable names as features\nX = df_rf[features] # Save features columns as predictor data frame\nforest = rfc(n_estimators = 10000, max_depth = 10) #Use default values except for number of trees. For a further explanation see readme included in repository. 
\n\n## Run Forest \nforest.fit(X, Y) # This will take time\n\n## Output importances\ngini = forest.feature_importances_ # Output importances of features\nl_gini = list(zip(X, gini)) # Create list of variables alongside importance scores \ndf_gini = pd.DataFrame(l_gini, columns = [\"Features\", \"Gini\"]) # Create data frame of importances with variables and gini column names\ndf_gini = df_gini.sort_values(by = [\"Gini\"], ascending = False) # Sort data frame by gini value in desceding order\ndf_gini = df_gini[(df_gini[\"Gini\"] > df_gini[\"Gini\"].mean())] # Subset by Gini values higher than mean\n\n## Verify\ndf_gini.info() # Get class, memory, and column info: names, data types, obs.\ndf_gini.head() # Print first 5 observations\n\n## Write Summary to Text File\ntext_1 = df_gini.head(10).to_string() # Save variable as string value for input below\ntext_file = open(\"Neville/neville_nhanes_crc_results.txt\", \"a\") # Open text file and name with subproject, content, and result suffix\ntext_file.write(\"\\nRandom Forest\\n\") # Line of text with space after\ntext_file.write(\"\\nTop 10 Variables by Gini Rankings\\n\") # Line of text with space after\ntext_file.write(text_1) # write string version of variable above\ntext_file.write(\"\\n\\n\") # Add two lines of blank text at end of every section text\ntext_file.close() # Close file\n\n#### Section D: Run Recursive Feature Selection \n\n### Step 7: Run Recursive Feature Selection\n\n## Join Forest and Fator Analysis\ndf_join = pd.merge(df_gini, df_pc, on = \"Features\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\ndf_features = df_join[\"Features\"] # Save features from data frame\nfeatures = df_features.tolist() # Convert to list\n\n## Setup Predictors and RFE\ndf_rfe = df_nev[features] # Add selected features to df\ndf_rfe[\"outcome\"] = df_nev[\"outcome\"] # Add outcome to RFE df\ndf_rfe = df_rfe.dropna() # Drop all columns with NA\nX = df_rfe[df_features] # Save features columns as predictor data frame\nY = df_rfe[\"outcome\"] # Use outcome data frame \nLog_RFE = LogisticRegression(solver = \"liblinear\", max_iter = 10000) # Use regression coefficient as estimator\nselector = rfe(estimator = Log_RFE, step = 1, min_features_to_select = 1) # define selection parameters, in this case all features are selected. 
See Readme for more ifo\n\n## Run Recursive Feature Selection\nselected = selector.fit(X, Y) # This will take time\n\n## Output RFE results\nar_rfe = selected.support_ # Save Boolean values as numpy array\nl_rfe = list(zip(X, ar_rfe)) # Create list of variables alongside RFE value \ndf_rfe = pd.DataFrame(l_rfe, columns = [\"Features\", \"RFE\"]) # Create data frame of importances with variables and gini column names\ndf_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True\ndf_rfe = df_rfe.drop(columns = [\"RFE\"]) # Drop Unwanted Columns\n\n## Verify\ndf_rfe.info() # Get class, memory, and column info: names, data types, obs.\ndf_rfe.head() # Print first 5 observations\n\n## Write Summary to Text File\ntext_1 = df_rfe.to_string() # Save variable as string value for input below\ntext_file = open(\"Neville/neville_nhanes_crc_results.txt\", \"a\") # Open text file and name with subproject, content, and result suffix\ntext_file.write(\"\\nRecursive Feature Selection\\n\") # Line of text with space after\ntext_file.write(\"\\nSelected Features by Cross-Validation\\n\") # Line of text with space after\ntext_file.write(text_1) # write string version of variable above\ntext_file.write(\"\\n\\n\") # Add two lines of blank text at end of every section text\ntext_file.close() # Close file\n\n#### Section E: Build Regression Model to Evaluate Selected Features\n\n### Step 8: Logistic Regression Model\n\n## Save selected Features to list\nfeatures = list(df_rfe[\"Features\"]) # Save chosen featres as list\ndf_log = df_nev.filter(features) # Keep only selected columns from rfe\ndf_log[\"outcome\"] = df_nev[\"outcome\"] # Add outcome variable\n\n## Logisitc Regression in Scikit Learn\nx = df_log[features] # features as x\ny = df_log[\"outcome\"] # Save outcome variable as y\nLog = LogisticRegression(solver = \"liblinear\")\nmodel_log = Log.fit(x, y) # Fit model\nscore_log = model_log.score(x, y) # rsq value\ncoef_log = model_log.coef_ # Coefficient models as scipy array\nlogfeatures = df_log[features].columns # Get columns from features df\n\n## Output Coefficients\ndf_logfeatures = pd.DataFrame(logfeatures) # Convert list to data frame\ndf_logcoef = pd.DataFrame(coef_log) # Convert array to data frame\ndf_logcoef = df_logcoef.transpose() # Transpose Rows and Columns\ndf_logcoef = df_logcoef.reset_index() # Reset index and save as column\ndf_logfeatures = df_logfeatures.reset_index() # Reset index and save as column\ndf_logfeatures = df_logfeatures.rename(columns = {0: \"Variable\"}) # Rename column\ndf_logcoef = df_logcoef.rename(columns = {0: \"Coefficient\"}) # Rename column\ndf_score = pd.merge(df_logfeatures, df_logcoef, on = \"index\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\ndf_score[\"Score\"] = round((df_score[\"Coefficient\"] * 100), 1)\ndf_score = df_score.drop(columns = [\"index\", \"Coefficient\"]) # Remove outcome variable\ndf_score = df_score.sort_values(by = [\"Score\"], ascending = False) # Sort data frame by gini value in desceding order\n\n## Verify\ndf_score.info() # Get class, memory, and column info: names, data types, obs.\ndf_score.head() # Print first 5 observations\n\n## Write Summary to Text File\ntext_1 = str(df_score) # Save variable as string value for input below\ntext_2 = str(score_log) # Save variable as string value for input below\ntext_file = open(\"Neville/neville_nhanes_crc_results.txt\", \"a\") # Open text file and name with subproject, content, and result 
suffix\ntext_file.write(\"\\nRegression Model\\n\") # Line of text with space after\ntext_file.write(\"\\nCoefficient Scores\\n\") # Line of text with space after\ntext_file.write(text_1) # write string version of variable above\ntext_file.write(\"\\n\\nR sq = \") # Line of text with space after\ntext_file.write(text_2) # write string version of variable above\ntext_file.write(\"\\n\\n\") # Add two lines of blank text at end of every section text\ntext_file.write(\"THE END\")\ntext_file.close() # Close file\n\n## Write to CSV\ndf_score.to_csv(r\"Neville/neville_nhanes_crc_results.csv\") # Clean in excel and select variable\n\nprint(\"THE END\")\n#### End Script\n\n" ]
[ [ "pandas.merge", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.impute.SimpleImputer", "pandas.DataFrame", "numpy.std", "numpy.mean", "sklearn.feature_selection.RFECV", "sklearn.preprocessing.StandardScaler", "numpy.where", "sklearn.decomposition.PCA" ] ]
pchandrasekaran1595/Female-and-Male-Eyes
[ "5b5e98e7dafb83eb91822e9e272f148918e6ec8c" ]
[ "build_dataloaders.py" ]
[ "import os\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader as DL\nfrom sklearn.model_selection import train_test_split\n\nimport utils as u\n\n#####################################################################################################\n\nclass DS(Dataset):\n def __init__(self, X=None, y=None, mode=None, transform=None):\n self.mode = mode\n self.transform = transform\n self.X = X\n if self.mode == \"train\" or self.mode == \"valid\":\n self.y = y\n \n def __len__(self):\n return self.X.shape[0]\n \n def __getitem__(self, idx):\n if self.mode == \"train\" or self.mode == \"valid\":\n return self.transform(self.X[idx]), torch.FloatTensor(self.y[idx])\n elif self.mode == \"test\":\n return self.transform(self.X[idx])\n else:\n raise ValueError(\"Invalid Argument passed to 'mode'\")\n\n#####################################################################################################\n\ndef build(path=None, batch_size=None, do_full=None, do_augment=None):\n assert(isinstance(path, str))\n assert(isinstance(batch_size, int))\n\n u.unzip(path, do_full)\n\n m_images_path, f_images_path = os.path.join(u.DATA_PATH, \"Male\"), os.path.join(u.DATA_PATH, \"Female\")\n \n m_labels, f_labels = np.zeros((len(os.listdir(m_images_path)), 1)), np.ones((len(os.listdir(f_images_path)), 1))\n m_images, f_images = u.get_images(m_images_path, size=u.PRETRAINED_SIZE), u.get_images(f_images_path, size=u.PRETRAINED_SIZE)\n labels = np.concatenate((m_labels, f_labels), axis=0)\n images = np.concatenate((m_images, f_images), axis=0)\n\n tr_images, va_images, tr_labels, va_labels = train_test_split(images, labels, test_size=0.2, shuffle=True, random_state=u.SEED)\n\n del images, labels\n\n if do_augment:\n augment = u.get_augment(seed=u.SEED)\n tr_images = augment(images=tr_images)\n \n tr_data_setup = DS(X=tr_images, y=tr_labels, mode=\"train\", transform=u.FEA_TRANSFORM)\n va_data_setup = DS(X=va_images, y=va_labels, mode=\"valid\", transform=u.FEA_TRANSFORM)\n\n tr_data = DL(tr_data_setup, batch_size=batch_size, shuffle=True, generator=torch.manual_seed(u.SEED))\n va_data = DL(va_data_setup, batch_size=batch_size, shuffle=False)\n\n dataloaders = {\"train\" : tr_data, \"valid\" : va_data}\n return dataloaders\n\n#####################################################################################################\n" ]
[ [ "torch.manual_seed", "torch.utils.data.DataLoader", "sklearn.model_selection.train_test_split", "numpy.concatenate", "torch.FloatTensor" ] ]
thu-coai/NAST
[ "ef765d412f6e9a2ebdcc7d62c99ec2e883d0e17a" ]
[ "eval/classifier/cls.py" ]
[ "import numpy as np\nimport logging\nimport time\nimport os\nfrom itertools import chain\n\nimport torch\nfrom torch import nn\n\nfrom utils import Storage, BaseModel, SummaryHelper, storage_to_list, CheckpointManager, RAdam\n\nfrom .model import Network\n\nclass Classifier(BaseModel):\n\tdef __init__(self, param):\n\t\targs = param.args\n\t\tnet = Network(param)\n\t\tself.optimizer = RAdam(net.get_parameters_by_name(\"train\", silent=True), lr=args.lr, weight_decay=1e-3)\n\t\toptimizerList = {\"optimizer\": self.optimizer}\n\t\tcheckpoint_manager = CheckpointManager(args.name, args.model_dir, \\\n\t\t\t\t\t\targs.checkpoint_steps, args.checkpoint_max_to_keep, \"max\")\n\n\t\tsuper().__init__(param, net, optimizerList, checkpoint_manager)\n\n\t\tself.create_summary()\n\n\tdef create_summary(self):\n\t\targs = self.param.args\n\t\tself.summaryHelper = SummaryHelper(\"%s/%s_%s\" % \\\n\t\t\t\t(args.log_dir, args.name, time.strftime(\"%H%M%S\", time.localtime())), \\\n\t\t\t\targs)\n\t\tself.trainSummary = self.summaryHelper.addGroup(\\\n\t\t\tscalar=[\"loss\", \"acc\", \"acc0\", \"acc1\", \"f1\", \"acc_ori\", \"acc0_ori\", \"acc1_ori\", \"f1_ori\", \"acc_ref\", \"acc0_ref\", \"acc1_ref\", \"f1_ref\"],\\\n\t\t\tprefix=\"train\")\n\n\t\tscalarlist = [\"loss\", \"acc\", \"acc0\", \"acc1\", \"f1\", \"acc_ori\", \"acc0_ori\", \"acc1_ori\", \"f1_ori\", \"acc_ref\", \"acc0_ref\", \"acc1_ref\", \"f1_ref\"]\n\t\ttensorlist = []\n\t\ttextlist = []\n\t\temblist = []\n\t\tfor i in self.args.show_sample:\n\t\t\ttextlist.append(\"show_str%d\" % i)\n\t\tself.devSummary = self.summaryHelper.addGroup(\\\n\t\t\tscalar=scalarlist,\\\n\t\t\ttensor=tensorlist,\\\n\t\t\ttext=textlist,\\\n\t\t\tembedding=emblist,\\\n\t\t\tprefix=\"dev\")\n\t\tself.testSummary = self.summaryHelper.addGroup(\\\n\t\t\tscalar=scalarlist,\\\n\t\t\ttensor=tensorlist,\\\n\t\t\ttext=textlist,\\\n\t\t\tembedding=emblist,\\\n\t\t\tprefix=\"test\")\n\n\tdef _preprocess_batch(self, data):\n\t\tincoming = Storage()\n\t\tincoming.data = data = Storage(data)\n\n\t\tdata.sent = torch.tensor(data.sent, dtype=torch.long, device=self.args.cuda)\n\t\tdata.domain = torch.tensor(data.domain, dtype=torch.long, device=self.args.cuda)\n\t\tdata.adv = torch.tensor(data.adv, dtype=torch.long, device=self.args.cuda)\n\t\tdata.sent_length = np.array(data.sent_length)\n\n\t\treturn incoming\n\n\tdef get_next_batch(self, dm, key, restart=True):\n\t\tdata = dm.get_next_batch(key)\n\t\tif data is None:\n\t\t\tif restart:\n\t\t\t\tdm.restart(key)\n\t\t\t\treturn self.get_next_batch(dm, key, False)\n\t\t\telse:\n\t\t\t\treturn None\n\t\treturn self._preprocess_batch(data)\n\n\tdef train(self, batch_num):\n\t\targs = self.param.args\n\t\tdm = self.param.volatile.dm\n\t\tdatakey = 'train'\n\t\tdm.restart('train', args.batch_size)\n\n\t\tfor i in range(batch_num):\n\t\t\tself.now_batch += 1\n\n\t\t\tself.zero_grad()\n\t\t\tincoming = self.get_next_batch(dm, datakey)\n\t\t\tincoming.args = Storage()\n\t\t\tself.net.forward(incoming)\n\t\t\tincoming.result.loss.backward()\n\t\t\tnn.utils.clip_grad_norm_(self.net.parameters(), args.grad_clip)\n\t\t\tself.optimizer.step()\n\n\t\t\t# incoming.result.lr = self.optimizer.param_groups[0]['lr']\n\t\t\tself.trainSummary(self.now_batch, storage_to_list(incoming.result))\n\t\t\tlogging.info(\"batch %d : loss=%f\", self.now_batch, incoming.result.loss)\n\n\tdef predict_str(self, sent_str):\n\t\tincoming = Storage()\n\t\tincoming.data = data = Storage()\n\t\tdata.batch_size = len(sent_str)\n\t\tdata.sent_str = sent_str\n\t\tdm = 
self.param.volatile.dm\n\t\tdata.domain = torch.tensor(np.array([0 for _ in range(data.batch_size)]), dtype=torch.long, device=self.args.cuda)\n\n\t\tself.net.eval()\n\t\tself.net.forward(incoming)\n\n\t\treturn incoming.result.predict.detach().cpu().numpy()\n\n\tdef evaluate(self, key):\n\t\targs = self.param.args\n\t\tdm = self.param.volatile.dm\n\n\t\tdm.restart(key, args.batch_size, shuffle=False)\n\n\t\tanswer = []\n\t\tpredict = []\n\t\tadv = []\n\n\t\twhile True:\n\t\t\tincoming = self.get_next_batch(dm, key, restart=False)\n\t\t\tif incoming is None:\n\t\t\t\tbreak\n\t\t\tincoming.args = Storage()\n\n\t\t\twith torch.no_grad():\n\t\t\t\tself.net.forward(incoming)\n\n\t\t\t\tnow_answer = incoming.data.domain.detach().cpu().numpy()\n\t\t\t\tnow_predict = incoming.result.predict.detach().cpu().numpy()\n\t\t\t\tnow_adv = incoming.data.adv.detach().cpu().numpy()\n\n\t\t\t\tanswer.append(now_answer)\n\t\t\t\tpredict.append(now_predict)\n\t\t\t\tadv.append(now_adv)\n\n\t\tdef calcacc(answer, predict):\n\t\t\tacc = np.mean(answer == predict)\n\t\t\tacc0 = np.sum((answer == 0) * (predict == 0)) / np.sum(answer == 0)\n\t\t\tacc1 = np.sum((answer == 1) * (predict == 1)) / np.sum(answer == 1)\n\t\t\tf1 = acc0 * acc1 * 2 / (acc0 + acc1 + 1e-10)\n\t\t\treturn acc, acc0, acc1, f1\n\n\t\tanswer = np.concatenate(answer, axis=0)\n\t\tpredict = np.concatenate(predict, axis=0)\n\t\tadv = np.concatenate(adv, axis=0)\n\n\t\tdetail_arr = Storage()\n\t\tdetail_arr[\"acc\"], detail_arr[\"acc0\"], detail_arr[\"acc1\"], detail_arr[\"f1\"] = calcacc(answer, predict)\n\t\tanswer_ori = answer[adv == 0]\n\t\tpredict_ori = predict[adv == 0]\n\t\tdetail_arr[\"acc_ori\"], detail_arr[\"acc0_ori\"], detail_arr[\"acc1_ori\"], detail_arr[\"f1_ori\"] = calcacc(answer_ori, predict_ori)\n\t\tanswer_ref = answer[adv == 1]\n\t\tpredict_ref = predict[adv == 1]\n\t\tdetail_arr[\"acc_ref\"], detail_arr[\"acc0_ref\"], detail_arr[\"acc1_ref\"], detail_arr[\"f1_ref\"] = calcacc(answer_ref, predict_ref)\n\n\t\treturn detail_arr\n\n\tdef train_process(self):\n\t\targs = self.param.args\n\t\tdm = self.param.volatile.dm\n\n\t\twhile self.now_epoch < args.epochs:\n\t\t\tself.now_epoch += 1\n\t\t\tself.updateOtherWeights()\n\n\t\t\tself.net.train()\n\t\t\tself.train(args.batch_per_epoch)\n\n\t\t\tself.net.eval()\n\t\t\t# devloss_detail = self.evaluate(\"dev\")\n\t\t\t# self.devSummary(self.now_batch, devloss_detail)\n\t\t\t# logging.info(\"epoch %d, evaluate dev\", self.now_epoch)\n\n\t\t\ttestloss_detail = self.evaluate(\"test\")\n\t\t\tself.testSummary(self.now_batch, testloss_detail)\n\t\t\tlogging.info(\"epoch %d, evaluate test\", self.now_epoch)\n\n\t\t\t#self.lr_scheduler.step(devloss_detail.geo)\n\t\t\tself.save_checkpoint(value=testloss_detail.f1)\n\n\tdef test(self, key):\n\t\targs = self.param.args\n\t\tdm = self.param.volatile.dm\n\n\t\ttestloss_detail = self.evaluate(\"test\")\n\t\tself.testSummary(self.now_batch, testloss_detail)\n\t\tlogging.info(\"epoch %d, evaluate test\", self.now_epoch)\n\n\tdef test_process(self):\n\t\tlogging.info(\"Test Start.\")\n\t\tself.net.eval()\n\t\t#self.test(\"dev\")\n\t\tself.test(\"test\")\n\t\tlogging.info(\"Test Finish.\")\n" ]
[ [ "torch.tensor", "numpy.concatenate", "torch.no_grad", "numpy.mean", "numpy.array", "numpy.sum" ] ]
timestocome/RaspberryPi-Robot
[ "b10d25cbfe2f7a60b82649503ea18213bdfd0f66" ]
[ "RobotBrain/BlackRobot_SARSA_Trace.py" ]
[ "# http://github.com/timestocome\n\n\n# train a raspberry pi robot to wander the house while avoiding obstacles\n# and looking for cats\n\n# this robot uses wheels for steering\n# 4 wheel drive with separate controls each side\n\n\n# change from off policy learning in first try\n# adapted from https://morvanzhou.github.io/tutorials/\n\n\nimport numpy as np\nfrom pathlib import Path\n\n\n\nfrom FindCats import FindCats\nfrom FindDistance import FindDistance\nfrom MoveBlackRobot import MoveRobot\n\n\nimport datetime\n\n\n# init\ncat_finder = FindCats()\nmin_cat = cat_finder.min_cat\nmerlin = cat_finder.merlin\nno_cat = cat_finder.no_cat\n\ndistance_finder = FindDistance()\nmax_distance = distance_finder.max_distance\n\n\nmoveRobot = MoveRobot()\nactions = moveRobot.actions\n\nqTable = 'qTable2.npy'\nepsilon = 1.0\n\n\n# robot environment\ndef get_distance():\n # returns distance 1-50\n distance = distance_finder.get_distance()\n \n if distance < 1: distance = 1\n return int(distance)\n\n\ndef get_cat():\n # returns 0 for Min, 1 for Merlin, 2 for no cat\n cat = cat_finder.is_cat()\n return np.argmax([cat[0][1], cat[1][1], cat[2][1]])\n\n\n\n# robot actions \ndef move(action, distance, cat):\n\n reward = 0.001\n buffer_distance = 12.\n \n # penalty for being too closes to an obstacle\n if distance <= buffer_distance: # buffer zone in cm\n reward -= buffer_distance \n \n # reward for locating cat\n if cat == merlin:\n reward += 10\n if cat == min_cat:\n reward += 10\n \n # this robot is more cautious, increasing forward reward to offset that \n # get reward for moving or robot will eventually park itself in middle of the room\n if action == 0: \n moveRobot.forward()\n reward += 5 # reward robot for covering distance\n elif action == 1: \n moveRobot.reverse()\n reward += 0.0 # discourage reverse, no sensors on back of robot\n elif action == 2: \n moveRobot.hard_left()\n reward += 1\n elif action == 3: \n moveRobot.hard_right()\n reward += 1\n '''\n # robot's wheels were slipping too much for these manouvers \n elif action == 4: \n moveRobot.turn_left()\n reward += 1\n elif action == 5: \n moveRobot.turn_right()\n reward += 1\n '''\n\n #print(\"state %d %d, action %d, reward %d epsilon %lf\" % (distance, cat, action, reward, epsilon))\n\n return reward \n\n\n\n\n\n\n###############################################################################\n # q learning happens here\n###############################################################################\n\nn_distance_states = max_distance + 1\nn_cat_states = 3\nn_actions = len(actions)\n\n# training vars\nlr = 0.01 # learning rate\ngamma = 0.9 # memory (gamma^n_steps)\nn_loops = 500 # training loops to perform\n\n\n\n# init new table\ndef init_q_table(n_distance_states, n_cat_states, n_actions):\n \n table = np.zeros((n_distance_states, n_cat_states, n_actions))\n return table\n\n\n\n# load saved table from file\ndef load_q_table():\n \n t_1d = np.load(qTable)\n table = t_1d.reshape(n_distance_states, n_cat_states, n_actions)\n \n return table\n\n\n\n# write table to disk\ndef save_q_table(t):\n \n t_1d = t.reshape(n_distance_states * n_cat_states * n_actions, 1)\n np.save(qTable, t_1d)\n \n\n\ndef choose_action(d, c, q_table):\n\n global epsilon\n state_actions = q_table[d][c][:]\n\n # random move or no data recorded for this state yet\n if (np.random.uniform() < epsilon) or (np.sum(state_actions) == 0):\n \n action_chose = np.random.randint(n_actions)\n \n # decrease random moves over time to a minimum of 10%\n if epsilon > 0.1: epsilon *= 
0.9\n \n else:\n action_chose = state_actions.argmax()\n \n return action_chose\n\n\ndef rl():\n \n # init new table if none found\n saved_table = Path(qTable)\n if saved_table.is_file():\n q_table = load_q_table()\n else:\n q_table = init_q_table(n_distance_states, n_cat_states, n_actions)\n\n \n n_steps = 0\n \n \n # http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf pg 14+\n # trace_decay of 0 == temporal difference learning\n # trace_decay of 1 == better monte carlo learning\n trace_decay = 0.9 # backward looking\n eligibility_trace = np.zeros((n_distance_states, n_cat_states, n_actions))\n \n # prime loop with first action\n d = get_distance()\n c = get_cat()\n a = choose_action(d, c, q_table)\n start_time = datetime.datetime.now()\n \n \n while n_steps < n_loops:\n\n # move robot and update state\n reward = move(a, d, c)\n d_next = get_distance()\n c_next = get_cat()\n \n \n # chose action based on next observation\n a_next = choose_action(d_next, c_next, q_table)\n \n # SARSA learning\n s_target = reward + gamma + q_table[d_next][c_next][a_next]\n \n \n # what robot thought would happen next\n s_predict = q_table[d][c][a]\n \n \n # update eligibility trace\n eligibility_trace[d, c, a] += 1\n \n # update q_table to reflect new knowledge\n error = s_target - s_predict\n q_table[d][c][a] += lr * error * eligibility_trace[d][c][a]\n \n \n # decay eligibility trace\n eligibility_trace *= gamma * trace_decay\n \n # update state for next loop\n d = d_next\n c = c_next\n a = a_next\n \n n_steps += 1\n \n # save data every 100 steps incase of failure\n if n_steps % 100 == 0:\n save_q_table(q_table)\n print(datetime.datetime.now() - start_time)\n start_time = datetime.datetime.now()\n \n \n return q_table\n\n\n\n\n###############################################################################\n# clean shut down of hardware\n###############################################################################\ndef cleanup():\n \n cat_finder.cleanup()\n distance_finder.cleanup()\n moveRobot.cleanup()\n\n\n\n\n\n\n###############################################################################\n# run code\n###############################################################################\n\nq_table = rl()\n\ncleanup()\n\n\n'''\n#q_table = load_q_table()\nprint('--------------------------------')\nprint('Final Q Table')\n\nfor i in range(n_distance_states):\n for j in range(n_cat_states):\n print('distance %d, cat %d' %(i, j))\n print('action values', s_table[i, j, :])\n \n'''\n\n\n\n# print actions by distance by cat\nz = np.zeros(n_distance_states)\nfor i in range(n_distance_states):\n for j in range(n_cat_states):\n if j == 2: # no cat\n z[i] = np.argmax(q_table[i, j, :])\n \nprint('distance, action')\nfor i in range(len(z)):\n a = int(z[i])\n print(i, actions[a])\n \n \n\n" ]
[ [ "numpy.save", "numpy.random.uniform", "numpy.argmax", "numpy.load", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
sadrayan/vote-roll-call
[ "3c19ef3213fcc10339159ae29f9d8d2fb5b4cb2a" ]
[ "ConvMF/text_analysis/models.py" ]
[ "'''\nCreated on Dec 8, 2015\n\n@author: donghyun\n'''\nimport numpy as np\nnp.random.seed(1337)\n\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers.containers import Sequential\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.layers.core import Reshape, Flatten, Dropout, Dense\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Graph\nfrom keras.preprocessing import sequence\n\n\nclass CNN_module():\n '''\n classdocs\n '''\n batch_size = 128\n # More than this epoch cause easily over-fitting on our data sets\n nb_epoch = 5\n\n def __init__(self, output_dimesion, vocab_size, dropout_rate, emb_dim, max_len, nb_filters, init_W=None):\n\n self.max_len = max_len\n max_features = vocab_size\n vanila_dimension = 200\n projection_dimension = output_dimesion\n\n filter_lengths = [3, 4, 5]\n self.model = Graph()\n\n '''Embedding Layer'''\n self.model.add_input(name='input', input_shape=(max_len,), dtype=int)\n\n if init_W is None:\n self.model.add_node(Embedding(\n max_features, emb_dim, input_length=max_len), name='sentence_embeddings', input='input')\n else:\n self.model.add_node(Embedding(max_features, emb_dim, input_length=max_len, weights=[\n init_W / 20]), name='sentence_embeddings', input='input')\n\n '''Convolution Layer & Max Pooling Layer'''\n for i in filter_lengths:\n model_internal = Sequential()\n model_internal.add(\n Reshape(dims=(1, self.max_len, emb_dim), input_shape=(self.max_len, emb_dim)))\n model_internal.add(Convolution2D(\n nb_filters, i, emb_dim, activation=\"relu\"))\n model_internal.add(MaxPooling2D(\n pool_size=(self.max_len - i + 1, 1)))\n model_internal.add(Flatten())\n\n self.model.add_node(model_internal, name='unit_' +\n str(i), input='sentence_embeddings')\n\n '''Dropout Layer'''\n self.model.add_node(Dense(vanila_dimension, activation='tanh'),\n name='fully_connect', inputs=['unit_' + str(i) for i in filter_lengths])\n self.model.add_node(Dropout(dropout_rate),\n name='dropout', input='fully_connect')\n '''Projection Layer & Output Layer'''\n self.model.add_node(Dense(projection_dimension, activation='tanh'),\n name='projection', input='dropout')\n\n # Output Layer\n self.model.add_output(name='output', input='projection')\n self.model.compile('rmsprop', {'output': 'mse'})\n\n def load_model(self, model_path):\n self.model.load_weights(model_path)\n\n def save_model(self, model_path, isoverwrite=True):\n self.model.save_weights(model_path, isoverwrite)\n\n def qualitative_CNN(self, vocab_size, emb_dim, max_len, nb_filters):\n self.max_len = max_len\n max_features = vocab_size\n\n filter_lengths = [3, 4, 5]\n print(\"Build model...\")\n self.qual_model = Graph()\n self.qual_conv_set = {}\n '''Embedding Layer'''\n self.qual_model.add_input(\n name='input', input_shape=(max_len,), dtype=int)\n\n self.qual_model.add_node(Embedding(max_features, emb_dim, input_length=max_len, weights=self.model.nodes['sentence_embeddings'].get_weights()),\n name='sentence_embeddings', input='input')\n\n '''Convolution Layer & Max Pooling Layer'''\n for i in filter_lengths:\n model_internal = Sequential()\n model_internal.add(\n Reshape(dims=(1, max_len, emb_dim), input_shape=(max_len, emb_dim)))\n self.qual_conv_set[i] = Convolution2D(nb_filters, i, emb_dim, activation=\"relu\", weights=self.model.nodes[\n 'unit_' + str(i)].layers[1].get_weights())\n model_internal.add(self.qual_conv_set[i])\n model_internal.add(MaxPooling2D(pool_size=(max_len - i + 1, 1)))\n model_internal.add(Flatten())\n\n self.qual_model.add_node(\n 
model_internal, name='unit_' + str(i), input='sentence_embeddings')\n self.qual_model.add_output(\n name='output_' + str(i), input='unit_' + str(i))\n\n self.qual_model.compile(\n 'rmsprop', {'output_3': 'mse', 'output_4': 'mse', 'output_5': 'mse'})\n\n def train(self, X_train, V, item_weight, seed):\n X_train = sequence.pad_sequences(X_train, maxlen=self.max_len)\n np.random.seed(seed)\n X_train = np.random.permutation(X_train)\n np.random.seed(seed)\n V = np.random.permutation(V)\n np.random.seed(seed)\n item_weight = np.random.permutation(item_weight)\n\n print(\"Train...CNN module\")\n history = self.model.fit({'input': X_train, 'output': V},\n verbose=0, batch_size=self.batch_size, nb_epoch=self.nb_epoch, sample_weight={'output': item_weight})\n\n # cnn_loss_his = history.history['loss']\n # cmp_cnn_loss = sorted(cnn_loss_his)[::-1]\n # if cnn_loss_his != cmp_cnn_loss:\n # self.nb_epoch = 1\n return history\n\n def get_projection_layer(self, X_train):\n X_train = sequence.pad_sequences(X_train, maxlen=self.max_len)\n Y = self.model.predict(\n {'input': X_train}, batch_size=len(X_train))['output']\n return Y\n" ]
[ [ "numpy.random.permutation", "numpy.random.seed" ] ]
agencyenterprise/mne-nirs
[ "e436c295fa6be7fe0d8d1c1475c60ba0a98d6118" ]
[ "mne_nirs/io/snirf/_snirf.py" ]
[ "# Authors: Robert Luke <mail@robertluke.net>\n#\n# License: BSD (3-clause)\n\nimport h5py as h5py\nimport re\nimport numpy as np\nfrom mne.io.pick import _picks_to_idx\n\n\ndef write_raw_snirf(raw, fname):\n \"\"\"Write continuous wave data to disk in SNIRF format.\n\n Parameters\n ----------\n raw : instance of Raw\n Data to write to file. Must contain only `fnirs_cw_amplitude` type.\n fname : str\n Path to the SNIRF data file.\n \"\"\"\n\n picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude', exclude=[])\n assert len(picks) == len(raw.ch_names), \"Data must be fnirs_cw_amplitude\"\n\n # Reordering channels\n num_chans = len(raw.ch_names)\n raw = raw.copy()\n raw.pick(picks=list(range(num_chans)[0::2]) + list(range(num_chans)[1::2]))\n\n with h5py.File(fname, \"w\") as f:\n f.create_dataset(\"nirs/data1/measurementList1/dataType\", data=1)\n f.create_dataset(\"/nirs/data1/dataTimeSeries\", data=raw.get_data().T)\n f.create_dataset(\"/nirs/data1/time\", data=raw.times)\n\n # Store measurement and birth date\n datestr = raw.info[\"meas_date\"].strftime(\"%Y-%m-%d\")\n timestr = raw.info[\"meas_date\"].strftime(\"%H:%M:%SZ\")\n birthstr = '{0:02d}-{1:02d}-{2:02d}'.format(\n raw.info[\"subject_info\"]['birthday'][0],\n raw.info[\"subject_info\"]['birthday'][1],\n raw.info[\"subject_info\"]['birthday'][2])\n f.create_dataset(\"nirs/metaDataTags/\"\n \"MeasurementDate\", data=[datestr.encode('UTF-8')])\n f.create_dataset(\"nirs/metaDataTags/\"\n \"MeasurementTime\", data=[timestr.encode('UTF-8')])\n f.create_dataset(\"nirs/metaDataTags/\"\n \"DateOfBirth\", data=[birthstr.encode('UTF-8')])\n\n # Extract info from file names\n rgx = r'S(\\d+)_D(\\d+) (\\d+)'\n chs = raw.info['chs']\n sources = [float(re.match(rgx, r['ch_name']).groups()[0])\n for r in chs]\n detectors = [float(re.match(rgx, r['ch_name']).groups()[1])\n for r in chs]\n wavelengths = [float(re.match(rgx, r['ch_name']).groups()[2])\n for r in chs]\n\n # Create info summary and recode\n sources_sorted = np.sort(np.unique(sources))\n detectors_sorted = np.sort(np.unique(detectors))\n wavelengths_sorted = np.sort(np.unique(wavelengths))\n sources_sorted = [str(int(src)).encode('UTF-8')\n for src in sources_sorted]\n detectors_sorted = [str(int(det)).encode('UTF-8')\n for det in detectors_sorted]\n wavelengths_sorted = [str(wve).encode('UTF-8')\n for wve in wavelengths_sorted]\n\n # Store source/detector/wavelength info\n f.create_dataset(\"nirs/probe/sourceLabels\",\n data=[('S'.encode('UTF-8') + src)\n for src in sources_sorted])\n f.create_dataset(\"nirs/probe/detectorLabels\",\n data=[('D'.encode('UTF-8') + det)\n for det in detectors_sorted])\n f.create_dataset(\"nirs/probe/wavelengths\",\n data=[float(wve)\n for wve in wavelengths_sorted])\n\n # Create 3d locs and store\n srclocs = np.empty((len(np.unique(sources_sorted)), 3))\n detlocs = np.empty((len(np.unique(detectors_sorted)), 3))\n for i, src in enumerate(sources_sorted):\n idx = sources.index(float(src))\n srclocs[i, :] = raw.info['chs'][idx]['loc'][3:6]\n for i, det in enumerate(detectors_sorted):\n idx = detectors.index(float(det))\n detlocs[i, :] = raw.info['chs'][idx]['loc'][6:9]\n f.create_dataset(\"nirs/probe/sourcePos3D\", data=srclocs)\n f.create_dataset(\"nirs/probe/detectorPos3D\", data=detlocs)\n f.create_dataset(\"nirs/metaDataTags/LengthUnit\",\n data=['m'.encode('UTF-8')])\n\n # Prep data for storing each MNE channel as SNIRF measurementList\n channels = [\"measurementList\" + str(idx + 1)\n for idx in range(len(raw.ch_names))]\n sources = 
np.array([float(src) for src in sources])\n detectors = np.array([float(det) for det in detectors])\n sources_sorted = [float(src) for src in sources_sorted]\n detectors_sorted = [float(det) for det in detectors_sorted]\n wavelengths_sorted = [float(wve) for wve in wavelengths_sorted]\n w = [float(wve) for wve in wavelengths]\n wavelengths_index = [wavelengths_sorted.index(wve) + 1 for wve in w]\n\n for idx, ch in enumerate(channels):\n f.create_dataset('nirs/data1/' + ch + '/sourceIndex',\n data=[sources_sorted.index(sources[idx]) + 1])\n f.create_dataset('nirs/data1/' + ch + '/detectorIndex',\n data=[detectors_sorted.index(detectors[idx]) + 1])\n f.create_dataset('nirs/data1/' + ch + '/wavelengthIndex',\n data=[wavelengths_index[idx]])\n\n # Store demographic info\n subject_id = raw.info[\"subject_info\"]['first_name']\n f.create_dataset(\"nirs/metaDataTags/SubjectID\",\n data=[subject_id.encode('UTF-8')])\n\n # Convert MNE annotations to SNIRF stims\n for desc in np.unique(raw.annotations.description):\n key = \"stim\" + desc\n trgs = np.where(raw.annotations.description == desc)[0]\n stims = np.zeros((len(trgs), 3))\n for idx, trg in enumerate(trgs):\n stims[idx, :] = [raw.annotations.onset[trg], 5.0,\n raw.annotations.duration[trg]]\n f.create_dataset('/nirs/' + key + '/data', data=stims)\n\n # Store probe landmarks\n if raw.info['dig'] is not None:\n diglocs = np.empty((len(raw.info['dig']), 3))\n digname = list()\n for idx, dig in enumerate(raw.info['dig']):\n ident = re.match(r\"\\d+ \\(FIFFV_POINT_(\\w+)\\)\",\n str(dig.get(\"ident\")))\n if ident is not None:\n digname.append(ident[1])\n else:\n digname.append(str(dig.get(\"ident\")))\n diglocs[idx, :] = dig.get(\"r\")\n digname = [d.encode('UTF-8') for d in digname]\n f.create_dataset(\"nirs/probe/landmarkPos3D\", data=diglocs)\n f.create_dataset(\"nirs/probe/landmarkLabels\", data=digname)\n # Add non standard (but allowed) custom metadata tags\n f.create_dataset(\"nirs/metaDataTags/MNE_coordFrame\",\n data=[int(raw.info['dig'][0].get(\"coord_frame\"))])\n\n # Add non standard (but allowed) custom metadata tags\n if 'middle_name' in raw.info[\"subject_info\"]:\n mname = [raw.info[\"subject_info\"]['middle_name'].encode('UTF-8')]\n f.create_dataset(\"nirs/metaDataTags/middleName\", data=mname)\n if 'last_name' in raw.info[\"subject_info\"]:\n lname = [raw.info[\"subject_info\"]['last_name'].encode('UTF-8')]\n f.create_dataset(\"nirs/metaDataTags/lastName\", data=lname)\n if 'sex' in raw.info[\"subject_info\"]:\n sex = str(int(raw.info[\"subject_info\"]['sex'])).encode('UTF-8')\n f.create_dataset(\"nirs/metaDataTags/sex\", data=[sex])\n" ]
[ [ "numpy.where", "numpy.unique" ] ]
swilcock0/vs068_ikfast
[ "624ecccd72fb95489daa7f36a9fa612184a2809e" ]
[ "tests/utils.py" ]
[ "import numpy as np\n\ndef best_sol(sols, q_guess, weights, feasible_ranges):\n \"\"\"get the best solution based on UR's joint domain value and weighted joint diff\n modified from :\n https://github.com/ros-industrial/universal_robot/blob/kinetic-devel/ur_kinematics/src/ur_kinematics/test_analytical_ik.py\n\n \"\"\"\n num_jt = len(q_guess)\n valid_sols = []\n for sol in sols:\n test_sol = np.ones(num_jt)*9999.\n for i, jt_name in enumerate(feasible_ranges.keys()):\n for add_ang in [-2.*np.pi, 0, 2.*np.pi]:\n test_ang = sol[i] + add_ang\n if test_ang <= feasible_ranges[jt_name]['upper'] and \\\n test_ang >= feasible_ranges[jt_name]['lower'] and \\\n abs(test_ang - q_guess[i]) < abs(test_sol[i] - q_guess[i]):\n test_sol[i] = test_ang\n if np.all(test_sol != 9999.):\n valid_sols.append(test_sol)\n if len(valid_sols) == 0:\n return None\n best_sol_ind = np.argmin(np.sum((weights*(valid_sols - np.array(q_guess)))**2,1))\n return valid_sols[best_sol_ind]\n\n\ndef check_q(fk_fn, ik_fn, q, feasible_ranges, free_joint_ids=[], diff_tol=1e-3):\n pos, rot = fk_fn(q)\n if free_joint_ids:\n sols = ik_fn(pos, rot, [q[i] for i in free_joint_ids])\n else:\n sols = ik_fn(pos, rot)\n\n qsol = best_sol(sols, q, [1.]*len(q), feasible_ranges)\n if qsol is None:\n qsol = [999.]*len(q)\n diff = np.sum(np.abs(np.array(qsol) - q))\n if diff > diff_tol:\n print(np.array(sols))\n print('Best q:', qsol)\n print('Actual:', np.array(q))\n print('Diff L1 norm:', diff)\n print('Diff: ', q - qsol)\n print('Difdiv:', (q - qsol)/np.pi)\n # if raw_input() == 'q':\n # sys.exit()\n assert False\n\n" ]
[ [ "numpy.all", "numpy.array", "numpy.ones" ] ]
mathrho/Lasagne
[ "ddd44fddcd17603bfa16bd26c246a1cd4123f692" ]
[ "lasagne/utils.py" ]
[ "import numpy as np\n\nimport theano\nimport theano.tensor as T\n\n\ndef floatX(arr):\n \"\"\"Converts data to a numpy array of dtype ``theano.config.floatX``.\n\n Parameters\n ----------\n arr : array_like\n The data to be converted.\n\n Returns\n -------\n numpy ndarray\n The input array in the ``floatX`` dtype configured for Theano.\n If `arr` is an ndarray of correct dtype, it is returned as is.\n \"\"\"\n return np.asarray(arr, dtype=theano.config.floatX)\n\n\ndef shared_empty(dim=2, dtype=None):\n \"\"\"Creates empty Theano shared variable.\n\n Shortcut to create an empty Theano shared variable with\n the specified number of dimensions.\n\n Parameters\n ----------\n dim : int, optional\n The number of dimensions for the empty variable, defaults to 2.\n dtype : a numpy data-type, optional\n The desired dtype for the variable. Defaults to the Theano\n ``floatX`` dtype.\n\n Returns\n -------\n Theano shared variable\n An empty Theano shared variable of dtype ``dtype`` with\n `dim` dimensions.\n \"\"\"\n if dtype is None:\n dtype = theano.config.floatX\n\n shp = tuple([1] * dim)\n return theano.shared(np.zeros(shp, dtype=dtype))\n\n\ndef as_theano_expression(input):\n \"\"\"Wrap as Theano expression.\n\n Wraps the given input as a Theano constant if it is not\n a valid Theano expression already. Useful to transparently\n handle numpy arrays and Python scalars, for example.\n\n Parameters\n ----------\n input : number, numpy array or Theano expression\n Expression to be converted to a Theano constant.\n\n Returns\n -------\n Theano symbolic constant\n Theano constant version of `input`.\n \"\"\"\n if isinstance(input, theano.gof.Variable):\n return input\n else:\n try:\n return theano.tensor.constant(input)\n except Exception as e:\n raise TypeError(\"Input of type %s is not a Theano expression and \"\n \"cannot be wrapped as a Theano constant (original \"\n \"exception: %s)\" % (type(input), e))\n\n\ndef collect_shared_vars(expressions):\n \"\"\"Returns all shared variables the given expression(s) depend on.\n\n Parameters\n ----------\n expressions : Theano expression or iterable of Theano expressions\n The expressions to collect shared variables from.\n\n Returns\n -------\n list of Theano shared variables\n All shared variables the given expression(s) depend on, in fixed order\n (as found by a left-recursive depth-first search). If some expressions\n are shared variables themselves, they are included in the result.\n \"\"\"\n # wrap single expression in list\n if isinstance(expressions, theano.Variable):\n expressions = [expressions]\n # return list of all shared variables\n return [v for v in theano.gof.graph.inputs(reversed(expressions))\n if isinstance(v, theano.compile.SharedVariable)]\n\n\ndef one_hot(x, m=None):\n \"\"\"One-hot representation of integer vector.\n\n Given a vector of integers from 0 to m-1, returns a matrix\n with a one-hot representation, where each row corresponds\n to an element of x.\n\n Parameters\n ----------\n x : integer vector\n The integer vector to convert to a one-hot representation.\n m : int, optional\n The number of different columns for the one-hot representation. 
This\n needs to be strictly greater than the maximum value of `x`.\n Defaults to ``max(x) + 1``.\n\n Returns\n -------\n Theano tensor variable\n A Theano tensor variable of shape (``n``, `m`), where ``n`` is the\n length of `x`, with the one-hot representation of `x`.\n\n Notes\n -----\n If your integer vector represents target class memberships, and you wish to\n compute the cross-entropy between predictions and the target class\n memberships, then there is no need to use this function, since the function\n :func:`lasagne.objectives.categorical_crossentropy()` can compute the\n cross-entropy from the integer vector directly.\n\n \"\"\"\n if m is None:\n m = T.cast(T.max(x) + 1, 'int32')\n\n return T.eye(m)[T.cast(x, 'int32')]\n\n\ndef unique(l):\n \"\"\"Filters duplicates of iterable.\n\n Create a new list from l with duplicate entries removed,\n while preserving the original order.\n\n Parameters\n ----------\n l : iterable\n Input iterable to filter of duplicates.\n\n Returns\n -------\n list\n A list of elements of `l` without duplicates and in the same order.\n \"\"\"\n new_list = []\n seen = set()\n for el in l:\n if el not in seen:\n new_list.append(el)\n seen.add(el)\n\n return new_list\n\n\ndef as_tuple(x, N, t=None):\n \"\"\"\n Coerce a value to a tuple of given length (and possibly given type).\n\n Parameters\n ----------\n x : value or iterable\n N : integer\n length of the desired tuple\n t : type, optional\n required type for all elements\n\n Returns\n -------\n tuple\n ``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.\n\n Raises\n ------\n TypeError\n if `type` is given and `x` or any of its elements do not match it\n ValueError\n if `x` is iterable, but does not have exactly `N` elements\n \"\"\"\n try:\n X = tuple(x)\n except TypeError:\n X = (x,) * N\n\n if (t is not None) and not all(isinstance(v, t) for v in X):\n raise TypeError(\"expected a single value or an iterable \"\n \"of {0}, got {1} instead\".format(t.__name__, x))\n\n if len(X) != N:\n raise ValueError(\"expected a single value or an iterable \"\n \"with length {0}, got {1} instead\".format(N, x))\n\n return X\n\n\ndef compute_norms(array, norm_axes=None):\n \"\"\" Compute incoming weight vector norms.\n\n Parameters\n ----------\n array : ndarray\n Weight array.\n norm_axes : sequence (list or tuple)\n The axes over which to compute the norm. This overrides the\n default norm axes defined for the number of dimensions\n in `array`. When this is not specified and `array` is a 2D array,\n this is set to `(0,)`. If `array` is a 3D, 4D or 5D array, it is\n set to a tuple listing all axes but axis 0. 
The former default is\n useful for working with dense layers, the latter is useful for 1D,\n 2D and 3D convolutional layers.\n (Optional)\n\n Returns\n -------\n norms : 1D array\n 1D array of incoming weight vector norms.\n\n Examples\n --------\n >>> array = np.random.randn(100, 200)\n >>> norms = compute_norms(array)\n >>> norms.shape\n (200,)\n\n >>> norms = compute_norms(array, norm_axes=(1,))\n >>> norms.shape\n (100,)\n \"\"\"\n ndim = array.ndim\n\n if norm_axes is not None:\n sum_over = tuple(norm_axes)\n elif ndim == 2: # DenseLayer\n sum_over = (0,)\n elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer\n sum_over = tuple(range(1, ndim))\n else:\n raise ValueError(\n \"Unsupported tensor dimensionality {}.\"\n \"Must specify `norm_axes`\".format(array.ndim)\n )\n\n norms = np.sqrt(np.sum(array**2, axis=sum_over))\n\n return norms\n\n\ndef create_param(spec, shape, name=None):\n \"\"\"\n Helper method to create Theano shared variables for layer parameters\n and to initialize them.\n\n Parameters\n ----------\n spec : numpy array, Theano expression, or callable\n Either of the following:\n\n * a numpy array with the initial parameter values\n * a Theano expression or shared variable representing the parameters\n * a function or callable that takes the desired shape of\n the parameter array as its single argument and returns\n a numpy array.\n\n shape : iterable of int\n a tuple or other iterable of integers representing the desired\n shape of the parameter array.\n\n name : string, optional\n If a new variable is created, the name to give to the parameter\n variable. This is ignored if `spec` is already a Theano expression\n or shared variable.\n\n Returns\n -------\n Theano shared variable or Theano expression\n A Theano shared variable or expression representing layer parameters.\n If a numpy array was provided, a shared variable is initialized to\n contain this array. If a shared variable or expression was provided,\n it is simply returned. If a callable was provided, it is called, and\n its output is used to initialize a shared variable.\n\n Notes\n -----\n This function is called by :meth:`Layer.add_param()` in the constructor\n of most :class:`Layer` subclasses. This enables those layers to\n support initialization with numpy arrays, existing Theano shared variables\n or expressions, and callables for generating initial parameter values.\n \"\"\"\n shape = tuple(shape) # convert to tuple if needed\n if any(d <= 0 for d in shape):\n raise ValueError((\n \"Cannot create param with a non-positive shape dimension. \"\n \"Tried to create param with shape=%r, name=%r\") % (shape, name))\n\n if isinstance(spec, theano.Variable):\n # We cannot check the shape here, Theano expressions (even shared\n # variables) do not have a fixed compile-time shape. We can check the\n # dimensionality though.\n # Note that we cannot assign a name here. 
We could assign to the\n # `name` attribute of the variable, but the user may have already\n # named the variable and we don't want to override this.\n if spec.ndim != len(shape):\n raise RuntimeError(\"parameter variable has %d dimensions, \"\n \"should be %d\" % (spec.ndim, len(shape)))\n return spec\n\n elif isinstance(spec, np.ndarray):\n if spec.shape != shape:\n raise RuntimeError(\"parameter array has shape %s, should be \"\n \"%s\" % (spec.shape, shape))\n return theano.shared(spec, name=name)\n\n elif hasattr(spec, '__call__'):\n arr = spec(shape)\n try:\n arr = floatX(arr)\n except Exception:\n raise RuntimeError(\"cannot initialize parameters: the \"\n \"provided callable did not return an \"\n \"array-like value\")\n if arr.shape != shape:\n raise RuntimeError(\"cannot initialize parameters: the \"\n \"provided callable did not return a value \"\n \"with the correct shape\")\n return theano.shared(arr, name=name)\n\n else:\n raise RuntimeError(\"cannot initialize parameters: 'spec' is not \"\n \"a numpy array, a Theano expression, or a \"\n \"callable\")\n\n\ndef unroll_scan(fn, sequences, outputs_info, non_sequences, n_steps,\n go_backwards=False):\n \"\"\"\n Helper function to unroll for loops. Can be used to unroll theano.scan.\n The parameter names are identical to theano.scan, please refer to here\n for more information.\n\n Note that this function does not support the truncate_gradient\n setting from theano.scan.\n\n Parameters\n ----------\n\n fn : function\n Function that defines calculations at each step.\n\n sequences : TensorVariable or list of TensorVariables\n List of TensorVariable with sequence data. The function iterates\n over the first dimension of each TensorVariable.\n\n outputs_info : list of TensorVariables\n List of tensors specifying the initial values for each recurrent\n value.\n\n non_sequences: list of TensorVariables\n List of theano.shared variables that are used in the step function.\n\n n_steps: int\n Number of steps to unroll.\n\n go_backwards: bool\n If true the recursion starts at sequences[-1] and iterates\n backwards.\n\n Returns\n -------\n List of TensorVariables. Each element in the list gives the recurrent\n values at each time step.\n\n \"\"\"\n if not isinstance(sequences, (list, tuple)):\n sequences = [sequences]\n\n # When backwards reverse the recursion direction\n counter = range(n_steps)\n if go_backwards:\n counter = counter[::-1]\n\n output = []\n prev_vals = outputs_info\n for i in counter:\n step_input = [s[i] for s in sequences] + prev_vals + non_sequences\n out_ = fn(*step_input)\n # The returned values from step can be either a TensorVariable,\n # a list, or a tuple. Below, we force it to always be a list.\n if isinstance(out_, T.TensorVariable):\n out_ = [out_]\n if isinstance(out_, tuple):\n out_ = list(out_)\n output.append(out_)\n\n prev_vals = output[-1]\n\n # iterate over each scan output and convert it to same format as scan:\n # [[output11, output12,...output1n],\n # [output21, output22,...output2n],...]\n output_scan = []\n for i in range(len(output[0])):\n l = map(lambda x: x[i], output)\n output_scan.append(T.stack(*l))\n\n return output_scan\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.sum" ] ]
OPTML-Group/RED-ICLR22
[ "660b41e01465dd0c3a21829f6bc34e4796e96f94" ]
[ "RED_Dataset.py" ]
[ "# coding: utf-8\nimport cv2\nfrom torch.utils.data import Dataset\nimport Transform_Model as TM\nimport random\n# import dlib\nimport numpy as np\nfrom PIL import Image\nimport torchvision.transforms.functional as tf\nfrom torchvision import transforms\nimport torch\n\nclass FaceDataset(Dataset):\n def __init__(self, txt_path, transform = None):\n fh= open(txt_path, 'r')\n clean_imgs = []\n adv_imgs = []\n for line in fh:\n line = line.rstrip()\n words = line.split()\n clean_imgs.append(words[0])\n adv_imgs.append(words[1])\n\n self.clean_imgs = clean_imgs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.adv_imgs = adv_imgs\n self.transform = transform\n\n def rotation(self, image1, image2):\n\n # get a random angle range from (-180, 180)\n angle = transforms.RandomRotation.get_params([-180, 180])\n # same angle rotation for image1 and image2\n image1 = image1.rotate(angle)\n image2 = image2.rotate(angle)\n \n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n\n def flip(self, image1, image2):\n # 50% prob to horizontal flip and vertical flip\n if random.random() > 0.5:\n image1 = tf.hflip(image1)\n image2 = tf.hflip(image2)\n if random.random() > 0.5:\n image1 = tf.vflip(image1)\n image2 = tf.vflip(image2)\n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n def __getitem__(self, index):\n clean_address = self.clean_imgs[index]\n adv_address = self.adv_imgs[index]\n clean_img = TM.preprocess_image(cv2.imread(clean_address))\n \n \n adv_img = TM.preprocess_image(cv2.imread(adv_address))\n # if self.transform is not None:\n \n # clean_img = self.transform(clean_img)\n # adv_img = self.transform(adv_img)\n if self.transform == 'rotation':\n clean_img, adv_img = self.rotation(clean_img, adv_img)\n elif self.transform == 'flip':\n clean_img, adv_img = self.flip(clean_img, adv_img)\n else:\n clean_img = tf.to_tensor(clean_img)\n adv_img = tf.to_tensor(adv_img)\n return clean_img, adv_img\n\n\n def __len__(self):\n return len(self.clean_imgs)\n\n\nclass FaceDatasetTransformTest(Dataset):\n def __init__(self, txt_path, transform = None):\n fh= open(txt_path, 'r')\n clean_imgs = []\n adv_imgs = []\n for line in fh:\n line = line.rstrip()\n words = line.split()\n clean_imgs.append(words[0])\n adv_imgs.append(words[1])\n\n self.clean_imgs = clean_imgs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.adv_imgs = adv_imgs\n self.transform = transform\n\n def rotation(self, image1, image2):\n\n # get a random angle range from (-180, 180)\n angle = transforms.RandomRotation.get_params([-180, 180])\n # same angle rotation for image1 and image2\n image1 = image1.rotate(angle)\n image2 = image2.rotate(angle)\n \n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n\n def flip(self, image1, image2):\n # 50% prob to horizontal flip and vertical flip\n if random.random() > 0.5:\n image1 = tf.hflip(image1)\n image2 = tf.hflip(image2)\n if random.random() > 0.5:\n image1 = tf.vflip(image1)\n image2 = tf.vflip(image2)\n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n \n def hflip(self, image1, image2):\n image1 = tf.hflip(image1)\n image2 = tf.hflip(image2)\n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n \n def vflip(self, image1, image2):\n image1 = tf.vflip(image1)\n image2 = tf.vflip(image2)\n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n \n def 
rotation_new(self, image1, image2):\n\n \n if random.random() > 0.5:\n angle = transforms.RandomRotation.get_params([40, 50])\n else:\n angle = transforms.RandomRotation.get_params([-50, -40])\n \n image1 = image1.rotate(angle)\n image2 = image2.rotate(angle)\n \n image1 = tf.to_tensor(image1)\n image2 = tf.to_tensor(image2)\n return image1, image2\n\n def __getitem__(self, index):\n clean_address = self.clean_imgs[index]\n adv_address = self.adv_imgs[index]\n clean_img = TM.preprocess_image(cv2.imread(clean_address))\n \n \n adv_img = TM.preprocess_image(cv2.imread(adv_address))\n # if self.transform is not None:\n \n # clean_img = self.transform(clean_img)\n # adv_img = self.transform(adv_img)\n if self.transform == 'rotation':\n clean_img_transform, adv_img_transform = self.rotation(clean_img, adv_img)\n elif self.transform == 'flip':\n clean_img_transform, adv_img_transform = self.flip(clean_img, adv_img)\n elif self.transform == 'hflip':\n clean_img_transform, adv_img_transform = self.hflip(clean_img, adv_img)\n elif self.transform == 'vflip':\n clean_img_transform, adv_img_transform = self.vflip(clean_img, adv_img)\n elif self.transform == 'rotation_new':\n clean_img_transform, adv_img_transform = self.rotation_new(clean_img, adv_img)\n clean_img = tf.to_tensor(clean_img)\n adv_img = tf.to_tensor(adv_img)\n return clean_img, adv_img, clean_img_transform, adv_img_transform\n\n\n def __len__(self):\n return len(self.clean_imgs)\n\nclass Labeled_FaceDataset(Dataset):\n def __init__(self, txt_path, label):\n fh = open(txt_path, 'r')\n clean_imgs = []\n adv_imgs = []\n # labels = []\n for line in fh:\n line = line.rstrip()\n words = line.split()\n clean_imgs.append(words[0])\n adv_imgs.append(words[1])\n # labels.append(label)\n\n self.clean_imgs = clean_imgs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.adv_imgs = adv_imgs\n self.label = label\n\n def __getitem__(self, index):\n clean_address = self.clean_imgs[index]\n adv_address = self.adv_imgs[index]\n clean_img = TM.preprocess_image(cv2.imread(clean_address))\n adv_img = TM.preprocess_image(cv2.imread(adv_address))\n # print(clean_img.type)\n clean_img = tf.to_tensor(clean_img)\n adv_img = tf.to_tensor(adv_img)\n return torch.cat((adv_img-clean_img, clean_img),0), self.label\n\n def __len__(self):\n return len(self.clean_imgs)\n\nclass Labeled_FaceDataset_new(Dataset):\n def __init__(self, txt_path, label):\n fh = open(txt_path, 'r')\n clean_imgs = []\n adv_imgs = []\n # labels = []\n for line in fh:\n line = line.rstrip()\n words = line.split()\n clean_imgs.append(words[0])\n adv_imgs.append(words[1])\n # labels.append(label)\n\n self.clean_imgs = clean_imgs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.adv_imgs = adv_imgs\n self.label = label\n\n def __getitem__(self, index):\n clean_address = self.clean_imgs[index]\n adv_address = self.adv_imgs[index]\n clean_img = TM.preprocess_image(cv2.imread(clean_address))\n adv_img = TM.preprocess_image(cv2.imread(adv_address))\n # print(clean_img.type)\n clean_img = tf.to_tensor(clean_img)\n adv_img = tf.to_tensor(adv_img)\n return (adv_img - clean_img), self.label\n\n def __len__(self):\n return len(self.clean_imgs)\n\nclass Labeled_FaceDataset_incremental(Dataset):\n def __init__(self, txt_path, label, known):\n fh = open(txt_path, 'r')\n clean_imgs = []\n adv_imgs = []\n # labels = []\n for line in fh:\n line = line.rstrip()\n words = line.split()\n clean_imgs.append(words[0])\n adv_imgs.append(words[1])\n # labels.append(label)\n\n self.clean_imgs = 
clean_imgs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.adv_imgs = adv_imgs\n self.label = label\n self.known = known\n\n def __getitem__(self, index):\n clean_address = self.clean_imgs[index]\n adv_address = self.adv_imgs[index]\n clean_img = TM.preprocess_image(cv2.imread(clean_address))\n adv_img = TM.preprocess_image(cv2.imread(adv_address))\n # print(clean_img.type)\n clean_img = tf.to_tensor(clean_img)\n adv_img = tf.to_tensor(adv_img)\n return (adv_img - clean_img), self.label, self.known\n\n def __len__(self):\n return len(self.clean_imgs)" ]
[ [ "torch.cat" ] ]
LaGuer/DeepSphere
[ "ebcf162eaa6e23c1c92dbc84e0908695bb7245d7" ]
[ "experiments_psd.py" ]
[ "#!/usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\nScript to run the baseline experiment:\nSVM classification with power spectral densities (PSD) features.\n\"\"\"\n\nimport os\nimport sys\n\nimport numpy as np\n\nfrom deepsphere import experiment_helper\nfrom grid import pgrid\n\n\ndef single_experiment(sigma, order, sigma_noise, path):\n \"\"\"Run as experiment.\n\n Check the notebook `part_sphere.ipynb` to get more insides about this code.\n \"\"\"\n print('Solve the PSD problem for sigma {}, order {}, noise {}'.format(sigma, order, sigma_noise), flush=True)\n\n Nside = 1024\n EXP_NAME = '40sim_{}sides_{}arcmin_{}noise_{}order'.format(\n Nside, sigma, sigma_noise, order)\n\n x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order)\n x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std)\n\n if order==4:\n augmentation = 4\n else:\n augmentation = 10\n\n ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type='psd', augmentation=augmentation)\n features_train, labels_train, features_validation, labels_validation, features_test = ret\n ntrain = len(features_train)//augmentation\n\n nsamples = list(ntrain // 12 * np.linspace(1, 6, num=6).astype(np.int))\n nsamples += list(ntrain // 2 * np.linspace(1, augmentation*2, num=40).astype(np.int))\n\n err_train = np.zeros(shape=[len(nsamples)])\n err_validation = np.zeros(shape=[len(nsamples)])\n err_train[:] = np.nan\n err_validation[:] = np.nan\n\n for i, n in enumerate(nsamples):\n print('{} Solve it for {} samples'.format(i, n), flush=True)\n err_train[i], err_validation[i], _ = experiment_helper.err_svc_linear(\n features_train[:n], labels_train[:n], features_validation,\n labels_validation)\n\n e_train, e_validation, C = experiment_helper.err_svc_linear(\n features_train, labels_train, features_validation, labels_validation)\n print('The validation error is {}%'.format(e_validation * 100), flush=True)\n\n # Cheating in favor of SVM\n e_train, e_test = experiment_helper.err_svc_linear_single(C,\n features_train, labels_train, features_test, labels_test)\n print('The test error is {}%'.format(e_test * 100), flush=True)\n\n np.savez(path + EXP_NAME, [nsamples, err_train, err_validation, e_test])\n\n return e_test\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n sigma = int(sys.argv[1])\n order = int(sys.argv[2])\n sigma_noise = float(sys.argv[3])\n grid = [(sigma, order, sigma_noise)]\n else:\n grid = pgrid()\n\n path = 'results/psd/'\n os.makedirs(path, exist_ok=True)\n\n for sigma, order, sigma_noise in grid:\n print('Launch experiment for sigma={}, order={}, noise={}'.format(sigma, order, sigma_noise))\n res = single_experiment(sigma, order, sigma_noise, path)\n filepath = os.path.join(path, 'psd_results_list_sigma{}'.format(sigma))\n new_data = [order, sigma_noise, res]\n if os.path.isfile(filepath+'.npz'):\n results = np.load(filepath+'.npz')['data'].tolist()\n else:\n results = []\n results.append(new_data)\n np.savez(filepath, data=results)\n" ]
[ [ "numpy.load", "numpy.savez", "numpy.linspace" ] ]
entn-at/AGAIN-VC
[ "dbf94bf55882f897c312c7760cd892c51c93c9ab" ]
[ "util/mytorch.py" ]
[ "import torch\nimport os\nimport numpy as np\nimport random\nimport shutil\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef np2pt(array):\n return torch.from_numpy(array[None]).float()\n\ndef same_seeds(seed):\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n np.random.seed(seed) # Numpy module.\n random.seed(seed) # Python random module.\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\ndef freeze(modules):\n for module in modules:\n for param in module.parameters():\n param.requires_grad = False\n\ndef unfreeze(modules):\n for module in modules:\n for param in module.parameters():\n param.requires_grad = True\n\ndef save_checkpoint(state, save_path, is_best=False, max_keep=None):\n # save checkpoint\n torch.save(state, save_path)\n # deal with max_keep\n save_dir = os.path.dirname(save_path)\n list_path = os.path.join(save_dir, 'latest_checkpoint')\n save_path = os.path.basename(save_path)\n if os.path.exists(list_path):\n with open(list_path) as f:\n ckpt_list = f.readlines()\n ckpt_list = [save_path + '\\n'] + ckpt_list\n else:\n ckpt_list = [save_path + '\\n']\n if max_keep is not None:\n for ckpt in ckpt_list[max_keep:]:\n ckpt = os.path.join(save_dir, ckpt[:-1])\n if os.path.exists(ckpt):\n os.remove(ckpt)\n ckpt_list[max_keep:] = []\n with open(list_path, 'w') as f:\n f.writelines(ckpt_list)\n # copy best\n if is_best:\n shutil.copyfile(os.path.join(save_dir, save_path), os.path.join(save_dir, 'best_model.ckpt'))\n\n\ndef load_checkpoint(ckpt_dir_or_file, map_location=None, load_best=False):\n if os.path.isdir(ckpt_dir_or_file):\n if load_best:\n ckpt_path = os.path.join(ckpt_dir_or_file, 'best_model.ckpt')\n else:\n with open(os.path.join(ckpt_dir_or_file, 'latest_checkpoint')) as f:\n ckpt_path = os.path.join(ckpt_dir_or_file, f.readline()[:-1])\n else:\n ckpt_path = ckpt_dir_or_file\n ckpt = torch.load(ckpt_path, map_location=map_location)\n logger.info(' [*] Loading checkpoint from %s succeed!' % ckpt_path)\n return ckpt\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.cuda.manual_seed", "torch.manual_seed", "torch.from_numpy", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.save" ] ]
regasinaga/kmeans-clustering
[ "2a8df970287d1571fb654973bf9dcf152c6914fa" ]
[ "rgx.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plot\nimport matplotlib.patches as mpatches\ncolors = np.array(['#ff3333', '#ff6633','#ff9933',\n\t\t\t\t\t\t\t'#ffcc33', '#ffff33','#ccff33',\n\t\t\t\t\t\t\t'#99ff33', '#33ff33', '#33ff99',\n\t\t\t\t\t\t\t'#33ffff', '#3399ff', '#3333ff',\n\t\t\t\t\t\t\t'#9933ff', '#ff33ff', '#ff3366'])\n\t\t\t\t\t\t\t\ndef visualize(d, t=None, title=\"plot\", mode=\"supervised\"):\n\tglobal colors\n\tmark = 'o'\n\tif mode==\"supervised\":\n\t\ttn = len(np.unique(t))\n\t\tfor tc in range(0, tn):\n\t\t\tind = np.where(t==tc+1)\n\t\t\tplot.scatter(d[ind,0], d[ind,1], marker=mark, color=colors[tc])\n\telif mode==\"unsupervised\":\n\t\tplot.scatter(d[:,0], d[:,1], marker=mark, color=colors[2])\n\tplot.title(title)\n\treturn plot\n\t\ndef visualize_using_centro(d, c, title=\"plot\"):\n\tglobal colors\n\tmark = 'o'\n\t\n\tk = len(c)\n\tdist_mat = np.zeros((len(d), k))\n\tfor j in range(0,k):\n\t\tdist_mat[:,j] = euclid(d[:,:], c[j,:])\n\t\n\ta = np.argmin(dist_mat,axis=1)\n\t\n\tfor j in range(0,k):\n\t\tind = (np.where(a == j))[0]\n\t\tplot.scatter(d[ind,0], d[ind,1], marker=mark, color=colors[j])\n\tplot.title(title)\n\treturn plot\n\t\ndef visualize_centro(c):\n\tmark = 'x'\n\tplot.scatter(c[:,0], c[:,1], marker=mark, color='#000000')\n\treturn plot\n\t\ndef train_kmeans(x, t=None, k=2, maxiter=1000):\n\tprint('K-Means training begins')\n\tc_ind = np.random.choice(len(x),k)\n\tc = x[c_ind,:]\n\t\n\tif t is not None:\n\t\ttn = len(np.unique(t))\n\t\tfor tc in range(0, tn):\n\t\t\tind = np.where(t==tc+1)\n\t\t\ts_ind = np.random.permutation(ind[0])\n\t\t\t\n\t\t\tc[tc,:] = x[s_ind[0],:]\n\t\t\tif tc == k: break\n\t\n\tdist_mat = np.zeros((len(x), k))\n\t\n\tsaturation = 0\n\tconverged = False\n\titer = 0\n\tprev_sse = 1e10\n\t\n\tfor iter in range(maxiter):\n\t\t# get distance between data to all centroid\n\t\tfor j in range(0,k):\n\t\t\tdist_mat[:,j] = euclid(x[:,:], c[j,:])\n\n\t\t# get nearest centroid \n\t\ta = np.argmin(dist_mat,axis=1)\n\t\tcurrent_sse = sse(x,c,k,a=a)\n\t\t\n\t\t# update centroid\n\t\tfor j in range(0,k):\n\t\t\tind = (np.where(a == j))[0]\n\t\t\tif len(ind) == 0: continue\n\t\t\tc[j,:] = np.mean(x[ind,:], axis=0)\n\t\t\n\t\tprint('sse:\\t', current_sse)\n\t\tif np.abs(current_sse-prev_sse) <= 1:\n\t\t\tsaturation += 1\n\t\t\tif saturation == 10:\n\t\t\t\tbreak\t\n\t\telse:\n\t\t\tsaturation = 0\n\t\t\t\n\t\tprev_sse = current_sse\n\n\treturn c\n\ndef euclid(p, q):\n\treturn np.sqrt(np.sum(np.power(p - q, 2.0), axis=1))\n\t\ndef sse(x, c, k, a=None):\n\tr = 0\n\tif a is None:\n\t\tdist_mat = np.zeros((len(x), k))\n\t\tfor j in range(0,k):\n\t\t\tdist_mat[:,j] = euclid(x[:,:], c[j,:])\n\t\t\n\t\ta = np.argmin(dist_mat,axis=1)\n\t\t\n\tfor j in range(0, k):\n\t\tind = np.where(a == j)\n\t\tr += np.sum(np.sum(np.power(x[ind,:] - c[j,:], 2.0), axis=1)) \n\n\treturn r" ]
[ [ "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "numpy.unique", "numpy.power", "numpy.random.permutation", "numpy.argmin", "numpy.mean", "numpy.array", "numpy.where" ] ]
sebtsh/PBO
[ "e40adbb488fbf848de2ac8fa01de77cf2ca71e7d" ]
[ "objectives.py" ]
[ "import numpy as np\n\n\ndef forrester(x):\n \"\"\"\n 1-dimensional test function by Forrester et al. (2008)\n Defined as f(x) = (6x-2)^2 * sin(12x-4)\n :param x: tensor of shape (..., 1), x1 in [0, 1]\n :return: tensor of shape (..., )\n \"\"\"\n x0 = x[..., 0]\n return (6 * x0 - 2) * (6 * x0 - 2) * np.sin(12 * x0 - 4)\n\n\ndef six_hump_camel(x):\n \"\"\"\n 2-D test function by Molga & Smutnicki (2005), restricted to [-1.5, 1.5] on both dimensions. 2 global minima.\n Has global minimum f(x) = -1.0316, at x = [0.0898, -0.7126] and x = [-0.0898, 0.7126]\n :param x: tensor of shape (..., 2), x1 in [-2, 2], x2 in [-2, 2]\n :return: tensor of shape (..., )\n \"\"\"\n x1 = x[..., 0]\n x2 = x[..., 1]\n return (4. - 2.1 * (x1 ** 2) + (1. / 3.) * (x1 ** 4)) * (x1 ** 2) + (x1 * x2) + (-4. + 4 * (x2 ** 2)) * (x2 ** 2)\n\n\ndef hartmann3d(x):\n \"\"\"\n 3-D test function from Unconstrained Global Optimization Test Problems, at\n http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/TestGO.htm\n Has 4 local minima, one of which is global. Has global minimum f(x) = -3.86278, at x = [0.114614, 0.555649,\n 0.852547]\n :param x: tensor of shape (..., 3), x in [0, 1] for all dimensions\n :return: tensor of shape (..., )\n \"\"\"\n alpha = np.array([1., 1.2, 3.0, 3.2])\n A = np.array([[3.0, 10, 30],\n [0.1, 10, 35],\n [3.0, 10, 30],\n [0.1, 10, 35]])\n P = 0.0001 * np.array([[3689, 1170, 2673],\n [4699, 4387, 7470],\n [1091, 8732, 5547],\n [381, 5743, 8828]])\n\n x_repeat = np.repeat(np.expand_dims(x, axis=-2), 4, axis=-2)\n return -np.sum(alpha * np.exp(-np.sum(A * ((x_repeat - P) ** 2), axis=-1)), axis=-1)\n\n\ndef cifar(x, embedding_to_class):\n \"\"\"\n 2-D test function over 2-D embeddings of CIFAR-10 images. We define an arbitrary preference over classes as such:\n Airplane (0) > Automobile (1) > Ship (8) > Truck (9) > Bird (2) > Cat (3) > Deer (4) > Dog (5) > Frog (6) > Horse\n (7)\n\n :param x: tensor of shape (..., 2). CIFAR-10 embeddings\n :param embedding_to_class: dict\n :return: tensor of shape (..., 1). last dim is int from 0-9 representing class\n \"\"\"\n class_to_fval = {0: -5.,\n 1: -4.,\n 8: -3.,\n 9: -2.,\n 2: -1.,\n 3: 0.,\n 4: 1.,\n 5: 2.,\n 6: 3.,\n 7: 4.} # smaller is more preferred\n\n shape = x.shape[:-1]\n raveled = np.reshape(x, [-1, 2])\n raveled_shape = raveled.shape[:-1]\n raveled_fvals = np.zeros((raveled_shape[0], 1), dtype=np.float64)\n\n for i in range(raveled_shape[0]):\n raveled_fvals[i] = class_to_fval[embedding_to_class[raveled[i].data.tobytes()]]\n\n return np.reshape(raveled_fvals, shape + (1,))\n\n\ndef sushi(x, feat_to_fval_dict):\n \"\"\"\n 6-D test function over the Sushi dataset with the minor group feature removed (overlaps with major group).\n :param x: tensor of shape (..., 6). Sushi datum\n :param feat_to_fval_dict: dictionary from sushi features to fval\n :return: tensor of shape (..., ). Returns the fvals of each sushi datum in the array\n \"\"\"\n\n input_dims = x.shape[-1]\n shape = x.shape[:-1] # shape except last dim\n raveled = np.reshape(x, [-1, input_dims])\n raveled_shape = raveled.shape[:-1]\n raveled_fvals = np.zeros((raveled_shape[0]), dtype=np.float64)\n\n for i in range(raveled_shape[0]):\n raveled_fvals[i] = -feat_to_fval_dict[raveled[i].data.tobytes()] # here smaller is more preferred\n\n return np.reshape(raveled_fvals, shape)\n\n\ndef objective_get_f_neg(x, objective):\n \"\"\"\n Get objective function values of inputs. 
Note that this returns the negative of the above objective functions,\n as observation_model.gen_observation_from_f takes it that more positive functions values are preferred, while we\n are interested in finding the minima of the above functions.\n :param x: tensor of shape (..., num_choices, input_dims)\n :param objective: function that takes tensor of shape (..., input_dims) and outputs tensor of shape (..., ) with\n the objective function values\n :return: tensor of shape (..., 1)\n \"\"\"\n return -np.expand_dims(objective(x), axis=-1)\n\n\ndef objective_get_y(x, objective):\n \"\"\"\n Returns tensor with argmins of input array (input point with the lowest objective function value among choices)\n :param x: tensor of shape (..., num_choices, input_dims)\n :param objective: function that takes tensor of shape (..., input_dims) and outputs tensor of shape (..., ) with\n the objective function values\n :return: tensor of shape (..., input_dims)\n \"\"\"\n evals = objective(x) # (..., num_choices)\n indices = np.argmin(evals, axis=1)[:, np.newaxis, np.newaxis] # (..., 1, 1)\n return np.squeeze(np.take_along_axis(x, indices, axis=-2), axis=-2)\n" ]
[ [ "numpy.take_along_axis", "numpy.expand_dims", "numpy.reshape", "numpy.sin", "numpy.argmin", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
BARarch/contacts-scraper
[ "b1cad99c97c94d3ccd73bf0456001eb973de2a67" ]
[ "scrapeContactsToday.py" ]
[ "import pandas as pd\nimport scraperModelGS as smgs\n\nimport directoryManager as dm\nimport contactChecker as cc\n\ndef getContacts():\n \"\"\"Google Sheets API Code.\n Pulls urls for all NFL Team RSS Feeds\n https://docs.google.com/spreadsheets/d/1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs/\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(smgs.httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = smgs.discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n #specify sheetID and range\n spreadsheetId = '1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs'\n rangeName = 'Contacts!A2:N'\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found for Contact Records.')\n else:\n print('Contact Records Done')\n\n return values\n\ndef getContactKeys():\n \"\"\"Google Sheets API Code.\n Pulls urls for all NFL Team RSS Feeds\n https://docs.google.com/spreadsheets/d/1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs/\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(smgs.httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = smgs.discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n #specify sheetID and range\n spreadsheetId = '1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs'\n rangeName = 'Contacts!A1:Q1'\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName, majorDimension=\"ROWS\").execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found Contact Keys.')\n else:\n print('Contact Keys Done')\n\n return values[0]\n\ndef getAgencyDir():\n \"\"\"Google Sheets API Code.\n Pulls urls for all NFL Team RSS Feeds\n https://docs.google.com/spreadsheets/d/1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs/\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(smgs.httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = smgs.discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n #specify sheetID and range\n spreadsheetId = '1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs'\n rangeName = 'Org Leadership Websites!A2:E'\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found for Agency Directory.')\n else:\n print('Agency Directory Done')\n\n return values\n\ndef getAgencyDirKeys():\n \"\"\"Google Sheets API Code.\n Pulls urls for all NFL Team RSS Feeds\n https://docs.google.com/spreadsheets/d/1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs/\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(smgs.httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = smgs.discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n #specify sheetID and range\n spreadsheetId = '1p1LNyQhNhDBNEOkYQPV9xcNRe60WDlmnuiPp78hxkIs'\n rangeName = 'Org Leadership Websites!A1:Q1'\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found for Directory Keys.')\n else:\n print('Directory Keys Done')\n\n 
return values[0]\n\ndef sheetRecord(row, recordKeys):\n '''\n record['Account ID'] = row[0]\n record['Account Name'] = row[1]\n record['Contact ID'] = row[2]\n record['First Name'] = row[3]\n record['Last Name'] = row[4]\n record['Tittle'] = row[5]\n record['Email'] = row[6]\n record['Mailing Street'] = row[7]\n record['Mailing City'] = row[8]\n record['Mailing State'] = row[9]\n record['Mailing Zip'] = row[10]\n record['Mailing Country'] = row[11]\n record['Phone'] = row[12]\n record['Contact Source'] = row[13]\n \n recordKeys = ['Account ID', 'Account Name', ...]\n \n '''\n key = 0\n record = {}\n \n ## Copy all elements from sheet row read\n for elm in row:\n record[recordKeys[key]] = elm\n key += 1\n \n ## Fill in remainder with empty strings\n while key < len(recordKeys):\n record[recordKeys[key]] = ''\n key += 1\n \n return record\n\n\n\nif __name__ == '__main__':\n # Initialize Google Sheets for Write\n get_credentials = smgs.modelInit()\n\n # Get Headers from google sheets\n print('KEYS')\n contactKeys = getContactKeys()\n directoryKeys = getAgencyDirKeys()\n print('')\n\n # Get contact and orginization website data and structure with collected headings\n print('RECORDS')\n contactRecords = [sheetRecord(row, contactKeys) for row in getContacts()]\n orgRecords = [sheetRecord(row, directoryKeys) for row in getAgencyDir()]\n print('')\n\n # Create Dataframes\n cr = pd.DataFrame(contactRecords)\n dr = pd.DataFrame(orgRecords)\n print('DATAFRAMES READY')\n\n ## ////////////////// Initialize Contact Checker Classes with Fresh Data \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\n\n # Setup Contact Record Output\n cc.ContactSheetOutput.set_output(contactKeys)\n\n # For this scrape session Give the Verification Handler class an Orgsession with Organization Records\n dm.OrgSession.set_browser_path() ## IMPORTANT STEP: The browser path must be set to the current working directory which varies for different machines\n cc.VerificationHandler.set_orgRecords(dm.HeadlessOrgSession(orgRecords))\n\n # For this scrape session Give the Verification Handler class the contact record data\n cc.VerificationHandler.set_contactRecords(cr)\n print('CONTACT CHECKER READY')\n\n ## ////////////////// Scrape Base Case and Turn Off Browser \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\n\n t = cc.ScrapeForToday(orgRecords)\n\n try:\n cc.VerificationHandler.close_browser()\n except:\n print(\"Browser Closed\")\n\n print('\\nSCRAPE SESSION COMPLETE')\n\n\n\t\n" ]
[ [ "pandas.DataFrame" ] ]
georgen117/onnxruntime
[ "6f2f4721ecac719c6329fc2491a4f3a7cdf43336" ]
[ "onnxruntime/python/tools/tensorrt/perf/benchmark.py" ]
[ "import os\nimport csv\nimport timeit\nfrom datetime import datetime\nimport numpy\nimport logging\nimport coloredlogs\nimport numpy as np\nimport argparse\nimport copy\nimport json\nimport re\nimport sys\nimport onnxruntime\nfrom onnx import numpy_helper\nfrom perf_utils import *\nimport pprint\nimport time\nimport pandas as pd\nfrom float16 import *\n\ndebug = False\nsys.path.append('.')\nlogger = logging.getLogger('')\n\n# global ep variables \ncpu = \"CPUExecutionProvider\"\nacl = \"ACLExecutionProvider\"\ncuda = \"CUDAExecutionProvider\"\ncuda_fp16 = \"CUDAExecutionProvider_fp16\"\ntrt = \"TensorrtExecutionProvider\"\ntrt_fp16 = \"TensorrtExecutionProvider_fp16\"\nstandalone_trt = \"Standalone_TRT\"\nstandalone_trt_fp16 = \"Standalone_TRT_fp16\"\n\nep_to_provider_list = {\n cpu: [cpu],\n acl: [acl], \n cuda: [cuda],\n cuda_fp16: [cuda],\n trt: [trt, cuda],\n trt_fp16: [trt, cuda]\n}\n\n# latency gain headers \ntrt_cuda_gain = 'TRT_CUDA_gain(%)'\ntrt_cuda_fp16_gain = 'TRT_CUDA_fp16_gain(%)'\ntrt_native_gain = 'TRT_Standalone_gain(%)'\ntrt_native_fp16_gain = 'TRT_Standalone_fp16_gain(%)'\n\n# metadata\nFAIL_MODEL_FILE = \".fail_model_map\"\nLATENCY_FILE = \".latency_map\"\nMETRICS_FILE = \".metrics_map\"\nMEMORY_FILE = './temp_memory.csv'\n\ndef split_and_sort_output(string_list):\n string_list = string_list.split(\"\\n\")\n string_list.sort()\n return string_list\n\ndef is_dynamic(model): \n inp = model.graph.input[0]\n for dim in inp.type.tensor_type.shape.dim: \n if not dim.HasField('dim_value'):\n return True\n return False \n\ndef run_trt_standalone(trtexec, model_path, ort_inputs, all_inputs_shape, fp16):\n logger.info(\"running standalone trt\")\n onnx_model_path = \"--onnx=\" + model_path\n input_shape = []\n\n logger.info(all_inputs_shape)\n\n for i in range(len(ort_inputs)):\n name = ort_inputs[i].name\n\n shape = []\n for j in all_inputs_shape[i]:\n shape.append(str(j))\n shape = \"x\".join(shape)\n shape = name + ':' + shape\n input_shape.append(shape)\n \n shapes_arg = '--optShapes=' + ','.join(input_shape)\n logger.info(shapes_arg)\n \n result = {}\n command = [trtexec, onnx_model_path, \"--percentile=90\", \"--explicitBatch\"]\n \n model = onnx.load(model_path)\n if is_dynamic(model):\n command.extend([shapes_arg])\n if fp16: \n command.extend([\"--fp16\"])\n out = get_output(command)\n \n tmp = out.split(\"\\n\")\n target_list = []\n for t in tmp:\n if 'mean' in t:\n target_list.append(t)\n\n if 'percentile' in t:\n target_list.append(t)\n\n target = target_list[2]\n avg_latency_match = re.search('mean = (.*?) ms', target)\n if avg_latency_match:\n result[\"average_latency_ms\"] = avg_latency_match.group(1) # extract number\n percentile_match = re.search('percentile\\(90%\\) = (.*?) 
ms', target)\n if percentile_match:\n result[\"latency_90_percentile\"] = percentile_match.group(1) # extract number\n logger.info(result)\n return result\n\ndef get_latency_result(runtimes, batch_size):\n latency_ms = sum(runtimes) / float(len(runtimes)) * 1000.0\n latency_variance = numpy.var(runtimes, dtype=numpy.float64) * 1000.0\n throughput = batch_size * (1000.0 / latency_ms)\n\n result = {\n \"test_times\": len(runtimes),\n \"latency_variance\": \"{:.2f}\".format(latency_variance),\n \"latency_90_percentile\": \"{:.2f}\".format(numpy.percentile(runtimes, 90) * 1000.0),\n \"latency_95_percentile\": \"{:.2f}\".format(numpy.percentile(runtimes, 95) * 1000.0),\n \"latency_99_percentile\": \"{:.2f}\".format(numpy.percentile(runtimes, 99) * 1000.0),\n \"average_latency_ms\": \"{:.2f}\".format(latency_ms),\n \"QPS\": \"{:.2f}\".format(throughput),\n }\n return result\n\n\ndef get_ort_session_inputs_and_outputs(name, session, ort_input):\n\n sess_inputs = {}\n sess_outputs = None\n\n if 'bert_squad' in name.lower() or 'bert-squad' in name.lower():\n unique_ids_raw_output = ort_input[0]\n input_ids = ort_input[1]\n input_mask = ort_input[2]\n segment_ids = ort_input[3]\n\n sess_inputs = {\n \"unique_ids_raw_output___9:0\": unique_ids_raw_output,\n \"input_ids:0\": input_ids[0:1],\n \"input_mask:0\": input_mask[0:1],\n \"segment_ids:0\": segment_ids[0:1]}\n sess_outputs = [\"unique_ids:0\", \"unstack:0\", \"unstack:1\"]\n\n elif 'bidaf' in name.lower():\n sess_inputs = {\n \"context_word\": ort_input[0],\n \"context_char\": ort_input[2],\n \"query_word\": ort_input[1],\n \"query_char\": ort_input[3]}\n sess_outputs = [\"start_pos\",\"end_pos\"]\n\n elif 'yolov4' in name.lower():\n sess_inputs[session.get_inputs()[0].name] = ort_input[0]\n sess_outputs = ['Identity:0']\n\n elif 'shufflenet-v2' in name.lower() or 'shufflenet_v2' in name.lower():\n sess_inputs[session.get_inputs()[0].name] = ort_input\n\n else:\n sess_inputs = {}\n for i in range(len(session.get_inputs())):\n sess_inputs[session.get_inputs()[i].name] = ort_input[i]\n\n return (sess_inputs, sess_outputs)\n\ndef track_ep_memory(ep): \n return trt in ep or cuda in ep or standalone_trt in ep\n\ndef get_trtexec_pid(df, python_pid): \n for pid in df['pid'].tolist(): \n if pid != python_pid: \n return pid\n\ndef get_max_memory(trtexec): \n df = pd.read_csv(MEMORY_FILE)\n pid = df['pid'].iloc[0]\n if trtexec: \n pid = get_trtexec_pid(df, pid) \n \n mem_series = df.loc[df['pid'] == pid, ' used_gpu_memory [MiB]']\n max_mem = max(mem_series.str.replace(' MiB','').astype(int))\n return max_mem\n\ndef start_memory_tracking(): \n logger.info(\"starting memory tracking process\")\n p = subprocess.Popen([\"nvidia-smi\", \"--query-compute-apps=pid,used_memory\", \"--format=csv\", \"-l\", \"1\", \"-f\", MEMORY_FILE])\n return p\n\ndef end_memory_tracking(p, trtexec, success): \n logger.info(\"terminating memory tracking process\")\n p.terminate()\n p.wait()\n p.kill()\n mem_usage = None\n if success:\n mem_usage = get_max_memory(trtexec) \n if os.path.exists(MEMORY_FILE):\n os.remove(MEMORY_FILE)\n return mem_usage\n\ndef inference_ort(args, name, session, ep, ort_inputs, result_template, repeat_times, batch_size):\n runtimes = []\n if args.input_data == \"random\":\n repeat_times = 1 # warn-up run is included in ort_inputs\n else:\n repeat_times += 1 # add warn-up run\n \n mem_usage = None\n for ort_input in ort_inputs:\n sess_inputs, sess_outputs = get_ort_session_inputs_and_outputs(name, session, ort_input)\n if debug:\n logger.info(\"ORT 
session inputs:\")\n logger.info(sess_inputs)\n logger.info(\"ORT session outputs:\")\n logger.info(sess_outputs)\n\n try:\n runtime = timeit.repeat(lambda: session.run(sess_outputs, sess_inputs), number=1, repeat=repeat_times)\n runtimes += runtime[1:] # remove warmup\n \n except Exception as e:\n logger.error(e)\n return None\n\n logger.info(runtimes)\n\n result = {}\n result.update(result_template)\n result.update({\"io_binding\": False})\n latency_result = get_latency_result(runtimes, batch_size)\n result.update(latency_result)\n return result\n\ndef inference_ort_and_get_prediction(name, session, ort_inputs):\n\n ort_outputs = []\n for ort_input in ort_inputs:\n sess_inputs, sess_outputs = get_ort_session_inputs_and_outputs(name, session, ort_input)\n if debug:\n logger.info(\"ORT session inputs:\")\n logger.info(sess_inputs)\n logger.info(\"ORT session outputs:\")\n logger.info(sess_outputs)\n\n result = session.run(sess_outputs, sess_inputs)\n \n if debug:\n logger.info(\"ORT session output results:\")\n logger.info(result)\n\n # handle shape of output differently\n if 'bert_squad' in name.lower():\n ort_outputs.append([result])\n elif 'shufflenet-v2' in name.lower() or 'shufflenet_v2' in name.lower():\n ort_outputs.append(result[0])\n else:\n ort_outputs.append(result)\n\n return ort_outputs\n\ndef get_acl_version():\n from pathlib import Path\n home = str(Path.home())\n p = subprocess.run([\"find\", home, \"-name\", \"libarm_compute.so\"], check=True, stdout=subprocess.PIPE)\n libarm_compute_path = p.stdout.decode(\"ascii\").strip()\n if libarm_compute_path == '':\n return \"No Compute Library Found\"\n else:\n p = subprocess.run([\"strings\", libarm_compute_path], check=True, stdout=subprocess.PIPE) \n libarm_so_strings = p.stdout.decode(\"ascii\").strip()\n version_match = re.search(r'arm_compute_version.*\\n', libarm_so_strings)\n version = version_match.group(0).split(' ')[0]\n return version\n\n#######################################################################################################################################\n# The following two lists will be generated.\n#\n# inputs: [[test_data_0_input_0.pb, test_data_0_input_1.pb ...], [test_data_1_input_0.pb, test_data_1_input_1.pb ...] ...]\n# outputs: [[test_data_0_output_0.pb, test_data_0_output_1.pb ...], [test_data_1_output_0.pb, test_data_1_output_1.pb ...] 
...]\n#######################################################################################################################################\ndef load_onnx_model_zoo_test_data(path, all_inputs_shape, data_type=\"fp32\"):\n logger.info(\"Parsing test data in {} ...\".format(path))\n output = get_output([\"find\", path, \"-name\", \"test_data*\", \"-type\", \"d\"])\n test_data_set_dir = split_and_sort_output(output)\n logger.info(test_data_set_dir)\n\n inputs = []\n outputs = []\n\n shape_flag = False\n # if not empty means input shape has been parsed before.\n if len(all_inputs_shape) > 0:\n shape_flag = True\n\n # find test data path\n for test_data_dir in test_data_set_dir:\n pwd = os.getcwd()\n os.chdir(test_data_dir)\n\n # load inputs\n output = get_output([\"find\", \".\", \"-name\", \"input*\"])\n input_data = split_and_sort_output(output)\n logger.info(input_data)\n\n input_data_pb = []\n for data in input_data:\n tensor = onnx.TensorProto()\n with open(data, 'rb') as f:\n tensor.ParseFromString(f.read())\n tensor_to_array = numpy_helper.to_array(tensor)\n if data_type == \"fp16\" and tensor_to_array.dtype == np.dtype(np.float32):\n tensor_to_array = tensor_to_array.astype(np.float16)\n input_data_pb.append(tensor_to_array)\n if not shape_flag:\n all_inputs_shape.append(input_data_pb[-1].shape)\n logger.info(all_inputs_shape[-1])\n inputs.append(input_data_pb)\n logger.info('Loaded {} inputs successfully.'.format(len(inputs)))\n\n # load outputs\n output = get_output([\"find\", \".\", \"-name\", \"output*\"])\n output_data = split_and_sort_output(output)\n\n if len(output_data) > 0 and output_data[0] != '':\n logger.info(output_data)\n output_data_pb = []\n for data in output_data:\n tensor = onnx.TensorProto()\n with open(data, 'rb') as f:\n tensor.ParseFromString(f.read())\n\n tensor_to_array = numpy_helper.to_array(tensor)\n\n if data_type == \"fp16\" and tensor_to_array.dtype == np.dtype(np.float32):\n tensor_to_array = tensor_to_array.astype(np.float16)\n output_data_pb.append(tensor_to_array)\n\n logger.info(np.array(output_data_pb[-1]).shape)\n outputs.append(output_data_pb)\n logger.info('Loaded {} outputs successfully.'.format(len(outputs)))\n\n os.chdir(pwd)\n\n return inputs, outputs\n\ndef generate_onnx_model_random_input(test_times, ref_input):\n inputs = []\n\n for i in range(test_times):\n\n input_data = []\n for tensor in ref_input:\n shape = tensor.shape\n dtype = tensor.dtype\n if dtype == np.int8 or \\\n dtype == np.uint8 or \\\n dtype == np.int16 or \\\n dtype == np.uint16 or \\\n dtype == np.int32 or \\\n dtype == np.uint32 or \\\n dtype == np.int64 or \\\n dtype == np.uint64:\n new_tensor = np.random.randint(0, np.max(tensor)+1, shape, dtype)\n else:\n new_tensor = np.random.random_sample(shape).astype(dtype)\n\n if debug:\n logger.info(\"original tensor:\")\n logger.info(tensor)\n logger.info(\"new random tensor:\")\n logger.info(new_tensor)\n logger.info(\"\\n\")\n\n input_data.append(new_tensor)\n inputs.append(input_data)\n\n return inputs\n\ndef percentage_in_allowed_threshold(e, percent_mismatch):\n percent_string = re.search(r'\\(([^)]+)', str(e)).group(1)\n if \"%\" in percent_string:\n percentage_wrong = float(percent_string.replace(\"%\",\"\"))\n return percentage_wrong < percent_mismatch\n else: \n return False # error in output \n\ndef validate(all_ref_outputs, all_outputs, rtol, atol, percent_mismatch):\n if len(all_ref_outputs) == 0:\n logger.info(\"No reference output provided.\")\n return True, None\n\n logger.info('Reference {} 
results.'.format(len(all_ref_outputs)))\n logger.info('Predicted {} results.'.format(len(all_outputs)))\n logger.info('rtol: {}, atol: {}'.format(rtol, atol))\n\n for i in range(len(all_outputs)):\n ref_outputs = all_ref_outputs[i]\n outputs = all_outputs[i]\n\n for j in range(len(outputs)):\n ref_output = ref_outputs[j]\n output = outputs[j]\n\n # Compare the results with reference outputs\n for ref_o, o in zip(ref_output, output):\n # abs(desired-actual) < rtol * abs(desired) + atol\n try:\n np.testing.assert_allclose(ref_o, o, rtol, atol)\n except Exception as e:\n if percentage_in_allowed_threshold(e, percent_mismatch): \n continue\n logger.error(e)\n return False, e\n\n logger.info('ONNX Runtime outputs are similar to reference outputs!')\n return True, None\n\n# not use for this script\ndef cleanup_files():\n files = []\n p = subprocess.Popen([\"find\", \".\", \"-name\", \"test_data_set*\", \"-type\", \"d\"], stdout=subprocess.PIPE)\n stdout, sterr = p.communicate()\n stdout = stdout.decode(\"ascii\").strip()\n files = files + stdout.split(\"\\n\")\n\n p = subprocess.Popen([\"find\", \".\", \"-name\", \"*.onnx\"], stdout=subprocess.PIPE)\n stdout, sterr = p.communicate()\n stdout = stdout.decode(\"ascii\").strip()\n files = files + stdout.split(\"\\n\")\n\n p = subprocess.Popen([\"find\", \".\", \"-name\", \"*.gz\"], stdout=subprocess.PIPE)\n stdout, sterr = p.communicate()\n stdout = stdout.decode(\"ascii\").strip()\n files = files + stdout.split(\"\\n\")\n\n for f in files:\n if \"custom_test_data\" in f:\n logger.info(f)\n continue\n subprocess.Popen([\"rm\",\"-rf\", f], stdout=subprocess.PIPE)\n\ndef remove_profiling_files(path):\n files = []\n out = get_output([\"find\", path, \"-name\", \"onnxruntime_profile*\"])\n files = files + out.split(\"\\n\")\n\n for f in files:\n if \"custom_test_data\" in f:\n continue\n subprocess.Popen([\"rm\",\"-rf\", f], stdout=subprocess.PIPE)\n\n\ndef update_fail_report(fail_results, model, ep, e_type, e):\n result = {}\n\n result[\"model\"] = model\n result[\"ep\"] = ep\n result[\"error type\"] = e_type\n result[\"error message\"] = re.sub('^\\n', '', str(e))\n\n fail_results.append(result)\n\ndef update_metrics_map(model_to_metrics, model_name, ep_to_operator):\n if len(ep_to_operator) <= 0:\n return\n\n if model_name not in model_to_metrics:\n model_to_metrics[model_name] = {}\n\n for ep, op_map in ep_to_operator.items():\n if ep not in model_to_metrics[model_name]:\n model_to_metrics[model_name][ep] = {}\n\n if ep == cuda or ep == cuda_fp16:\n model_to_metrics[model_name][ep]['ratio_of_ops_in_cuda_not_fallback_cpu'] = calculate_cuda_op_percentage(op_map) \n model_to_metrics[model_name][ep]['total_ops'] = get_total_ops(op_map) \n else:\n total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(op_map)\n model_to_metrics[model_name][ep]['total_ops'] = get_total_ops(op_map) \n model_to_metrics[model_name][ep]['total_trt_execution_time'] = total_trt_execution_time\n model_to_metrics[model_name][ep]['total_execution_time'] = total_execution_time\n model_to_metrics[model_name][ep]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt\n\n\ndef update_metrics_map_ori(model_to_metrics, name, ep_to_operator):\n if len(ep_to_operator) <= 0:\n return\n\n trt_op_map = None\n trt_fp16_op_map = None\n cuda_op_map = None\n cuda_fp16_op_map = None\n\n for ep, op_map in ep_to_operator.items():\n if ep == cuda:\n cuda_op_map = op_map\n elif ep == cuda_fp16:\n cuda_fp16_op_map = op_map\n elif 
ep == trt:\n trt_op_map = op_map\n elif ep == trt_fp16:\n trt_fp16_op_map = op_map\n\n\n if name not in model_to_metrics:\n model_to_metrics[name] = {}\n\n if cuda_op_map:\n model_to_metrics[name]['ratio_of_ops_in_cuda_not_fallback_cpu'] = calculate_cuda_op_percentage(cuda_op_map) \n\n if trt_op_map:\n total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_op_map)\n model_to_metrics[name]['total_trt_execution_time'] = total_trt_execution_time\n model_to_metrics[name]['total_execution_time'] = total_execution_time\n model_to_metrics[name]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt\n if cuda_op_map:\n total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_op_map, cuda_op_map)\n model_to_metrics[name]['total_ops_in_trt'] = total_ops_in_trt\n model_to_metrics[name]['total_ops'] = total_ops\n model_to_metrics[name]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt\n\n if trt_fp16_op_map:\n total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_fp16_op_map)\n name_ = name + \" (FP16)\"\n model_to_metrics[name_] = {}\n model_to_metrics[name_]['total_trt_execution_time'] = total_trt_execution_time\n model_to_metrics[name_]['total_execution_time'] = total_execution_time\n model_to_metrics[name_]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt\n if cuda_fp16_op_map:\n total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_fp16_op_map, cuda_op_map)\n model_to_metrics[name_]['total_ops_in_trt'] = total_ops_in_trt\n model_to_metrics[name_]['total_ops'] = total_ops\n model_to_metrics[name_]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt\n\n if debug:\n pp = pprint.PrettyPrinter(indent=4)\n logger.info('CUDA operator map:')\n pp.pprint(cuda_op_map)\n logger.info('TRT operator map:')\n pp.pprint(trt_op_map)\n logger.info('CUDA FP16 operator map:')\n pp.pprint(cuda_fp16_op_map)\n logger.info('TRT FP16 operator map:')\n pp.pprint(trt_fp16_op_map)\n\n\n###################################################################################################\n#\n# model: {ep1: {error_type: xxx, error_message: xxx}, ep2: {error_type: xx, error_message: xx}}\n#\n###################################################################################################\ndef update_fail_model_map(model_to_fail_ep, model_name, ep, e_type, e):\n\n if model_name in model_to_fail_ep and ep in model_to_fail_ep[model_name]:\n return\n\n if model_name not in model_to_fail_ep:\n model_to_fail_ep[model_name] = {} \n\n new_map = {}\n new_map[\"error_type\"] = e_type\n new_map[\"error_message\"] = re.sub('^\\n', '', str(e))\n model_to_fail_ep[model_name][ep] = new_map\n\n # If TRT fails, TRT FP16 should fail as well\n if ep == trt:\n ep_ = trt_fp16\n e_ = \"skip benchmarking since TRT failed already.\"\n new_map_1 = {}\n new_map_1[\"error_type\"] = e_type\n new_map_1[\"error_message\"] = e_\n model_to_fail_ep[model_name][ep_] = new_map_1 \n\ndef update_fail_model_map_ori(model_to_fail_ep, fail_results, model_name, ep, e_type, e):\n\n if model_name in model_to_fail_ep and ep in model_to_fail_ep[model_name]:\n return\n\n if model_name not in model_to_fail_ep:\n model_to_fail_ep[model_name] = {} \n \n model_to_fail_ep[model_name][ep] = e_type\n update_fail_report(fail_results, model_name, ep, e_type, e)\n\n # If TRT fails, TRT FP16 should fail as well\n if ep == trt:\n ep_ = trt_fp16\n error_message_ = \"skip benchmarking since 
TRT failed already.\"\n update_fail_report(fail_results, model_name, ep_, e_type, error_message_)\n model_to_fail_ep[model_name][ep_] = e_type\n\ndef skip_ep(model_name, ep, model_to_fail_ep):\n\n if model_name not in model_to_fail_ep:\n return False\n\n fail_ep_list = model_to_fail_ep[model_name]\n\n # if ep in fail_ep_list and fail_ep_list[ep] == \"runtime error\":\n if ep in fail_ep_list:\n logger.info(\"Skip testing \" + model_name + \" using \" + ep + \" since it has some issues.\")\n return True\n\n return False\n\ndef read_map_from_file(map_file):\n with open(map_file) as f:\n try:\n data = json.load(f)\n except Exception as e:\n return None\n\n return data\n\ndef write_map_to_file(result, file_name):\n existed_result = {}\n if os.path.exists(file_name):\n existed_result = read_map_from_file(file_name)\n \n for model, ep_list in result.items():\n if model in existed_result:\n existed_result[model] = {** existed_result[model], ** result[model]} \n else:\n existed_result[model] = result[model]\n\n with open(file_name, 'w') as file:\n file.write(json.dumps(existed_result)) # use `json.loads` to do the reverse\n\n\ndef get_cuda_version():\n nvidia_strings = get_output([\"nvidia-smi\"]) \n version = re.search(r'CUDA Version: \\d\\d\\.\\d', nvidia_strings).group(0) \n return version\n \ndef get_trt_version(workspace):\n libnvinfer = get_output([\"find\", workspace, \"-name\", \"libnvinfer.so.*\"])\n nvinfer = re.search(r'.*libnvinfer.so.*', libnvinfer).group(0)\n trt_strings = get_output([\"nm\", \"-D\", nvinfer])\n version = re.search(r'tensorrt_version.*', trt_strings).group(0)\n return version\n \ndef get_linux_distro(): \n linux_strings = get_output([\"cat\", \"/etc/os-release\"])\n stdout = linux_strings.split(\"\\n\")[:2]\n infos = []\n for row in stdout:\n row = re.sub('=', ': ', row)\n row = re.sub('\"', '', row)\n infos.append(row)\n return infos \n\ndef get_memory_info():\n mem_strings = get_output([\"cat\", \"/proc/meminfo\"])\n stdout = mem_strings.split(\"\\n\")\n infos = []\n for row in stdout:\n if \"Mem\" in row:\n row = re.sub(': +', ': ', row)\n infos.append(row)\n return infos\n\ndef get_cpu_info(): \n cpu_strings = get_output([\"lscpu\"])\n stdout = cpu_strings.split(\"\\n\")\n infos = []\n for row in stdout:\n if \"mode\" in row or \"Arch\" in row or \"name\" in row:\n row = re.sub(': +', ': ', row)\n infos.append(row)\n return infos\n\ndef get_gpu_info():\n info = get_output([\"lspci\", \"-v\"])\n infos = re.findall('NVIDIA.*', info)\n return infos\n\ndef get_system_info(workspace):\n info = {}\n info[\"cuda\"] = get_cuda_version()\n info[\"trt\"] = get_trt_version(workspace)\n info[\"linux_distro\"] = get_linux_distro()\n info[\"cpu_info\"] = get_cpu_info()\n info[\"gpu_info\"] = get_gpu_info()\n info[\"memory\"] = get_memory_info()\n\n return info\n\ndef find_model_path(path):\n output = get_output([\"find\", \"-L\", path, \"-name\", \"*.onnx\"])\n model_path = split_and_sort_output(output)\n logger.info(model_path)\n\n if model_path == ['']:\n return None\n\n target_model_path = []\n for m in model_path:\n if \"by_trt_perf\" in m or m.startswith('.'):\n continue\n target_model_path.append(m)\n\n logger.info(target_model_path)\n if len(target_model_path) > 1:\n logger.error(\"We expect to find only one model in \" + path)\n raise\n\n return target_model_path[0]\n\ndef find_model_directory(path):\n output = get_output([\"find\", \"-L\", path, \"-maxdepth\", \"1\", \"-mindepth\", \"1\", \"-name\", \"*\", \"-type\", \"d\"])\n model_dir = 
split_and_sort_output(output)\n if model_dir == ['']:\n return None\n\n return model_dir\n\ndef find_test_data_directory(path):\n output = get_output([\"find\", \"-L\", path, \"-maxdepth\", \"1\", \"-name\", \"test_data*\", \"-type\", \"d\"])\n test_data_dir = split_and_sort_output(output)\n logger.info(test_data_dir)\n\n if test_data_dir == ['']:\n return None\n\n return test_data_dir\n\ndef parse_models_info_from_directory(path, models):\n\n test_data_dir = find_test_data_directory(path) \n\n if test_data_dir:\n model_name = os.path.split(path)[-1]\n model_name = model_name + '_' + os.path.split(os.path.split(path)[0])[-1] # get opset version as model_name\n model_path = find_model_path(path)\n\n model = {}\n model[\"model_name\"] = model_name\n model[\"model_path\"] = model_path \n model[\"working_directory\"] = path \n model[\"test_data_path\"] = path \n\n models[model_name] = model \n\n logger.info(model)\n return\n \n model_dir = find_model_directory(path)\n \n if model_dir:\n for dir in model_dir:\n parse_models_info_from_directory(os.path.join(path, dir), models)\n \n\ndef parse_models_info_from_file(root_dir, path, models):\n\n # default working directory\n root_working_directory = root_dir + 'perf/'\n\n with open(path) as f:\n data = json.load(f)\n\n for row in data:\n\n if 'root_working_directory' in row:\n root_working_directory = row['root_working_directory']\n continue\n\n if 'model_name' in row:\n models[row['model_name']] = {}\n else:\n logger.error('Model name must be provided in models_info.json')\n raise\n\n model = models[row['model_name']]\n\n if 'working_directory' in row:\n if os.path.isabs(row['working_directory']):\n model['working_directory'] = row['working_directory']\n else:\n model['working_directory'] = os.path.join(root_working_directory, row['working_directory'])\n else:\n logger.error('Model path must be provided in models_info.json')\n raise\n\n if 'model_path' in row:\n model['model_path'] = row['model_path']\n else:\n logger.error('Model path must be provided in models_info.json')\n raise\n\n if 'test_data_path' in row:\n model['test_data_path'] = row['test_data_path']\n else:\n logger.error('Test data path must be provided in models_info.json')\n raise\n\n if 'model_path_fp16' in row:\n model['model_path_fp16'] = row['model_path_fp16']\n\n if 'test_data_path_fp16' in row:\n model['test_data_path_fp16'] = row['test_data_path_fp16']\n\n\ndef convert_model_from_float_to_float16(model_path):\n from onnxmltools.utils import load_model, save_model\n from float16 import convert_float_to_float16\n\n new_model_path = os.path.join(os.getcwd(), \"new_fp16_model_by_trt_perf.onnx\")\n if not os.path.exists(new_model_path):\n onnx_model = load_model(model_path)\n new_onnx_model = convert_float_to_float16(onnx_model)\n save_model(new_onnx_model, 'new_fp16_model_by_trt_perf.onnx')\n\n return new_model_path\n\ndef get_test_data(fp16, test_data_dir, all_inputs_shape):\n inputs = []\n ref_outputs = []\n\n # read input/output of test data\n if fp16:\n inputs, ref_outputs = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape, \"fp16\")\n else:\n inputs, ref_outputs = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape)\n\n return inputs, ref_outputs\n\ndef create_session(model_path, providers, session_options):\n logger.info(model_path)\n try:\n session = onnxruntime.InferenceSession(model_path, providers=providers, sess_options=session_options)\n return session\n except Exception as e:\n if \"shape inference\" in str(e):\n logger.info(\"Using model from 
symbolic_shape_infer.py\")\n\n new_model_path = model_path[:].replace(\".onnx\", \"_new_by_trt_perf.onnx\")\n exec = os.environ[\"SYMBOLIC_SHAPE_INFER\"]\n\n if not os.path.exists(new_model_path):\n p = subprocess.run(\"python3 \" + exec + \" --input \" + model_path + \" --output \" + new_model_path + \" --auto_merge\", shell=True, check=True, )\n logger.info(p) \n \n session = onnxruntime.InferenceSession(new_model_path, providers=providers, sess_options=session_options)\n return session\n else:\n raise Exception(e) \n\ndef run_onnxruntime(args, models):\n\n success_results = []\n model_to_latency = {} # model -> cuda and tensorrt latency\n model_to_metrics = {} # model -> metrics from profiling file\n model_to_fail_ep = {} # model -> failing ep\n\n ep_list = []\n if args.ep:\n ep_list.append(args.ep)\n else:\n if args.fp16:\n ep_list = [cpu, cuda, trt, cuda_fp16, trt_fp16]\n else:\n ep_list = [cpu, cuda, trt]\n\n validation_exemption = [trt_fp16]\n\n\n if os.path.exists(FAIL_MODEL_FILE):\n model_to_fail_ep = read_map_from_file(FAIL_MODEL_FILE)\n\n #######################\n # iterate model\n #######################\n for name, model_info in models.items():\n latency_result = {}\n path = model_info[\"working_directory\"]\n\n pwd = os.getcwd()\n if not os.path.exists(path):\n os.mkdir(path)\n os.chdir(path)\n path = os.getcwd()\n\n if args.running_mode == \"validate\": \n remove_profiling_files(path)\n \n inputs = []\n ref_outputs = []\n all_inputs_shape = [] # use for standalone trt\n ep_to_operator = {} # ep -> { operator -> count }\n profile_already_parsed = set()\n\n\n #######################\n # iterate ep\n #######################\n for ep in ep_list:\n if skip_ep(name, ep, model_to_fail_ep):\n continue\n \n if standalone_trt not in ep:\n ep_ = ep_to_provider_list[ep][0]\n if (ep_ not in onnxruntime.get_available_providers()):\n logger.error(\"No {} support\".format(ep_))\n continue\n\n model_path = model_info[\"model_path\"]\n test_data_dir = model_info[\"test_data_path\"]\n\n fp16 = False\n os.environ[\"ORT_TENSORRT_FP16_ENABLE\"] = \"1\" if \"fp16\" in ep else \"0\"\n logger.info(\"[Initialize] model = {}, ep = {} ...\".format(name, ep))\n \n # use float16.py for cuda fp16 only\n if \"cuda_fp16\" in ep: \n \n # handle model\n if \"model_path_fp16\" in model_info:\n model_path = model_info[\"model_path_fp16\"]\n\n else:\n try:\n model_path = convert_model_from_float_to_float16(model_path)\n fp16 = True\n except Exception as e:\n logger.error(e)\n update_fail_model_map(model_to_fail_ep, name, ep, 'script error', e)\n continue\n\n # handle test data\n if \"test_data_path_fp16\" in model_info:\n test_data_dir = model_info[\"test_data_path_fp16\"]\n fp16 = False \n \n inputs, ref_outputs = get_test_data(fp16, test_data_dir, all_inputs_shape)\n\n # generate random input data\n if args.input_data == \"random\":\n inputs = generate_onnx_model_random_input(args.test_times+1, inputs[0])\n\n #######################################\n # benchmark or validation\n #######################################\n if args.running_mode == 'benchmark':\n logger.info(\"\\n----------------------------- benchmark -------------------------------------\")\n\n # resolve providers to create session\n if standalone_trt in ep: \n providers = ep_to_provider_list[trt]\n else: \n providers = ep_to_provider_list[ep]\n\n options = onnxruntime.SessionOptions()\n options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL\n\n \n # create onnxruntime inference session\n try:\n sess = 
create_session(model_path, providers, options)\n\n except Exception as e:\n logger.error(e)\n update_fail_model_map(model_to_fail_ep, name, ep, 'runtime error', e)\n continue\n \n # memory tracking variables \n p = None # keep track of process to kill upon error\n mem_usage = None\n \n # get standalone TensorRT perf\n if standalone_trt in ep and args.trtexec: \n trtexec = True \n try: \n if args.track_memory: \n p = start_memory_tracking() \n result = run_trt_standalone(args.trtexec, model_path, sess.get_inputs(), all_inputs_shape, fp16)\n mem_usage = end_memory_tracking(p, trtexec, True)\n else: \n result = run_trt_standalone(args.trtexec, model_path, sess.get_inputs(), all_inputs_shape, fp16)\n except Exception as e: \n logger.error(e)\n if args.track_memory:\n end_memory_tracking(p, trtexec, False)\n update_fail_model_map(model_to_fail_ep, name, ep, 'runtime error', e)\n continue\n\n # inference with onnxruntime ep\n else: \n logger.info(\"start to inference {} with {} ...\".format(name, ep))\n logger.info(sess.get_providers())\n\n if sess:\n logger.info(\"Model inputs nodes:\")\n for input_meta in sess.get_inputs():\n logger.info(input_meta)\n logger.info(\"Model outputs nodes:\")\n for output_meta in sess.get_outputs():\n logger.info(output_meta)\n\n batch_size = 1\n result_template = {\n \"engine\": \"onnxruntime\",\n \"version\": onnxruntime.__version__,\n \"device\": ep,\n \"fp16\": fp16,\n \"io_binding\": False,\n \"model_name\": name,\n \"inputs\": len(sess.get_inputs()),\n \"batch_size\": batch_size,\n \"sequence_length\": 1,\n \"datetime\": str(datetime.now()),}\n \n if args.track_memory and track_ep_memory(ep): \n trtexec = False\n p = start_memory_tracking() \n result = inference_ort(args, name, sess, ep, inputs, result_template, args.test_times, batch_size)\n success = True if result else False\n mem_usage = end_memory_tracking(p, trtexec, success)\n else: \n result = inference_ort(args, name, sess, ep, inputs, result_template, args.test_times, batch_size)\n if result:\n latency_result[ep] = {}\n latency_result[ep][\"average_latency_ms\"] = result[\"average_latency_ms\"]\n latency_result[ep][\"latency_90_percentile\"] = result[\"latency_90_percentile\"]\n if mem_usage: \n latency_result[ep][\"memory\"] = mem_usage\n\n if not args.trtexec: # skip standalone\n success_results.append(result)\n\n model_to_latency[name] = copy.deepcopy(latency_result)\n logger.info(\"---------------------------- benchmark [end] ----------------------------------\\n\")\n\n\n elif args.running_mode == 'validate':\n logger.info(\"\\n----------------------------- validate -------------------------------------\")\n\n # enable profiling to generate profiling file for analysis\n options = onnxruntime.SessionOptions()\n options.enable_profiling = True\n options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL\n time.sleep(1) # avoid to generate same profile file name\n\n # create onnxruntime inference session\n try:\n sess = create_session(model_path, ep_to_provider_list[ep], options)\n except Exception as e:\n logger.error(e)\n update_fail_model_map(model_to_fail_ep, name, ep, 'runtime error', e)\n continue\n\n sess.disable_fallback()\n\n logger.info(\"start to inference {} with {} ...\".format(name, ep))\n logger.info(sess.get_providers())\n\n if sess:\n logger.info(\"Model inputs nodes:\")\n for input_meta in sess.get_inputs():\n logger.info(input_meta)\n logger.info(\"Model outputs nodes:\")\n for output_meta in sess.get_outputs():\n logger.info(output_meta)\n\n # run 
inference and validate the result\n #\n # currently skip TensorRT float16 validation intentionally\n if ep not in validation_exemption:\n try:\n ort_outputs = inference_ort_and_get_prediction(name, sess, inputs)\n\n status = validate(ref_outputs, ort_outputs, args.rtol, args.atol, args.percent_mismatch)\n if not status[0]:\n update_fail_model_map(model_to_fail_ep, name, ep, 'result accuracy issue', status[1])\n continue\n except Exception as e:\n logger.error(e)\n update_fail_model_map(model_to_fail_ep, name, ep, 'runtime error', e)\n continue\n\n # Run inference again. the reason is that some ep like tensorrt\n # it takes much longer time to generate graph on first run and\n # we need to skip the perf result of that expensive run.\n inference_ort_and_get_prediction(name, sess, inputs)\n else:\n inference_ort_and_get_prediction(name, sess, inputs)\n inference_ort_and_get_prediction(name, sess, inputs)\n\n sess.end_profiling()\n\n # get metrics from profiling file\n metrics = get_profile_metrics(path, profile_already_parsed, logger)\n if metrics:\n logger.info(ep)\n ep_to_operator[ep] = metrics\n\n logger.info(\"---------------------------- validate [end] ----------------------------------\\n\")\n\n ####################\n # end of iterate ep\n ####################\n\n\n # get percentage of execution time and operators in TRT\n update_metrics_map(model_to_metrics, name, ep_to_operator)\n\n # cleanup_files()\n os.chdir(pwd)\n\n # end of model\n\n return success_results, model_to_latency, model_to_fail_ep, model_to_metrics\n\ndef calculate_gain(value, ep1, ep2): \n ep1_latency = float(value[ep1]['average_latency_ms'])\n ep2_latency = float(value[ep2]['average_latency_ms'])\n gain = (ep2_latency - ep1_latency)*100/ep2_latency\n return gain\n\ndef add_improvement_information(model_to_latency):\n for key, value in model_to_latency.items():\n if trt in value and cuda in value:\n gain = calculate_gain(value, trt, cuda)\n value[trt_cuda_gain] = \"{:.2f} %\".format(gain)\n if trt_fp16 in value and cuda_fp16 in value:\n gain = calculate_gain(value, trt_fp16, cuda_fp16)\n value[trt_cuda_fp16_gain] = \"{:.2f} %\".format(gain)\n if trt in value and standalone_trt in value:\n gain = calculate_gain(value, trt, standalone_trt)\n value[trt_native_gain] = \"{:.2f} %\".format(gain)\n if trt_fp16 in value and standalone_trt_fp16 in value:\n gain = calculate_gain(value, trt_fp16, standalone_trt_fp16)\n value[trt_native_fp16_gain] = \"{:.2f} %\".format(gain)\n\ndef output_details(results, csv_filename):\n need_write_header = True \n if os.path.exists(csv_filename):\n need_write_header = False \n\n with open(csv_filename, mode=\"a\", newline='') as csv_file:\n column_names = [\n \"engine\", \"version\", \"device\", \"fp16\", \"io_binding\", \"model_name\", \"inputs\", \"batch_size\",\n \"sequence_length\", \"datetime\", \"test_times\", \"QPS\", \"average_latency_ms\", \"latency_variance\",\n \"latency_90_percentile\", \"latency_95_percentile\", \"latency_99_percentile\"\n ]\n\n csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)\n if need_write_header:\n csv_writer.writeheader()\n for result in results:\n csv_writer.writerow(result)\n\ndef output_fail(model_to_fail_ep, csv_filename):\n\n with open(csv_filename, mode=\"w\", newline='') as csv_file:\n column_names = [\"model\", \"ep\", \"error type\", \"error message\"]\n\n csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)\n csv_writer.writeheader()\n\n for model, model_info in model_to_fail_ep.items():\n for ep, ep_info in 
model_info.items():\n result = {}\n result[\"model\"] = model\n result[\"ep\"] = ep\n result[\"error type\"] = ep_info[\"error_type\"]\n result[\"error message\"] = ep_info[\"error_message\"]\n csv_writer.writerow(result)\n \ndef read_success_from_file(success_file):\n success_results = []\n with open(success_file) as success:\n csv_reader = csv.DictReader(success)\n for row in csv_reader: \n success_results.append(row)\n\n success_json = json.loads(json.dumps(success_results, indent=4))\n return success_json\n\ndef add_status_dict(status_dict, model_name, ep, status): \n if model_name not in status_dict:\n status_dict[model_name] = {}\n status_dict[model_name][ep] = status\n\ndef build_status(status_dict, results, is_fail):\n \n if is_fail:\n for model, model_info in results.items():\n for ep, ep_info in model_info.items(): \n model_name = model\n ep = ep\n status = 'Fail'\n add_status_dict(status_dict, model_name, ep, status)\n else: \n for model, value in results.items():\n for ep, ep_info in value.items(): \n model_name = model\n ep = ep\n status = 'Pass'\n add_status_dict(status_dict, model_name, ep, status)\n\n return status_dict\n\ndef output_status(results, csv_filename):\n \n need_write_header = True \n if os.path.exists(csv_filename):\n need_write_header = False \n\n with open(csv_filename, mode=\"a\", newline='') as csv_file:\n column_names = [\"Model\",\n cpu,\n cuda + \" fp32\",\n trt + \" fp32\",\n standalone_trt + \" fp32\",\n cuda + \" fp16\",\n trt + \" fp16\",\n standalone_trt + \" fp16\"\n ]\n\n csv_writer = csv.writer(csv_file)\n\n if need_write_header:\n csv_writer.writerow(column_names)\n \n cpu_status = \"\"\n cuda_fp32_status = \"\"\n trt_fp32_status = \"\"\n standalone_fp32_status = \"\"\n cuda_fp16_status = \"\"\n trt_fp16_status = \"\"\n standalone_fp16_status = \"\"\n \n\n for model_name, ep_dict in results.items():\n for ep, status in ep_dict.items():\n if ep == cpu: \n cpu_status = status \n elif ep == cuda: \n cuda_fp32_status = status\n elif ep == trt: \n trt_fp32_status = status\n elif ep == standalone_trt:\n standalone_fp32_status = status\n elif ep == cuda_fp16: \n cuda_fp16_status = status\n elif ep == trt_fp16:\n trt_fp16_status = status\n elif ep == standalone_trt_fp16: \n standalone_fp16_status = status\n else: \n continue\n \n row = [model_name,\n cpu_status, \n cuda_fp32_status, \n trt_fp32_status, \n standalone_fp32_status, \n cuda_fp16_status, \n trt_fp16_status, \n standalone_fp16_status]\n csv_writer.writerow(row)\n\ndef output_latency(results, csv_filename):\n need_write_header = True \n if os.path.exists(csv_filename):\n need_write_header = False \n\n with open(csv_filename, mode=\"a\", newline='') as csv_file:\n column_names = [\"Model\",\n \"CPU fp32 \\nmean (ms)\",\n \"CPU fp32 \\n 90th percentile (ms)\",\n \"CUDA fp32 \\nmean (ms)\",\n \"CUDA fp32 \\n90th percentile (ms)\",\n \"CUDA EP fp32 \\npeak memory usage (MiB)\",\n \"TRT EP fp32 \\nmean (ms)\",\n \"TRT EP fp32 \\n90th percentile (ms)\",\n \"TRT EP fp32 \\npeak memory usage (MiB)\",\n \"Standalone TRT fp32 \\nmean (ms)\",\n \"Standalone TRT fp32 \\n90th percentile (ms)\",\n \"Standalone TRT fp32 \\npeak memory usage (MiB)\",\n \"TRT v CUDA EP fp32 \\ngain (mean) (%)\",\n \"EP v Standalone TRT fp32 \\ngain (mean) (%)\",\n \"CUDA fp16 \\nmean (ms)\",\n \"CUDA fp16 \\n90th percentile (ms)\",\n \"CUDA EP fp16 \\npeak memory usage (MiB)\",\n \"TRT EP fp16 \\nmean (ms)\",\n \"TRT EP fp16 \\n90th percentile (ms)\",\n \"TRT EP fp16 \\npeak memory usage (MiB)\",\n \"Standalone TRT fp16 
\\nmean (ms)\",\n \"Standalone TRT fp16 \\n90th percentile (ms)\",\n \"Standalone TRT fp16 \\npeak memory usage (MiB)\",\n \"TRT v CUDA EP fp16 \\ngain (mean) (%)\", \n \"EP v Standalone TRT fp16 \\ngain (mean) (%)\"]\n csv_writer = csv.writer(csv_file)\n\n if need_write_header:\n csv_writer.writerow(column_names)\n\n for key, value in results.items():\n cpu_average = \"\" \n if cpu in value and \"average_latency_ms\" in value[cpu]:\n cpu_average = value[cpu][\"average_latency_ms\"]\n\n cpu_90_percentile = \"\"\n if cpu in value and \"latency_90_percentile\" in value[cpu]:\n cpu_90_percentile = value[cpu][\"latency_90_percentile\"]\n\n cuda_average = \"\"\n if cuda in value and 'average_latency_ms' in value[cuda]:\n cuda_average = value[cuda]['average_latency_ms']\n\n cuda_90_percentile = \"\"\n if cuda in value and 'latency_90_percentile' in value[cuda]:\n cuda_90_percentile = value[cuda]['latency_90_percentile']\n\n cuda_memory = \"\"\n if cuda in value and 'memory' in value[cuda]:\n cuda_memory = value[cuda]['memory']\n \n trt_average = \"\"\n if trt in value and 'average_latency_ms' in value[trt]:\n trt_average = value[trt]['average_latency_ms']\n\n trt_90_percentile = \"\"\n if trt in value and 'latency_90_percentile' in value[trt]:\n trt_90_percentile = value[trt]['latency_90_percentile']\n \n trt_memory = \"\"\n if trt in value and 'memory' in value[trt]:\n trt_memory = value[trt]['memory']\n\n standalone_trt_average = \"\"\n if standalone_trt in value and 'average_latency_ms' in value[standalone_trt]:\n standalone_trt_average = value[standalone_trt]['average_latency_ms']\n\n standalone_trt_90_percentile = \"\"\n if standalone_trt in value and 'latency_90_percentile' in value[standalone_trt]:\n standalone_trt_90_percentile = value[standalone_trt]['latency_90_percentile']\n \n standalone_trt_memory = \"\"\n if standalone_trt in value and 'memory' in value[standalone_trt]:\n standalone_trt_memory = value[standalone_trt]['memory']\n\n cuda_fp16_average = \"\"\n if cuda_fp16 in value and 'average_latency_ms' in value[cuda_fp16]:\n cuda_fp16_average = value[cuda_fp16]['average_latency_ms']\n\n cuda_fp16_memory = \"\"\n if cuda_fp16 in value and 'memory' in value[cuda_fp16]:\n cuda_fp16_memory = value[cuda_fp16]['memory']\n \n cuda_fp16_90_percentile = \"\"\n if cuda_fp16 in value and 'latency_90_percentile' in value[cuda_fp16]:\n cuda_fp16_90_percentile = value[cuda_fp16]['latency_90_percentile']\n\n trt_fp16_average = \"\"\n if trt_fp16 in value and 'average_latency_ms' in value[trt_fp16]:\n trt_fp16_average = value[trt_fp16]['average_latency_ms']\n\n trt_fp16_90_percentile = \"\"\n if trt_fp16 in value and 'latency_90_percentile' in value[trt_fp16]:\n trt_fp16_90_percentile = value[trt_fp16]['latency_90_percentile']\n\n trt_fp16_memory = \"\"\n if trt_fp16 in value and 'memory' in value[trt_fp16]:\n trt_fp16_memory = value[trt_fp16]['memory']\n \n standalone_trt_fp16_average = \"\"\n if standalone_trt_fp16 in value and 'average_latency_ms' in value[standalone_trt_fp16]:\n standalone_trt_fp16_average = value[standalone_trt_fp16]['average_latency_ms']\n\n standalone_trt_fp16_90_percentile = \"\"\n if standalone_trt_fp16 in value and 'latency_90_percentile' in value[standalone_trt_fp16]:\n standalone_trt_fp16_90_percentile = value[standalone_trt_fp16]['latency_90_percentile']\n \n standalone_trt_fp16_memory = \"\"\n if standalone_trt_fp16 in value and 'memory' in value[standalone_trt_fp16]:\n standalone_trt_fp16_memory = value[standalone_trt_fp16]['memory']\n\n row = [key,\n cpu_average, 
\n cpu_90_percentile, \n cuda_average,\n cuda_90_percentile,\n cuda_memory,\n trt_average,\n trt_90_percentile,\n trt_memory,\n standalone_trt_average,\n standalone_trt_90_percentile,\n standalone_trt_memory,\n value[trt_cuda_gain] if trt_cuda_gain in value else \" \",\n value[trt_native_gain] if trt_native_gain in value else \" \",\n cuda_fp16_average,\n cuda_fp16_90_percentile,\n cuda_fp16_memory,\n trt_fp16_average,\n trt_fp16_90_percentile,\n trt_fp16_memory,\n standalone_trt_fp16_average,\n standalone_trt_fp16_90_percentile,\n standalone_trt_fp16_memory,\n value[trt_cuda_fp16_gain] if trt_cuda_fp16_gain in value else \" \",\n value[trt_native_fp16_gain] if trt_native_fp16_gain in value else \" \"\n ]\n csv_writer.writerow(row)\n\n logger.info(f\"CUDA/TRT latency comparison are saved to csv file: {csv_filename}\")\n\ndef output_metrics(model_to_metrics, csv_filename):\n with open(csv_filename, mode=\"w\", newline='') as csv_file:\n column_names = [\"Model\",\n \"% CUDA operators (not fall back to CPU)\",\n \"Total TRT operators\",\n \"Total operators\",\n \"% TRT operator\",\n \"Total TRT execution time\",\n \"Total execution time\",\n \"% TRT execution time\"]\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n\n results = []\n for model, ep_info in model_to_metrics.items():\n\n result = {}\n result_fp16 = {}\n result[\"model_name\"] = model\n result_fp16[\"model_name\"] = model + \" (FP16)\"\n\n if cuda in ep_info:\n result['ratio_of_ops_in_cuda_not_fallback_cpu'] = ep_info[cuda]['ratio_of_ops_in_cuda_not_fallback_cpu']\n\n if trt in ep_info:\n result['total_trt_execution_time'] = ep_info[trt]['total_trt_execution_time']\n result['total_execution_time'] = ep_info[trt]['total_execution_time']\n result['ratio_of_execution_time_in_trt'] = ep_info[trt]['ratio_of_execution_time_in_trt']\n\n if cuda in ep_info and trt in ep_info: \n ########################################################################################\n # equation of % TRT ops:\n # (total ops in cuda json - cuda and cpu ops in trt json)/ total ops in cuda json\n ########################################################################################\n total_ops_in_cuda = ep_info[cuda][\"total_ops\"] \n cuda_cpu_ops_in_trt = ep_info[trt][\"total_ops\"]\n\n result['total_ops_in_trt'] = total_ops_in_cuda - cuda_cpu_ops_in_trt\n result['total_ops'] = total_ops_in_cuda\n result['ratio_of_ops_in_trt'] = (total_ops_in_cuda - cuda_cpu_ops_in_trt) / total_ops_in_cuda\n\n if cuda_fp16 in ep_info:\n result_fp16['ratio_of_ops_in_cuda_not_fallback_cpu'] = ep_info[cuda_fp16]['ratio_of_ops_in_cuda_not_fallback_cpu']\n\n if trt_fp16 in ep_info:\n result_fp16['total_trt_execution_time'] = ep_info[trt_fp16]['total_trt_execution_time']\n result_fp16['total_execution_time'] = ep_info[trt_fp16]['total_execution_time']\n result_fp16['ratio_of_execution_time_in_trt'] = ep_info[trt_fp16]['ratio_of_execution_time_in_trt']\n\n if cuda_fp16 in ep_info and trt_fp16 in ep_info: \n ########################################################################################\n # equation of % TRT ops:\n # (total ops in cuda json - cuda and cpu ops in trt json)/ total ops in cuda json\n ########################################################################################\n total_ops_in_cuda = ep_info[cuda_fp16][\"total_ops\"] \n cuda_cpu_ops_in_trt = ep_info[trt_fp16][\"total_ops\"]\n\n result_fp16['total_ops_in_trt'] = total_ops_in_cuda - cuda_cpu_ops_in_trt\n result_fp16['total_ops'] = total_ops_in_cuda\n 
result_fp16['ratio_of_ops_in_trt'] = (total_ops_in_cuda - cuda_cpu_ops_in_trt) / total_ops_in_cuda\n\n \n results.append(result)\n results.append(result_fp16)\n\n\n \n for value in results:\n row = [value['model_name'],\n value['ratio_of_ops_in_cuda_not_fallback_cpu'] if 'ratio_of_ops_in_cuda_not_fallback_cpu' in value else \" \",\n value['total_ops_in_trt'] if 'total_ops_in_trt' in value else \" \",\n value['total_ops'] if 'total_ops' in value else \" \",\n value['ratio_of_ops_in_trt'] if 'ratio_of_ops_in_trt' in value else \" \",\n value['total_trt_execution_time'] if 'total_trt_execution_time' in value else \" \",\n value['total_execution_time'] if 'total_execution_time' in value else \" \",\n value['ratio_of_execution_time_in_trt'] if 'ratio_of_execution_time_in_trt' in value else \" \",\n ]\n csv_writer.writerow(row)\n\n logger.info(f\"Tensorrt ratio metrics are saved to csv file: {csv_filename}\")\n\ndef output_system_info(result, csv_filename):\n with open(csv_filename, mode=\"a\", newline='') as csv_file:\n column_names = [\n \"cpu_info\", \"cuda\", \"gpu_info\", \"linux_distro\", \"memory\", \"trt\"\n ]\n\n csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)\n csv_writer.writeheader()\n csv_writer.writerow(result)\n\n logger.info(f\"System information are saved to csv file: {csv_filename}\")\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"-c\", \"--comparison\", required=False, default=\"cuda_trt\", choices=[\"cuda_trt\", \"acl\"], help=\"EPs to compare: CPU vs. CUDA vs. TRT or CPU vs. 
ACL\")\n\n parser.add_argument(\"-m\", \"--model_source\", required=False, default=\"model_list.json\", help=\"Model source: (1) model list file (2) model directory.\")\n\n parser.add_argument(\"-r\", \"--running_mode\", required=False, default=\"benchmark\", choices=[\"validate\", \"benchmark\"], help=\"Testing mode.\")\n\n parser.add_argument(\"-i\", \"--input_data\", required=False, default=\"fix\", choices=[\"fix\", \"random\"], help=\"Type of input data.\")\n\n parser.add_argument(\"-o\", \"--perf_result_path\", required=False, default=\"result\", help=\"Directory for perf result.\")\n \n parser.add_argument(\"-w\", \"--workspace\", required=False, default=\"/\", help=\"Workspace to find tensorrt and perf script (with models if parsing with model file)\")\n \n parser.add_argument(\"--track_memory\", required=False, default=True, help=\"Track CUDA and TRT Memory Usage\")\n\n parser.add_argument(\"--ep\", required=False, default=None, help=\"Specify ORT Execution Provider.\")\n \n parser.add_argument(\"--ep_list\", nargs=\"+\", required=False, default=None, help=\"Specify ORT Execution Providers list.\")\n\n parser.add_argument(\"--fp16\", required=False, default=True, action=\"store_true\", help=\"Inlcude Float16 into benchmarking.\")\n\n parser.add_argument(\"--trtexec\", required=False, default=None, help=\"trtexec executable path.\")\n\n # Validation options\n parser.add_argument(\"--percent_mismatch\", required=False, default=20.0, help=\"Allowed percentage of mismatched elements in validation.\")\n parser.add_argument(\"--rtol\", required=False, default=0, help=\"Relative tolerance for validating outputs.\")\n parser.add_argument(\"--atol\", required=False, default=20, help=\"Absolute tolerance for validating outputs.\")\n \n parser.add_argument(\"-t\",\n \"--test_times\",\n required=False,\n default=1,\n type=int,\n help=\"Number of repeat times to get average inference latency.\")\n\n parser.add_argument(\"--write_test_result\", type=str2bool, required=False, default=True, help=\"\")\n parser.add_argument(\"--benchmark_fail_csv\", required=False, default=None, help=\"\")\n parser.add_argument(\"--benchmark_success_csv\", required=False, default=None, help=\"\")\n parser.add_argument(\"--benchmark_latency_csv\", required=False, default=None, help=\"\")\n parser.add_argument(\"--benchmark_metrics_csv\", required=False, default=None, help=\"\")\n parser.add_argument(\"--system_info_csv\", required=False, default=None, help=\"\")\n\n args = parser.parse_args()\n return args\n\ndef setup_logger(verbose):\n if verbose:\n coloredlogs.install(level='DEBUG', fmt='[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s')\n else:\n coloredlogs.install(fmt='%(message)s')\n logging.getLogger(\"transformers\").setLevel(logging.WARNING)\n\ndef parse_models_helper(args, models): \n if \".json\" in args.model_source:\n logger.info(\"Parsing model information from file ...\")\n parse_models_info_from_file(args.workspace, args.model_source, models)\n else:\n logger.info(\"Parsing model information from directory ...\")\n parse_models_info_from_directory(args.model_source, models)\n\ndef main():\n args = parse_arguments()\n setup_logger(False)\n pp = pprint.PrettyPrinter(indent=4)\n \n logger.info(\"\\n\\nStart perf run ...\\n\")\n\n models = {}\n parse_models_helper(args, models)\n\n os.environ[\"SYMBOLIC_SHAPE_INFER\"] = os.path.join(os.getcwd(), \"../../symbolic_shape_infer.py\")\n\n perf_start_time = datetime.now()\n success_results, model_to_latency, model_to_fail_ep, model_to_metrics = 
run_onnxruntime(args, models)\n perf_end_time = datetime.now()\n\n logger.info(\"Done running the perf.\")\n logger.info(\"\\nTotal time for benchmarking all models: {}\".format(perf_end_time - perf_start_time))\n logger.info(list(models.keys()))\n\n logger.info(\"\\nTotal models: {}\".format(len(models)))\n \n fail_model_cnt = 0\n for key, value in models.items():\n if key in model_to_fail_ep: fail_model_cnt += 1\n logger.info(\"Fail models: {}\".format(fail_model_cnt))\n logger.info(\"Success models: {}\".format(len(models) - fail_model_cnt ))\n\n path = os.path.join(os.getcwd(), args.perf_result_path)\n if not os.path.exists(path):\n from pathlib import Path\n Path(path).mkdir(parents=True, exist_ok=True)\n\n time_stamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n \n if len(model_to_fail_ep) > 0:\n logger.info(\"\\n============================================\")\n logger.info(\"========== Failing Models/EPs ==============\")\n logger.info(\"============================================\")\n logger.info(model_to_fail_ep)\n write_map_to_file(model_to_fail_ep, FAIL_MODEL_FILE)\n\n if args.write_test_result:\n csv_filename = args.benchmark_fail_csv if args.benchmark_fail_csv else f\"benchmark_fail_{time_stamp}.csv\"\n csv_filename = os.path.join(path, csv_filename)\n output_fail(model_to_fail_ep, csv_filename)\n\n if len(model_to_latency) > 0:\n logger.info(\"\\n==========================================\")\n logger.info(\"=========== Models/EPs latency ===========\")\n logger.info(\"==========================================\")\n add_improvement_information(model_to_latency)\n pretty_print(pp, model_to_latency)\n write_map_to_file(model_to_latency, LATENCY_FILE)\n if args.write_test_result:\n csv_filename = args.benchmark_latency_csv if args.benchmark_latency_csv else f\"benchmark_latency_{time_stamp}.csv\"\n csv_filename = os.path.join(path, csv_filename)\n output_latency(model_to_latency, csv_filename)\n \n if success_results:\n csv_filename = args.benchmark_success_csv if args.benchmark_success_csv else f\"benchmark_success_{time_stamp}.csv\"\n csv_filename = os.path.join(path, csv_filename)\n output_details(success_results, csv_filename)\n\n if len(model_to_metrics) > 0:\n logger.info(\"\\n=========================================\")\n logger.info(\"========== Models/EPs metrics ==========\")\n logger.info(\"=========================================\")\n pretty_print(pp, model_to_metrics)\n write_map_to_file(model_to_metrics, METRICS_FILE)\n\n if args.write_test_result:\n csv_filename = args.benchmark_metrics_csv if args.benchmark_metrics_csv else f\"benchmark_metrics_{time_stamp}.csv\"\n csv_filename = os.path.join(path, csv_filename)\n output_metrics(model_to_metrics, csv_filename)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv", "numpy.random.random_sample", "numpy.dtype", "numpy.percentile", "numpy.max", "numpy.testing.assert_allclose", "numpy.var", "numpy.array" ] ]
LuCeHe/home-platform
[ "06f9370bfacecebd0c8623a3b8f0511532a9a1f0" ]
[ "tests/home_platform/test_core.py" ]
[ "# Copyright (c) 2017, IGLU consortium\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# - Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport logging\nimport numpy as np\nimport unittest\n\nfrom home_platform.core import Scene\nfrom home_platform.suncg import loadModel\n\nTEST_DATA_DIR = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), \"..\", \"data\")\nTEST_SUNCG_DATA_DIR = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), \"..\", \"data\", \"suncg\")\n\n\nclass TestScene(unittest.TestCase):\n def testInit(self):\n scene = Scene()\n\n # Load object to scene\n modelId = '126'\n modelFilename = os.path.join(\n TEST_SUNCG_DATA_DIR, \"object\", str(modelId), str(modelId) + \".egg\")\n assert os.path.exists(modelFilename)\n model = loadModel(modelFilename)\n model.setName('object-' + str(modelId))\n model.reparentTo(scene.scene)\n\n self.assertTrue(scene.getTotalNbHouses() == 0)\n self.assertTrue(scene.getTotalNbRooms() == 0)\n self.assertTrue(scene.getTotalNbObjects() == 1)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.WARN)\n np.seterr(all='raise')\n unittest.main()\n" ]
[ [ "numpy.seterr" ] ]
sciris/openpyexcel
[ "1fde667a1adc2f4988279fd73a2ac2660706b5ce" ]
[ "openpyexcel/compat/tests/test_compat.py" ]
[ "from __future__ import absolute_import\n# Copyright (c) 2010-2019 openpyexcel\nimport pytest\n\n\n@pytest.mark.parametrize(\"value, result\",\n [\n ('s', 's'),\n (2.0/3, '0.6666666666666666'),\n (1, '1'),\n (None, 'none'),\n (float('NaN'), ''),\n (float('inf'), ''),\n ]\n )\ndef test_safe_string(value, result):\n from openpyexcel.compat import safe_string\n assert safe_string(value) == result\n v = safe_string('s')\n assert v == 's'\n\n\n@pytest.mark.numpy_required\ndef test_numeric_types():\n from ..numbers import NUMERIC_TYPES, numpy, Decimal, long\n assert NUMERIC_TYPES == (int, float, long, Decimal, numpy.bool_,\n numpy.floating, numpy.integer)\n\n\n@pytest.mark.numpy_required\ndef test_numpy_tostring():\n from numpy import float_, bool_\n from .. import safe_string\n assert safe_string(float_(5.1)) == \"5.1\"\n assert safe_string(int(5)) == \"5\"\n assert safe_string(bool_(True)) == \"1\"\n\n\n@pytest.mark.skipif(\"sys.version_info[0]>=3\")\ndef test_safe_repr():\n from ..strings import safe_repr\n s = u\"D\\xfcsseldorf\"\n assert safe_repr(s) == s.encode(\"ascii\", \"backslashreplace\")\n\n\nfrom .. import deprecated\n\ndef test_deprecated_function(recwarn):\n\n @deprecated(\"no way\")\n def fn():\n return \"Hello world\"\n\n fn()\n w = recwarn.pop()\n assert issubclass(w.category, DeprecationWarning)\n assert w.filename\n assert w.lineno\n assert \"no way\" in str(w.message)\n\n\ndef test_deprecated_class(recwarn):\n\n @deprecated(\"\")\n class Simple:\n\n pass\n s = Simple()\n w = recwarn.pop()\n assert issubclass(w.category, DeprecationWarning)\n assert w.filename\n assert w.lineno\n\n\ndef test_deprecated_method(recwarn):\n\n class Simple:\n\n @deprecated(\"\")\n def do(self):\n return \"Nothing\"\n\n s = Simple()\n s.do()\n w = recwarn.pop()\n assert issubclass(w.category, DeprecationWarning)\n assert w.filename\n assert w.lineno\n\n\ndef test_no_deprecation_reason():\n\n with pytest.raises(TypeError):\n @deprecated\n def fn():\n return\n" ]
[ [ "numpy.bool_", "numpy.float_" ] ]
Aniket-Wali/Measure-Of-Document-Similarities-MODS-
[ "d6632082bdd114edbb072e0ffaa0397804e40dce" ]
[ "backend.py" ]
[ "import pymysql as py\nimport pandas as pd\nimport matplotlib as mplot\n\ncon = py.connect('DB_HOST', 'DB_USER', 'DB_PASSWORD', 'DB_NAME')\ncur = con.cursor()\n\n\ndef facultyLogin(usr, pwd):\n qry = \"select * from faculty where Emailid = '%s'\" % (usr)\n cur.execute(qry)\n row = cur.fetchone()\n con.commit()\n if row is None:\n return False\n else:\n if pwd in row:\n return True\n else:\n return False\n\n\ndef studnetLogin(usr, pwd):\n qry = \"select password from users where Emailid = '%s'\" % (usr)\n cur.execute(qry)\n row = cur.fetchone()\n con.commit()\n if row is None:\n return False\n else:\n if pwd in row:\n return True\n else:\n return False\n\n\ndef registerStudent():\n name = input('Enter your Name : ')\n roll = input('Enter your Roll No : ')\n email = input('Enter your Email Id : ')\n pwd = input('Enter your Password : ')\n course = input('Enter course Name : ')\n try:\n qry = \"insert into users(Name, Rollno, Emailid, password, Course) values('%s', '%s', '%s', '%s', '%s')\" % (\n name, roll, email, pwd, course)\n cur.execute(qry)\n con.commit()\n print (\"Student Registered Successfully....\")\n except Exception:\n print (\"Record with same Roll No or Email already Exist...\")\n\n\ndef showStudent():\n qry = \"select * from users\"\n df = pd.read_sql(qry, con)\n # print(df[['Name', 'Rollno', 'Emailid', 'Course', 'DateOfReg']])\n print(df)\n\n\ndef addDataRecord(filename, usr):\n qry = \"select * from users\"\n df1 = pd.read_sql(qry, con)\n data = df1[df1.Emailid == usr]\n x = df1.index[df1.Emailid == usr].tolist()\n i = x[0]\n try:\n qry1 = \"insert into filerecord( Name, Rollno, Emailid, Course, filename, Status) values('%s', '%s', '%s', '%s', '%s', '%s')\" % (\n data.Name[i], data.Rollno[i], usr, data.Course[i], filename, \"Submitted\")\n cur.execute(qry1)\n con.commit()\n except Exception:\n print (\"Assignment already uploaded...\")\n\n\ndef showFileRecord():\n qry = \"select * from filerecord\"\n df = pd.read_sql(qry, con)\n if df.empty:\n print (\"Record is Empty...\")\n\n else:\n print (df)\n\n\ndef showCheatRecord():\n qry = \"select * from cheatrecord\"\n df = pd.read_sql(qry, con)\n if df.empty:\n print (\"Record is Empty...\")\n\n else:\n print (df)\n\n\ndef updateQuery(qry):\n cur.execute(qry)\n con.commit()\n\n\ndef showParticularRecord(usr):\n qry = 'select * from filerecord where Emailid = \"%s\"' % (usr)\n df = pd.read_sql(qry, con)\n if df.empty:\n qry = 'select * from cheatrecord where Emailid = \"%s\"' % (usr)\n df = pd.read_sql(qry, con)\n if df.empty:\n print (\"Your assignment is not uploaded yet...\")\n else:\n print (df)\n else:\n print (df)\n\n\ndef cheatRecord(filename1, filename2, usr):\n qry = \"select * from users\"\n df1 = pd.read_sql(qry, con)\n data = df1[df1.Emailid == usr]\n x = df1.index[df1.Emailid == usr].tolist()\n i = x[0]\n try:\n qry1 = \"insert into cheatrecord( Name, Rollno, Emailid, Course, filename, Status) values('%s', '%s', '%s', '%s', '%s', '%s')\" % (\n data.Name[i], data.Rollno[i], usr, data.Course[i], filename1, \"Cheated\")\n cur.execute(qry1)\n con.commit()\n temp = \"update filerecord set Status = 'Cheated' where filename = '%s'\" % (filename2)\n updateQuery(temp)\n except Exception:\n print (\"You have already tried to copied your Assignment....\")\n" ]
[ [ "pandas.read_sql" ] ]