Dataset columns: repo_name (string, lengths 6–130), hexsha (list), file_path (list), code (list), apis (list)
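Each row below pairs one repository with parallel lists of file paths, blob SHAs, raw source strings, and per-file lists of the APIs the code calls. A hedged sketch of iterating rows with this schema via the HuggingFace datasets library; the dataset id "user/code-apis" is a hypothetical placeholder, not the real one:

from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repository on the Hub.
ds = load_dataset("user/code-apis", split="train")
for row in ds:
    # file_path, hexsha, code and apis are parallel lists, one entry per file.
    for path, source, api_calls in zip(row["file_path"], row["code"], row["apis"]):
        print(row["repo_name"], path, len(source), api_calls[:3])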
GNovich/CBIS-DDSM_alpha
[ "fe0ce942473af9f7bff3b0bd3b2306e1da41cb03" ]
[ "one_pixle_Adversery_CIFAR.py" ]
[ "from config import get_config\nimport argparse\nfrom ShapeLearner import ShapeLearner\nfrom ShapeLoader import ShapeDataSet\nfrom torch.utils.data import DataLoader, RandomSampler\nimport numpy as np\nimport pickle\nimport torch\nimport os\nimport pathlib\nimport sys\nfrom itertools import product\nimport re\nfrom os import path\nfrom tqdm import tqdm\nimport pandas as pd\nimport eagerpy as ep\nfrom functools import partial\nfrom foolbox import PyTorchModel\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torchvision import transforms, datasets\nsys.path.append('one-pixel-attack-pytorch')\nfrom attack import attack_all as OnePixleAttack\n\nsys.path.append('/mnt/md0/orville/Miriam/modular-loss-experiments-morph/')\nfrom src.models import DenseModel, ConvModel, DenseNet\nfrom src.argument_parser import parse_args\nfrom src.dataloader import get_dataloaders\nfrom src.distributions import distributions\nclass_dir = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer',\n 5:'dog', 6:'frog', 7:'horse', 8:'ship', 9:'truck'}\n\n# Momentum Iterative Method (MIM)\n# Jacobian-based Saliency Map Attack (JSMA)\n\ndatasets_dict = {'CIFAR-10': {\n 'dataset': datasets.CIFAR10,\n 'train_transform': transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, padding=4),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2470, 0.2435, 0.2616))\n ]),\n 'transform': transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2470, 0.2435, 0.2616))\n ]),\n 'clean': transforms.Compose([\n transforms.ToTensor()\n ]),\n 'dset_kwargs': {},\n 'val_size': 10000,\n 'distribution': 'categorical',\n 'input_shape': (3, 32, 32),\n 'output_dim': 10\n}\n}\n\n\ndef get_dataloaders_(batch_size, trial_i, dataset='MNIST', augment=False, early_stop=False, use_morph=False, depth=None,\n n_workers=0):\n data_dir = './data/{}'.format(dataset)\n\n params = datasets_dict[dataset]\n\n datasets = {}\n for split in ['train_clean', 'train', 'valid', 'test']:\n if augment and split == 'train' and 'train_transform' in params.keys():\n transform = params['train_transform']\n else:\n transform = params['transform']\n if split == 'train_clean':\n transform = params['clean']\n\n dset = params['dataset'](root=data_dir,\n train=(split != 'test'),\n download=True,\n transform=transform,\n **params['dset_kwargs'])\n datasets[split] = dset\n\n # Deterministic train/val split based on trial number\n if True:\n indices = list(range(len(datasets['train'])))\n val_size = params['val_size']\n\n s = np.random.RandomState(trial_i)\n valid_idx = s.choice(indices, size=val_size, replace=False)\n train_idx = list(set(indices) - set(valid_idx))\n\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n default_dloader_args = {\n 'batch_size': batch_size,\n 'pin_memory': True,\n 'num_workers': n_workers,\n 'drop_last': True,\n 'shuffle': False\n }\n\n dataloaders = {}\n\n # If we're not doing early stopping, don't use a separate validation set\n if early_stop:\n dataloaders['train'] = DataLoader(dataset=datasets['train'],\n sampler=train_sampler,\n **default_dloader_args)\n dataloaders['valid'] = DataLoader(dataset=datasets['valid'],\n sampler=valid_sampler,\n **default_dloader_args)\n dataloaders['test'] = DataLoader(dataset=datasets['test'],\n shuffle=False,\n **default_dloader_args)\n dataloaders['morph'] = None\n else:\n if use_morph:\n train_morph_loader = load_morph_data(batch_size, dataset, 
depth)\n dataloaders['morph'] = train_morph_loader\n else:\n dataloaders['morph'] = None\n\n dataloaders['train'] = DataLoader(dataset=datasets['train'],\n **default_dloader_args)\n dataloaders['train_clean'] = DataLoader(dataset=datasets['train_clean'],\n **default_dloader_args)\n dataloaders['valid'] = DataLoader(dataset=datasets['test'],\n **default_dloader_args)\n dataloaders['test'] = DataLoader(dataset=datasets['test'],\n **default_dloader_args)\n\n return dataloaders, params\n\n\n\ndef prep_learner():\n conf = get_config(2)\n conf.device = 'cpu'\n conf.net_mode = 'resnet18'\n conf.n_shapes = 1\n conf.n_colors = 3\n conf.shape_only = False\n conf.color_only = False\n return ShapeLearner(conf, inference=True)\n\n\ndef set_distractors(learner):\n # set OOD data\n triangle_ds = ShapeDataSet(no_bkg=True)\n triangle_ds.shapes = ['triangle']\n triangle_ds.colors = [[(255, 255), (0, 0), (0, 0)],\n [(0, 0), (255, 255), (0, 0)],\n [(0, 0), (0, 0), (255, 255)]]\n triangle_ds.n_shapes = 1\n triangle_ds.n_colors = 3\n ziped_classes = enumerate(product(range(1), range(3)))\n triangle_ds.label_map = {v: -1 for k, v in ziped_classes}\n triangle_ds.label_names = [-1]\n learner.ds = triangle_ds\n\n dloader_args = {\n 'batch_size': 32,\n 'pin_memory': True,\n 'num_workers': conf.num_workers,\n 'drop_last': False,\n }\n learner.loader = DataLoader(learner.ds, **dloader_args)\n eval_sampler = RandomSampler(learner.ds, replacement=True, num_samples=len(learner.ds) // 10)\n learner.eval_loader = DataLoader(learner.ds, sampler=eval_sampler, **dloader_args)\n return learner\n\ndef set_probes(learner):\n # set OOD data\n triangle_ds = ShapeDataSet(no_bkg=True)\n triangle_ds.shapes = ['rectangle', 'circle']\n triangle_ds.colors = [[(255, 255), (0, 0), (0, 0)],\n [(0, 0), (255, 255), (0, 0)],\n ] # [(0, 0), (0, 0), (255, 255)]]\n triangle_ds.n_shapes = 2\n triangle_ds.n_colors = 2 # TODO fix this! 
we need the right 2 in the right order!\n ziped_classes = enumerate(product(range(triangle_ds.n_shapes), range(triangle_ds.n_colors)))\n triangle_ds.label_map = {v: k for k, v in ziped_classes}\n triangle_ds.label_names = [str(x) for x in product(triangle_ds.shapes, range(triangle_ds.n_colors))]\n learner.ds = triangle_ds\n\n dloader_args = {\n 'batch_size': 32,\n 'pin_memory': True,\n 'num_workers': conf.num_workers,\n 'drop_last': False,\n }\n learner.loader = DataLoader(learner.ds, **dloader_args)\n eval_sampler = RandomSampler(learner.ds, replacement=True, num_samples=len(learner.ds) // 10)\n learner.eval_loader = DataLoader(learner.ds, sampler=eval_sampler, **dloader_args)\n return learner\n\ndef get_evaluation(learner):\n # evaluate OOD data\n for i in range(len(learner.models)):\n learner.models[i].eval()\n do_mean = -1 if len(learner.models) > 1 else 0\n ind_iter = range(do_mean, len(learner.models))\n predictions = dict(zip(ind_iter, [[] for i in ind_iter]))\n prob = dict(zip(ind_iter, [[] for i in ind_iter]))\n labels = []\n learner.eval_loader.dataset.set_mode('test') # todo check this works :)\n for imgs, label in tqdm(learner.eval_loader, total=len(learner.eval_loader)):\n imgs = imgs.to(conf.device)\n thetas = [model(imgs).detach() for model in learner.models]\n if len(learner.models) > 1: thetas = [torch.mean(torch.stack(thetas), 0)] + thetas\n for ind, theta in zip(range(do_mean, len(learner.models)), thetas):\n val, arg = torch.max(theta, dim=1)\n predictions[ind].append(arg.cpu().numpy())\n prob[ind].append(theta.cpu().numpy())\n labels.append(label.detach().cpu().numpy())\n\n labels = np.hstack(labels)\n for ind in range(do_mean, len(learner.models)):\n predictions[ind] = np.hstack(predictions[ind])\n prob[ind] = np.vstack(prob[ind])\n return prob, predictions, labels\n\n\nclass ModelMeanEP(torch.nn.Module):\n def __init__(self, model, device='cpu'):\n super(ModelMeanEP, self).__init__()\n self.model = torch.nn.DataParallel(model, dim=0)\n\n def forward(self, x):\n res = torch.nn.Softmax(-1)(self.model(x))\n return torch.cat([x.mean(0) for x in torch.chunk(res, 4)])\n #return torch.cat([x.max(0).values for x in torch.chunk(res, 4)])\n\n\ndef run_attacks(res_path):\n MORPH_MODEL_DIR = '/mnt/md0/orville/Miriam/modular-loss-experiments-morph/results_morph_correct/CIFAR-10/densenet-82-8-8'\n MODEL_DIR = '/mnt/md0/orville/Miriam/modular-loss-experiments-morph/results/CIFAR-10/densenet-82-8-8'\n # UNCORR_MODEL_DIR = 'alpha_0.0_gamma_0.0_n_models_2_1581641733617'\n # CORR_MODEL_DIR = 'alpha_0.1_gamma_0.0_n_models_2_1581641746832'\n # CORR_MODEL_DIR_2 = 'alpha_0.2_gamma_0.0_n_models_2_1581641777871'\n UNCORR_MODEL_DIR = 'alpha_0.0_gamma_0.0_n_models_3_1585505819121'\n CORR_MODEL_DIR = 'alpha_0.1_gamma_0.0_n_models_3_1585505685528'\n CORR_MODEL_DIR_2 = 'alpha_0.2_gamma_0.0_n_models_3_1585505042819'\n\n rel_dirs = [UNCORR_MODEL_DIR, CORR_MODEL_DIR, CORR_MODEL_DIR_2]\n alpha = ['0', '0.1', '0.2']\n\n res = dict.fromkeys(alpha)\n batch_size = 516\n n_workers = 20\n dataset = 'CIFAR-10'\n network = 'densenet-82-8-8'\n loaders, _ = get_dataloaders_(batch_size, 0, dataset, False, early_stop=False, n_workers=n_workers)\n n_models = 3\n\n params = {}\n params['densenet-82-8-8'] = {'num_modules': 2, 'bottleneck': True, 'reduction': 0.5, 'depth': 82, 'growth_rate': 8,\n 'input_shape': (3, 32, 32), 'output_dim': 10}\n network = 'densenet-82-8-8'\n model = DenseNet(input_shape=params[network]['input_shape'],\n output_dim=params[network]['output_dim'],\n 
growth_rate=params[network]['growth_rate'],\n depth=params[network]['depth'],\n reduction=params[network]['reduction'],\n bottleneck=params[network]['bottleneck'],\n num_modules=n_models)\n\n device = torch.device(\"cuda\")\n\n for model_path, curr_alpha in tqdm(zip(rel_dirs, alpha), total=len(alpha)):\n weight_path = path.join(MODEL_DIR, model_path, 'trial_0/0.0/weights/final_weights.pt')\n model.reset_parameters()\n model.load_state_dict(torch.load(weight_path))\n model.eval() # model.train(mode=False)\n fmodel = PyTorchModel(ModelMeanEP(model), bounds=(0, 1), device=device)\n\n res[curr_alpha] = dict()\n for (attack, eps), attack_name in tqdm(zip(attack_list, attack_list_names),\n desc='attaking ' + str(curr_alpha), total=len(attack_list)):\n attack = attack()\n success_tot = []\n for images, labels in tqdm(loaders['test'], total=len(loaders['test']), desc=attack_name):\n images, labels = images.to(device), labels.to(device)\n _, _, success = attack(fmodel, images, labels, epsilons=eps)\n success_tot.append(success)\n success_tot = torch.cat(success_tot, -1)\n\n # calculate and report the robust accuracy\n robust_accuracy = 1 - success_tot.float().mean(axis=-1)\n for epsilon, acc in zip(eps, robust_accuracy):\n res[curr_alpha][attack_name + '_' + str(epsilon)] = acc.item()\n\n pickle.dump(res, open(res_path, 'wb'))\n pickle.dump(res, open(res_path, 'wb'))\n pickle.dump(res, open(res_path, 'wb'))\n\n\ndef run_attacks_cleverhans(res_path):\n MORPH_MODEL_DIR = '/mnt/md0/orville/Miriam/modular-loss-experiments-morph/results_morph_correct/CIFAR-10/densenet-82-8-8'\n MODEL_DIR = '/mnt/md0/orville/Miriam/modular-loss-experiments-morph/results/CIFAR-10/densenet-82-8-8'\n # UNCORR_MODEL_DIR = 'alpha_0.0_gamma_0.0_n_models_2_1581641733617'\n # CORR_MODEL_DIR = 'alpha_0.1_gamma_0.0_n_models_2_1581641746832'\n # CORR_MODEL_DIR_2 = 'alpha_0.2_gamma_0.0_n_models_2_1581641777871'\n UNCORR_MODEL_DIR = 'alpha_0.0_gamma_0.0_n_models_3_1585505819121'\n CORR_MODEL_DIR = 'alpha_0.1_gamma_0.0_n_models_3_1585505685528'\n CORR_MODEL_DIR_2 = 'alpha_0.2_gamma_0.0_n_models_3_1585505042819'\n\n rel_dirs = [UNCORR_MODEL_DIR, CORR_MODEL_DIR, CORR_MODEL_DIR_2]\n alpha = ['0', '0.1', '0.2']\n\n res = dict.fromkeys(alpha)\n batch_size = 128 # 516\n n_workers = 20\n dataset = 'CIFAR-10'\n network = 'densenet-82-8-8'\n loaders, _ = get_dataloaders_(batch_size, 0, dataset, False, early_stop=False, n_workers=n_workers)\n n_models = 3\n\n params = {}\n params['densenet-82-8-8'] = {'num_modules': 2, 'bottleneck': True, 'reduction': 0.5, 'depth': 82, 'growth_rate': 8,\n 'input_shape': (3, 32, 32), 'output_dim': 10}\n network = 'densenet-82-8-8'\n model = DenseNet(input_shape=params[network]['input_shape'],\n output_dim=params[network]['output_dim'],\n growth_rate=params[network]['growth_rate'],\n depth=params[network]['depth'],\n reduction=params[network]['reduction'],\n bottleneck=params[network]['bottleneck'],\n num_modules=n_models)\n\n device = torch.device(\"cuda\")\n reports = dict.fromkeys(alpha)\n for model_path, curr_alpha in tqdm(zip(rel_dirs, alpha), total=len(alpha)):\n weight_path = path.join(MODEL_DIR, model_path, 'trial_0/0.0/weights/final_weights.pt')\n model.reset_parameters()\n model.load_state_dict(torch.load(weight_path))\n model.eval() # model.train(mode=False)\n net = ModelMeanEP(model).to(device)\n\n report = dict()\n for x, y in tqdm(loaders['test'], total=len(loaders['test'])):\n x, y = x.to(device), y.to(device)\n report['nb_test'] = report.get('nb_test', 0) + y.size(0)\n\n _, y_pred = net(x).max(1) 
# model prediction on clean examples\n report['acc'] = report.get('acc', 0) + y_pred.eq(y).sum().item()\n\n # model prediction on FGM adversarial examples\n x_adv = fast_gradient_method(net, x, 0.02, np.inf)\n _, y_pred = net(x_adv).max(1) # model prediction on FGM adversarial examples\n report['FGM_0.02'] = report.get('FGM_0.02', 0) + y_pred.eq(y).sum().item()\n\n x_adv = fast_gradient_method(net, x, 0.04, np.inf)\n _, y_pred = net(x_adv).max(1) # model prediction on FGM adversarial examples\n report['FGM_0.04'] = report.get('FGM_0.04', 0) + y_pred.eq(y).sum().item()\n\n # model prediction on BIM adversarial examples\n x_adv = projected_gradient_descent(net, x, eps=0.01, eps_iter=0.01 / 10, nb_iter=10, norm=np.inf, rand_init=0)\n _, y_pred = net(x_adv).max(1)\n report['BIM_0.01'] = report.get('BIM_0.01', 0) + y_pred.eq(y).sum().item()\n\n x_adv = projected_gradient_descent(net, x, eps=0.02, eps_iter=0.02 / 10, nb_iter=10, norm=np.inf, rand_init=0)\n _, y_pred = net(x_adv).max(1)\n report['BIM_0.02'] = report.get('BIM_0.02', 0) + y_pred.eq(y).sum().item()\n\n # model prediction on PGD adversarial examples\n x_adv = projected_gradient_descent(net, x, eps=0.01, eps_iter=0.01 / 10, nb_iter=10, norm=np.inf)\n _, y_pred = net(x_adv).max(1)\n report['PGD_0.01'] = report.get('PGD_0.01', 0) + y_pred.eq(y).sum().item()\n\n x_adv = projected_gradient_descent(net, x, eps=0.02, eps_iter=0.02 / 10, nb_iter=10, norm=np.inf)\n _, y_pred = net(x_adv).max(1)\n report['PGD_0.02'] = report.get('PGD_0.02', 0) + y_pred.eq(y).sum().item()\n\n for key in ['acc', 'FGM_0.02', 'FGM_0.04', 'BIM_0.01', 'BIM_0.02', 'PGD_0.01', 'PGD_0.02']:\n report[key] = (report[key] / report['nb_test']) * 100.\n\n reports[curr_alpha] = report\n pickle.dump(reports, open(res_path, 'wb'))\n pickle.dump(reports, open(res_path, 'wb'))\n\n\ndef get_TTR_FTR_curve(prob_prob, distractors_prob, prob_labels):\n open_set_1st_labels_0 = np.argmax(distractors_prob[0], 1)\n open_set_1st_labels_1 = np.argmax(distractors_prob[1], 1)\n open_set_1st_scores_0 = np.max(distractors_prob[0], 1)\n open_set_1st_scores_1 = np.max(distractors_prob[1], 1)\n\n mean_pred = np.argmax(prob_prob[-1], 1)\n mean_score = np.max(prob_prob[-1], 1)\n\n a = np.sum((open_set_1st_labels_0 == open_set_1st_labels_1))\n b = len(prob_labels)\n corr = (100.0*a / b)\n\n prev_FTR = -1\n prev_TTR = -1\n THs = []\n TTRs = []\n FTRs = []\n for i, TH in enumerate(np.arange(0, 1, 0.00001)):\n FTR = np.sum((open_set_1st_labels_0 == open_set_1st_labels_1) & (open_set_1st_scores_0 > TH) & (open_set_1st_scores_1 > TH)) / b\n TTR = np.sum((mean_score > TH) & (mean_pred == prob_labels)) / b\n\n if (prev_FTR != FTR and prev_TTR != TTR) or (i%100 == 0):\n prev_FTR = FTR\n prev_TTR = TTR\n THs.append(TH)\n TTRs.append(TTR)\n FTRs.append(FTR)\n return THs, TTRs, FTRs, corr\n\n\ndef ood_test(MODEL_DIR, res_path):\n rel_dirs = [x for x in os.listdir(MODEL_DIR) if '2020' in x]\n alpha = [re.findall('a=([0-9, \\.]*)_', d)[0] for d in rel_dirs]\n learner = prep_learner()\n\n res_dir = dict.fromkeys(alpha)\n for model_path, curr_alpha in zip(rel_dirs, alpha):\n conf.save_path = pathlib.Path(path.join(MODEL_DIR, model_path))\n fix_str = [x for x in os.listdir(path.join(MODEL_DIR, model_path)) if '2020' in x][0][8:]\n learner.load_state(conf, fix_str, model_only=True, from_save_folder=True)\n\n # distractors\n set_distractors(learner)\n distractors_prob, distractors_predictions, distractors_labels = get_evaluation(learner)\n\n # probs\n set_probes(learner)\n prob_prob, prob_predictions, prob_labels = 
get_evaluation(learner)\n\n THs, TTRs, FTRs, corr = get_TTR_FTR_curve(prob_prob, distractors_prob, prob_labels)\n print(curr_alpha, corr)\n res_dir[curr_alpha] = [THs, TTRs, FTRs, corr]\n pickle.dump(res_dir, open(res_path, 'wb'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='for CBIS-DDSM')\n parser.add_argument(\"-ood\", \"--ood_test\", help=\"to ood test instead?\", default=0, type=int)\n\n args = parser.parse_args()\n conf = get_config()\n\n if args.ood_test:\n res_path = str('cifar_ood_res.pkl')\n ood_test(res_path)\n else:\n res_path = str('cifar_attack_res.pkl')\n run_attacks_cleverhans(res_path)\n #run_attacks(res_path)\n" ]
[ [ "numpy.hstack", "torch.nn.Softmax", "torch.chunk", "torch.max", "torch.load", "torch.cat", "numpy.arange", "torch.utils.data.DataLoader", "torch.utils.data.sampler.SubsetRandomSampler", "numpy.max", "numpy.argmax", "torch.device", "torch.nn.DataParallel", "numpy.random.RandomState", "numpy.sum", "numpy.vstack", "torch.stack" ] ]
shawnwang18/nvtx-plugins
[ "12ea33203cc18ccd546f835e228359e3eecbdb14" ]
[ "nvtx_plugins/python/nvtx/plugins/tf/ops.py" ]
[ "# ! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport wrapt\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\n\nfrom nvtx.plugins.tf.ext_utils import load_library\nfrom nvtx.plugins.tf.ext_utils import get_ext_suffix\n\n__all__ = ['nvtx_tf_ops', 'start', 'end', 'trace']\n\n\nnvtx_tf_ops = load_library('lib/nvtx_ops' + get_ext_suffix())\n\n\n@ops.RegisterGradient('NvtxStart')\ndef _nvtx_start_grad(op, grad, marker_id, domain_handle):\n # grad_message and grad_domain_name are not used\n if not isinstance(marker_id, tf.Tensor) and marker_id is None:\n raise RuntimeError('Error in nvtx range %s. '\n 'Make sure all nvtx ranges are closed' % op.name)\n\n grad, null_grad = nvtx_tf_ops.nvtx_end(inputs=grad,\n marker_id=marker_id, domain_handle=domain_handle,\n grad_message=op.inputs[2], grad_domain_name=op.inputs[3])\n return [grad, null_grad, None, None]\n\n\n@ops.RegisterGradient('NvtxEnd')\ndef _nvtx_end_grad(op, grad, null_grad):\n grad, marker_id, domain_handle = nvtx_tf_ops.nvtx_start(\n inputs=grad, null_input=1.,\n message=op.inputs[3], domain_name=op.inputs[4])\n return [grad, marker_id, domain_handle, None, None]\n\n\ndef start(inputs, message, domain_name=None,\n grad_message=None, grad_domain_name=None,\n trainable=False, enabled=True, name=None):\n \"\"\"An identity operation with a side effect of opening an NVTX marker.\n\n Note:\n The :func:`ops.start <start>` and :func:`ops.end <end>` operations\n must be used in pairs.\n\n Example:\n .. highlight:: python\n .. code-block:: python\n\n x, nvtx_context = nvtx.plugins.tf.ops.start(x, message='Dense 1-3',\n domain_name='Forward', grad_domain_name='Gradient')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_3')\n x = nvtx.plugins.tf.ops.end(x, nvtx_context)\n\n Arguments:\n inputs: A ``Tensor`` object that is passed to ``output``.\n message: A ``string`` message to be associated with this marker.\n domain_name: An optional ``string`` domain name to be associated with\n this marker. If not provided the default NVTX domain will be used.\n grad_message: An optional ``string`` message to be associated with\n the op gradient. If not provided ``message`` will be used.\n grad_domain_name: An optional ``string`` domain name to be associated\n with this marker gradient. If not provided ``domain_name`` will\n be used.\n trainable: ``bool``, if ``True`` will make this op\n trainable. 
Used when this is the first operation in the graph to\n prevent an open ended marker during gradient calculation.\n enabled: ``bool``, if ``False`` the nvtx marker will be disabled.\n name: An optional `string` name for the operation.\n\n Returns:\n ``tuple``:\n - output: The inputs ``Tensor``.\n - nvtx_context: ``list``, NVTX context associated with this op and passed to :func:`ops.end <end>`. ``None`` if ``enabled=False``.\n\n \"\"\"\n if not enabled:\n return inputs, None\n\n domain_name = domain_name or ''\n grad_message = grad_message or message\n grad_domain_name = grad_domain_name or domain_name or ''\n\n null_input = 1.\n if trainable:\n with tf.compat.v1.variable_scope(\"nvtx\", reuse=tf.compat.v1.AUTO_REUSE):\n null_input = tf.compat.v1.get_variable('null_input', shape=(),\n dtype=tf.float32,\n initializer=tf.zeros_initializer,\n trainable=True)\n\n inputs, marker_id, domain_handle = nvtx_tf_ops.nvtx_start(\n inputs=inputs, null_input=null_input,\n message=message, domain_name=domain_name, name=name)\n return inputs, (marker_id, domain_handle, grad_message, grad_domain_name)\n\n\ndef end(inputs, nvtx_context, name=None):\n \"\"\"An identity operation with a side effect of closing an NVTX marker.\n\n Note:\n The :func:`ops.start <start>` and :func:`ops.end <end>` operations\n must be used in pairs.\n\n Example:\n .. highlight:: python\n .. code-block:: python\n\n x, nvtx_context = nvtx.plugins.tf.ops.start(x, message='Dense 1-3',\n domain_name='Forward', grad_domain_name='Gradient')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')\n x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_3')\n x = nvtx.plugins.tf.ops.end(x, nvtx_context)\n\n Arguments:\n inputs: A ``Tensor`` object that will be passed to ``output``.\n nvtx_context: ``list``, NVTX context received from\n :func:`ops.start <start>` If `None` the marker will be disabled.\n name: An optional ``string`` name for the operation.\n\n Returns:\n The inputs ``Tensor``.\n\n \"\"\"\n if nvtx_context is None:\n return inputs\n\n marker_id, domain_handle, grad_message, grad_domain_name = nvtx_context\n output, null_output = nvtx_tf_ops.nvtx_end(inputs=inputs,\n marker_id=marker_id, domain_handle=domain_handle,\n grad_message=grad_message, grad_domain_name=grad_domain_name, name=name)\n\n return output\n\n\ndef trace(message, domain_name=None,\n grad_message=None, grad_domain_name=None,\n trainable=False, enabled=True, name=None):\n \"\"\"An identity function decorator with a side effect of adding NVTX marker.\n\n Note:\n The decorator expects the wrapped function to take the input ``Tensor``\n as the first argument or to be named ``inputs``, and to return a single\n ``Tensor``.\n\n Arguments:\n message: A ``string`` message to be associated with this marker.\n domain_name: An optional ``string`` domain name to be associated with\n this marker. If not provided the default NVTX domain will be used.\n grad_message: An optional ``string`` message to be associated with\n the op gradient. If not provided `message` will be used.\n grad_domain_name: An optional ``string`` domain name to be associated\n with this marker gradient. If not provided ``domain_name`` will\n be used.\n trainable: ``bool``, if ``True`` will make this op\n trainable. 
Used when this is the first operation in the graph to\n prevent an open ended marker during gradient calculation.\n enabled: ``bool``, if ``False`` the nvtx marker will be disabled.\n name: An optional ``string`` name for the operation.\n\n \"\"\"\n @wrapt.decorator\n def func_wrapper(wrapped, instance, args, kwargs):\n try:\n inputs = kwargs[\"inputs\"] if \"inputs\" in kwargs else args[0]\n except:\n raise ValueError(\"The input tensor must be the first argument\"\n \" or named `inputs`\")\n assert isinstance(inputs, tf.Tensor)\n start_name = '{}_start'.format(name) if name else None\n end_name = '{}_end'.format(name) if name else None\n inputs, nvtx_context = start(inputs=inputs,\n message=message, domain_name=domain_name,\n grad_message=grad_message, grad_domain_name=grad_domain_name,\n enabled=enabled, trainable=trainable, name=start_name)\n if \"inputs\" not in kwargs:\n args = [inputs] + list(args[1:])\n else:\n kwargs[\"inputs\"] = inputs\n output = wrapped(*args, **kwargs)\n output = end(inputs=output, nvtx_context=nvtx_context, name=end_name)\n return output\n\n return func_wrapper\n" ]
[ [ "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.variable_scope", "tensorflow.python.framework.ops.RegisterGradient" ] ]
MeteSertkan/newsrec
[ "fc3b8803e6334aeba81f7cf212fa88ca2d1440d8" ]
[ "project/models/sentirec/__init__.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nfrom torchmetrics import MetricCollection\nfrom models.sentirec.news_encoder import NewsEncoder\nfrom models.sentirec.user_encoder import UserEncoder\nfrom models.utils import TimeDistributed\nfrom models.metrics import NDCG, MRR, AUC, SentiMRR, Senti\n\n\nclass SENTIREC(pl.LightningModule):\n \"\"\"\n SENTIREC network.\n \"\"\"\n\n def __init__(self, config=None, pretrained_word_embedding=None):\n super(SENTIREC, self).__init__()\n self.config = config\n news_encoder = NewsEncoder(config, pretrained_word_embedding)\n self.news_encoder = TimeDistributed(news_encoder, batch_first=True)\n self.user_encoder = UserEncoder(config)\n self.sentiment_predictor = nn.Linear(config.word_embedding_dim, 1)\n # val metrics\n self.val_performance_metrics = MetricCollection({\n 'val_auc': AUC(),\n 'val_mrr': MRR(),\n 'val_ndcg@5': NDCG(k=5),\n 'val_ndcg@10': NDCG(k=10)\n })\n self.val_sentiment_diversity_metrics_vader = MetricCollection({\n 'val_senti_mrr_vader': SentiMRR(),\n 'val_senti@5_vader': Senti(k=5),\n 'val_senti@10_vader': Senti(k=10)\n })\n self.val_sentiment_diversity_metrics_bert = MetricCollection({\n 'val_senti_mrr_bert': SentiMRR(),\n 'val_senti@5_bert': Senti(k=5),\n 'val_senti@10_bert': Senti(k=10)\n })\n # test metrics\n self.test_performance_metrics = MetricCollection({\n 'test_auc': AUC(),\n 'test_mrr': MRR(),\n 'test_ndcg@5': NDCG(k=5),\n 'test_ndcg@10': NDCG(k=10)\n })\n self.test_sentiment_diversity_metrics_vader = MetricCollection({\n 'test_senti_mrr_vader': SentiMRR(),\n 'test_senti@5_vader': Senti(k=5),\n 'test_senti@10_vader': Senti(k=10)\n })\n self.test_sentiment_diversity_metrics_bert = MetricCollection({\n 'test_senti_mrr_bert': SentiMRR(),\n 'test_senti@5_bert': Senti(k=5),\n 'test_senti@10_bert': Senti(k=10)\n })\n \n\n def forward(self, batch):\n # encode candidate news\n candidate_news_vector = self.news_encoder(batch[\"c_title\"])\n # encode history \n clicked_news_vector = self.news_encoder(batch[\"h_title\"])\n # encode user\n user_vector = self.user_encoder(clicked_news_vector)\n # compute scores for each candidate news\n clicks_score = torch.bmm(\n candidate_news_vector,\n user_vector.unsqueeze(dim=-1)).squeeze(dim=-1)\n # sentiment-prediction task\n s_pred = self.sentiment_predictor(candidate_news_vector)\n \n return clicks_score, s_pred\n\n def training_step(self, batch, batch_idx):\n # forward pass\n y_pred, s_pred = self(batch)\n y_pred = torch.sigmoid(y_pred)\n # RECOMMENDATION LOSS\n y = torch.zeros(len(y_pred), dtype=torch.long, device=self.device)\n loss = F.cross_entropy(y_pred, y)\n # SENTIMENT PREDICTION LOSS\n s_c = batch[\"c_\" + self.config.sentiment_classifier].flatten()\n loss += self.config.sentiment_prediction_loss_coeff * F.l1_loss(s_pred.flatten(), s_c) #MAE\n # SENTIMENT REGULARIZATION LOSS\n if(self.config.sentiment_regularization): \n s_hist = batch[\"h_\" + self.config.sentiment_classifier].flatten()\n s_mean = s_hist.mean()\n # batch_size, 1+K // sentiment diversity score\n p = F.relu(s_mean * s_c * y_pred.flatten())\n # sentiment diversity loss\n loss += self.config.sentiment_diversity_loss_coeff * p.mean()\n self.log('train_loss', loss, on_step=True, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n y_pred, _ = self(batch)\n y_pred = F.softmax(y_pred, dim=1)\n y = batch[\"labels\"]\n # determine candidate sentiment and overall sentiment orientation\n s_c_vader, s_c_bert, s_mean_vader, s_mean_bert = 
self.sentiment_evaluation_helper(batch)\n # compute metrics\n self.val_performance_metrics(y_pred, y)\n self.val_sentiment_diversity_metrics_vader(y_pred.flatten(), s_c_vader, s_mean_vader)\n self.val_sentiment_diversity_metrics_bert(y_pred.flatten(), s_c_bert, s_mean_bert)\n # log metric\n self.log_dict(self.val_performance_metrics, on_step=True, on_epoch=True)\n self.log_dict(self.val_sentiment_diversity_metrics_vader, on_step=True, on_epoch=True)\n self.log_dict(self.val_sentiment_diversity_metrics_bert, on_step=True, on_epoch=True)\n\n def test_step(self, batch, batch_idx):\n y_pred, _ = self(batch)\n y_pred = F.softmax(y_pred, dim=1)\n y = batch[\"labels\"]\n # determine candidate sentiment and overall sentiment orientation\n s_c_vader, s_c_bert, s_mean_vader, s_mean_bert = self.sentiment_evaluation_helper(batch)\n # compute metrics\n self.test_performance_metrics(y_pred, y)\n self.test_sentiment_diversity_metrics_vader(y_pred.flatten(), s_c_vader, s_mean_vader)\n self.test_sentiment_diversity_metrics_bert(y_pred.flatten(), s_c_bert, s_mean_bert)\n # log metric\n self.log_dict(self.test_performance_metrics, on_step=True, on_epoch=True)\n self.log_dict(self.test_sentiment_diversity_metrics_vader, on_step=True, on_epoch=True)\n self.log_dict(self.test_sentiment_diversity_metrics_bert, on_step=True, on_epoch=True)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(),\n lr=self.config.learning_rate)\n\n def sentiment_evaluation_helper(self, batch):\n # sentiment scores of candidate news\n # (determined through sentiment classifier)\n s_c_vader = batch[\"c_vader_sentiment\"].flatten()\n s_c_bert = batch[\"c_bert_sentiment\"].flatten()\n # calc mean sentiment score from browsed news\n # (using sentiment classifier\n s_clicked_vader = batch[\"h_vader_sentiment\"].flatten()\n s_clicked_bert = batch[\"h_bert_sentiment\"].flatten()\n s_mean_vader = s_clicked_vader.mean()\n s_mean_bert = s_clicked_bert.mean()\n\n return s_c_vader, s_c_bert, s_mean_vader, s_mean_bert" ]
[ [ "torch.nn.Linear", "torch.sigmoid", "torch.nn.functional.cross_entropy", "torch.nn.functional.softmax" ] ]
vguizilini/packnet-sfm
[ "fec6d0b493b784cabe5e6bf9c65b996a83c63fe1", "fec6d0b493b784cabe5e6bf9c65b996a83c63fe1" ]
[ "scripts/train_sfm_utils.py", "monodepth/datasets/image_sequence.py" ]
[ "# Copyright 2020 Toyota Research Institute. All rights reserved.\n\nimport torch\nfrom monodepth.models import monodepth_beta, load_net_from_checkpoint\nfrom monodepth.functional.image import scale_image\nimport os\n\n\ndef load_dispnet_with_args(args):\n \"\"\"\n Loads a pretrained depth network\n \"\"\"\n checkpoint = torch.load(args.pretrained_model)\n # check for relevant args\n assert 'args' in checkpoint, 'Cannot find args in checkpoint.'\n checkpoint_args = checkpoint['args']\n for arg in ['disp_model', 'dropout', 'input_height', 'input_width']:\n assert arg in checkpoint_args, 'Could not find argument {}'.format(arg)\n disp_net = monodepth_beta(checkpoint_args.disp_model,\n dropout=checkpoint_args.dropout)\n disp_net = load_net_from_checkpoint(disp_net, args.pretrained_model, starts_with='disp_network')\n disp_net = disp_net.cuda() # move to GPU\n print('Loaded disp net of type {}'.format(checkpoint_args.disp_model))\n\n return disp_net, checkpoint_args\n\n\ndef compute_depth_errors(args, gt, pred, use_gt_scale=True, crop=True):\n \"\"\"\n Computes depth errors given ground-truth and predicted depths\n use_gt_scale: If True, median ground-truth scaling is used\n crop: If True, apply a crop in the image before evaluating\n \"\"\"\n abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0, 0, 0, 0, 0, 0\n rmse, rmse_log = 0, 0\n\n batch_size, _, gt_height, gt_width = gt.shape\n pred = scale_image(pred, gt_height, gt_width, mode='bilinear', align_corners=True)\n for current_gt, current_pred in zip(gt, pred):\n gt_channels, gt_height, gt_width = current_gt.shape\n current_gt = torch.squeeze(current_gt)\n current_pred = torch.squeeze(current_pred)\n\n # Mask within min and max depth\n valid = (current_gt > args.min_depth) & (current_gt < args.max_depth)\n\n if crop:\n # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results\n # construct a mask of False values, with the same size as target\n # and then set to True values inside the crop\n crop_mask = torch.zeros(current_gt.shape).byte().cuda()\n y1, y2 = int(0.40810811 * gt_height), int(0.99189189 * gt_height)\n x1, x2 = int(0.03594771 * gt_width), int(0.96405229 * gt_width)\n crop_mask[y1:y2, x1:x2] = 1\n valid = valid & crop_mask\n\n valid_gt = current_gt[valid]\n valid_pred = current_pred[valid]\n\n if use_gt_scale:\n # Median ground-truth scaling\n valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)\n\n valid_pred = valid_pred.clamp(args.min_depth, args.max_depth)\n\n # Calculates threshold values\n thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))\n a1 += (thresh < 1.25).float().mean()\n a2 += (thresh < 1.25**2).float().mean()\n a3 += (thresh < 1.25**3).float().mean()\n\n # Calculates absolute relative error\n abs_diff += torch.mean(torch.abs(valid_gt - valid_pred))\n abs_rel += torch.mean(torch.abs(valid_gt - valid_pred) / valid_gt)\n\n # Calculates square relative error\n sq_rel += torch.mean(((valid_gt - valid_pred)**2) / valid_gt)\n\n # Calculates root mean square error and its log\n rmse += torch.sqrt(torch.mean((valid_gt - valid_pred)**2))\n r_log = (torch.log(valid_gt) - torch.log(valid_pred))**2\n rmse_log += torch.sqrt(torch.mean(r_log))\n\n return torch.tensor([metric / batch_size for metric in [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]])\n", "# Copyright 2020 Toyota Research Institute. 
All rights reserved.\n\nimport os\nimport numpy as np\n\nfrom collections import defaultdict\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom monodepth.logging import printcolor\n\n# File template for loading\nIMAGE_REGEX = '{:09d}'\n\n\ndef get_idx(filename):\n \"\"\"Get the index of the image filename (specific to a session) per-line.\n For e.g. berlin/508/image_02/data/berlin_000508_000012_leftImg8bit.png\n index corresponds to 12.\n\n Parameters\n ----------\n filename: str\n Image filename\n\n Returns\n ----------\n index: int\n Index of the filename\n \"\"\"\n return int(os.path.splitext(filename)[0])\n\n\ndef change_idx(idx, filename):\n \"\"\"Get the image filename for the index provided.\n\n Parameters\n ----------\n idx: int\n Image index\n filename: str\n Image filename\n\n Returns\n ----------\n filename: str\n Filename of the new image with the corresponding index\n \"\"\"\n _, ext = os.path.splitext(os.path.basename(filename))\n return IMAGE_REGEX.format(idx) + ext\n\n\ndef read_files(directory, ext=['.png', '.jpg', '.jpeg'], skip_empty=True):\n \"\"\"Read files recursively within a directory and return the directory\n structure as a dictionary.\n\n Parameters\n ----------\n directory: str\n Root directory\n ext: list\n List of acceptable file extensions.\n skip_empty: bool, (default=True)\n Only create dictionary key/value if the directory is empty.\n\n Returns\n ----------\n files: dict\n Directory structure as a dict\n \"\"\"\n files = defaultdict(list)\n for entry in os.scandir(directory):\n relpath = os.path.relpath(entry.path, directory)\n if entry.is_dir():\n d_files = read_files(entry.path, ext=ext, skip_empty=skip_empty)\n if skip_empty and not len(d_files):\n continue\n files[relpath] = d_files[entry.path]\n elif entry.is_file():\n if ext is None or entry.path.lower().endswith(tuple(ext)):\n files[directory].append(relpath)\n return files\n\n\ndef read_file_list(filename, ext=['.png', '.jpg', '.jpeg'], skip_empty=True):\n \"\"\"Read files from the file-list and return the directory\n structure as a dictionary.\n\n Parameters\n ----------\n filename: str\n File list name\n ext: list\n List of acceptable file extensions.\n skip_empty: bool, (default=True)\n Only create dictionary key/value if the directory is empty.\n\n Returns\n ----------\n files: dict\n Directory structure as a dict\n \"\"\"\n files = defaultdict(list)\n for entry in open(filename, 'r').read().splitlines():\n dirname, basename = os.path.split(entry)\n files[dirname].append(basename)\n for k in files:\n if not len(files[k]):\n files.pop(k)\n return files\n\n\nclass ImageSequenceLoader(Dataset):\n def __init__(self, root_dir, file_list=None, data_transform=None,\n forward_context=0, backward_context=0,\n strides=[1], dataset_idx=None):\n \"\"\"Image sequence data loader which handles temporal context.\n Supported image formats are .png, .jpg, .jpeg\n\n The dataset directory structure should be as follows:\n <root_dir>/<unique_session_name>/<%09d.png>\n\n For example:\n >> root_dir/\n root_dir/session1/intrinsics.json\n root_dir/session1/000000001.png\n root_dir/session1/000000002.png\n root_dir/session1/...\n root_dir/session2/intrinsics.json\n root_dir/session2/000000001.png\n root_dir/session2/000000002.png\n root_dir/...\n root_dir/sessionN/intrinsics.json\n root_dir/sessionN/000000001.png\n root_dir/sessionN/000000002.png\n root_dir/sessionN/...\n\n Parameters\n ----------\n root_dir: str\n Dataset path\n file_list: str\n Split file containing relative path to data\n 
data_transform: Data Transform\n Transform to be applied on each data sample before returning it\n forward_context: int, (default=0)\n Number of frames after the current frame to return.\n back_context: int, (default=0)\n Number of frames before the current frame to return.\n E.g. if the current frame is T, and back_context=2, the loader will return\n [T-2, T-1, T]\n strides: list, (default=[1])\n List of strides, denoting the number of frames to skip for each sample.\n (currently not supported)\n dataset_idx: int, (default=None)\n Identify dataset index loader for mixed batch training\n\n Notes\n ----------\n 1. This loader assumes that consecutive frame indices (t-1,t,t+1)\n are present in the same session.\n 2. The loader does not check for file existence when file_list is\n provided.\n \"\"\"\n super().__init__()\n assert len(strides) == 1 and strides[0] == 1\n assert isinstance(strides, list)\n self.dataset_idx = dataset_idx\n self.root_dir = root_dir\n self.forward_context = forward_context\n self.backward_context = backward_context\n self.stride = 1\n\n # Support training from image sequence directory, or via file splits\n file_tree = read_files(root_dir)\n if file_list is not None:\n self.tree = read_file_list(file_list)\n else:\n self.tree = file_tree\n self.sessions = self.tree.keys()\n\n self.calib = {}\n self.files = []\n for (k,v) in self.tree.items():\n self.calib[k] = self._get_calibration(k)\n plen = len(v)\n v = sorted(v)\n file_set = set(file_tree[k])\n files = [fname for fname in v if self._has_context(fname, file_set)]\n self.tree[k] = files\n self.files.extend([(k, fname) for fname in files])\n\n printcolor('ImageSequence: {}'.format(self.root_dir))\n printcolor('\\tSessions: {}'.format(len(self.sessions)))\n printcolor('\\tDataset size: {}'.format(len(self.files)))\n printcolor('\\tImages size: {}'.format(sum([len(v) for v in self.tree.values()])))\n self.data_transform = data_transform\n\n def __len__(self):\n return len(self.files)\n\n def _has_context(self, filename, file_set):\n \"\"\"Check if the filename (fname) has context files in the file_set\n\n Parameters\n ----------\n filename: str\n Filename\n file_set: set\n Set of files that exist\n\n Returns\n ----------\n bool\n Return whether the filename contains the context files list, after context check.\n \"\"\"\n context_paths = self._get_context_file_paths(filename)\n return all([f in file_set for f in context_paths])\n\n def _get_calibration(self, session):\n \"\"\"Get the calibration file for the video session.\n\n Parameters\n ----------\n session: str\n Video session\n\n Returns\n ----------\n calib: dict\n Calibration dict with 'K' and 'Kinv' keys\n \"\"\"\n filename = os.path.join(self.root_dir, session, 'intrinsics.json')\n if os.path.exists(filename):\n raise NotImplementedError()\n\n # Load calibration by using image size\n filename = os.path.join(self.root_dir, session, self.tree[session][0])\n im = Image.open(filename)\n W, H = im.size\n K = np.array([[1000., 0, W / 2 - 0.5],\n [0, 1000., H / 2 - 0.5],\n [0, 0, 1]])\n Kinv = K.copy()\n Kinv[0, 0] = 1. / K[0, 0]\n Kinv[1, 1] = 1. 
/ K[1, 1]\n Kinv[0, 2] = -K[0, 2] / K[0, 0]\n Kinv[1, 2] = -K[1, 2] / K[1, 1]\n return {'K': K, 'Kinv': Kinv}\n\n def _get_context_file_paths(self, filename):\n \"\"\" Return RGB context files given a filename\n\n Parameters\n ----------\n filename: str\n Filename\n\n Returns\n ----------\n filenames: list\n Context image filenames\n \"\"\"\n fidx = get_idx(filename)\n idxs = list(np.arange(-self.backward_context * self.stride, 0, self.stride)) + \\\n list(np.arange(0, self.forward_context * self.stride, self.stride) + self.stride)\n return [change_idx(fidx + i, filename) for i in idxs]\n\n def _read_rgb_context_files(self, session, filename):\n \"\"\"Read context images for the given index.\n\n Parameters\n ----------\n session: str\n Session name\n filename: str\n Filename\n\n Returns\n ----------\n filenames: list\n List of context RGB images\n \"\"\"\n context_paths = self._get_context_file_paths(filename)\n return [Image.open(os.path.join(self.root_dir, session, filename)) for filename in context_paths]\n\n def _read_rgb_file(self, session, filename):\n \"\"\"Return RGB image given an index\n\n Parameters\n ----------\n session: str\n Session name\n filename: str\n Filename\n\n Returns\n ----------\n image: PIL.Image\n RGB image\n \"\"\"\n return Image.open(os.path.join(self.root_dir, session, filename))\n\n def __getitem__(self, idx):\n \"\"\"Return RGB image given an index\n\n Parameters\n ----------\n idx: int\n Image index\n\n Returns\n ----------\n sample: dict\n RGB image sample along with corresponding intrinsics, index\n \"\"\"\n session, filename = self.files[idx]\n calib = self.calib[session]\n K, Kinv = calib['K'], calib['Kinv']\n sample = {'left_intrinsics': K,\n 'left_intrinsics_inv': Kinv,\n 'left_fx': K[0, 0],\n 'left_fy': K[1, 1],\n 'baseline': 0,\n 'idx': idx\n }\n if self.dataset_idx is not None:\n sample.update({'dataset_idx' : self.dataset_idx})\n\n sample['left_image'] = self._read_rgb_file(session, filename)\n sample['left_image_context'] = self._read_rgb_context_files(session, filename)\n\n if self.data_transform:\n sample = self.data_transform(sample)\n\n return sample\n" ]
[ [ "torch.mean", "torch.abs", "torch.max", "torch.load", "torch.zeros", "torch.median", "torch.tensor", "torch.log", "torch.squeeze" ], [ "numpy.arange", "numpy.array" ] ]
sus304/RockVis
[ "171298006057af63f5ef01b149324c9f0623e0dc" ]
[ "RockVis.py" ]
[ "'''\r\nMIT License\r\nCopyright (c) 2017 Susumu Tanaka\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n'''\r\nimport codecs\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import interpolate\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import odeint\r\nfrom scipy.integrate import cumtrapz\r\nimport pymap3d as pm\r\nimport simplekml\r\n\r\n\r\nclass RocketVisualizer:\r\n def __init__(self, sample_freq, g0, diameter, result_name):\r\n self.freq = sample_freq\r\n self.g0 = g0\r\n self.d = diameter\r\n self.A = 0.25 * self.d ** 2 * np.pi # [m2]\r\n self.result_name = result_name\r\n self.R_air = 287.1\r\n self.gamma_air = 1.4\r\n self.temp_slope = 6.49 # [K/km] < 11 km alt\r\n\r\n def search_liftoff(self, acc_body_axis_log):\r\n # 機軸加速度を頭から読んでthreshold_time[sec]間threshold_acc[G]が持続したindexが離床タイミング\r\n threshold_time = 0.2 # [sec]\r\n threshold_acc = 1.5 # [G]\r\n for i in range(len(acc_body_axis_log)):\r\n if np.sum(acc_body_axis_log[i:i+int(threshold_time * self.freq)] > threshold_acc * self.g0) >= int(threshold_time * self.freq):\r\n index_liftoff = i\r\n return index_liftoff\r\n\r\n def INS_flight_path_analysis(self, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z, elv0, azi0, roll0):\r\n '''\r\n Input coordinate (strap down INS sensor)\\n\r\n +X:Side\\n\r\n +Y:Launch direction\\n\r\n -Y:Launcher\\n\r\n +Z:Altitude(Body Axis)\\n\r\n -Z:Ground\\n\r\n Acc[G-Abs]/Gyro[rad/s]\r\n '''\r\n coordinate = Coordinate()\r\n\r\n self.acc_body_x_G_log = acc_z # [G]\r\n self.acc_body_y_G_log = -acc_x\r\n self.acc_body_z_G_log = -acc_y \r\n\r\n acc_body_x_log = acc_z * self.g0 # [m/s2]\r\n acc_body_y_log = -acc_x * self.g0\r\n acc_body_z_log = -acc_y * self.g0\r\n gyro_body_x_log = gyro_z - np.average(gyro_z[:int(self.freq)]) # [rad/s]\r\n gyro_body_y_log = -gyro_x + np.average(gyro_x[:int(self.freq)])\r\n gyro_body_z_log = -gyro_y + np.average(gyro_y[:int(self.freq)])\r\n\r\n self.index_liftoff = self.search_liftoff(acc_body_x_log)\r\n time_log = np.linspace(-self.index_liftoff / self.freq, (len(acc_body_x_log) - self.index_liftoff) / self.freq, len(acc_body_x_log))\r\n\r\n plt.figure()\r\n plt.plot(time_log, self.acc_body_x_G_log, label='X:Body Axis')\r\n plt.plot(time_log, self.acc_body_y_G_log, label='Y:Body Side')\r\n plt.plot(time_log, self.acc_body_z_G_log, label='Z:Body Side')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Acceleration Body [G]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Acc_body_G.png')\r\n plt.figure()\r\n 
plt.plot(time_log, acc_body_x_log, label='X:Body Axis')\r\n plt.plot(time_log, acc_body_y_log, label='Y:Body Side')\r\n plt.plot(time_log, acc_body_z_log, label='Z:Body Side')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Acceleration Body [m/s2]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Acc_body.png')\r\n plt.figure()\r\n plt.plot(time_log, np.degrees(gyro_body_x_log), label='X:Roll')\r\n plt.plot(time_log, np.degrees(gyro_body_y_log), label='Y:Pitch(initial)')\r\n plt.plot(time_log, np.degrees(gyro_body_z_log), label='Z:Yaw(initial)')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Angler Velocity Body [deg/s]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Gyro_body.png')\r\n\r\n self.time_log = time_log[self.index_liftoff:]\r\n self.acc_body_x_log = acc_body_x_log[self.index_liftoff:]\r\n self.acc_body_y_log = acc_body_y_log[self.index_liftoff:]\r\n self.acc_body_z_log = acc_body_z_log[self.index_liftoff:]\r\n self.acc_body_log = np.c_[self.acc_body_x_log, self.acc_body_y_log, self.acc_body_z_log]\r\n self.acc_body_x_G_log = self.acc_body_x_G_log[self.index_liftoff:]\r\n self.acc_body_y_G_log = self.acc_body_y_G_log[self.index_liftoff:]\r\n self.acc_body_z_G_log = self.acc_body_z_G_log[self.index_liftoff:]\r\n self.acc_body_G_log = np.c_[self.acc_body_x_G_log, self.acc_body_y_G_log, self.acc_body_z_G_log]\r\n self.gyro_body_x_log = gyro_body_x_log[self.index_liftoff:]\r\n self.gyro_body_y_log = gyro_body_y_log[self.index_liftoff:]\r\n self.gyro_body_z_log = gyro_body_z_log[self.index_liftoff:]\r\n self.gyro_body_log = np.c_[self.gyro_body_x_log, self.gyro_body_y_log, self.gyro_body_z_log]\r\n\r\n gyro_x_polate = interpolate.interp1d(self.time_log, self.gyro_body_x_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n gyro_y_polate = interpolate.interp1d(self.time_log, self.gyro_body_y_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n gyro_z_polate = interpolate.interp1d(self.time_log, self.gyro_body_z_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n\r\n def kinematic(quat, t):\r\n p = gyro_x_polate(t)\r\n q = gyro_y_polate(t)\r\n r = gyro_z_polate(t)\r\n # quat = coordinate.quat_normalize(quat)\r\n tersor_0 = [0.0, r, -q, p]\r\n tersor_1 = [-r, 0.0, p, q]\r\n tersor_2 = [q, -p, 0.0, r]\r\n tersor_3 = [-p, -q, -r, 0.0]\r\n tersor = np.array([tersor_0, tersor_1, tersor_2, tersor_3])\r\n quatdot = 0.5 * tersor.dot(quat)\r\n return quatdot\r\n\r\n quat_init = coordinate.euler2quat(azi0, elv0, roll0)\r\n self.quat_log = odeint(kinematic, quat_init, self.time_log)\r\n DCM_ENU2Body_log = np.array(list(map(coordinate.DCM_ENU2Body_quat, self.quat_log)))\r\n DCM_Body2ENU_log = np.array([DCM.transpose() for DCM in DCM_ENU2Body_log])\r\n self.attitude_log = np.array([coordinate.quat2euler(DCM) for DCM in DCM_ENU2Body_log]) # [deg]\r\n\r\n self.gravity_body_x_log = DCM_ENU2Body_log.dot([0.0, 0.0, self.g0])[:, 0]\r\n\r\n self.vel_body_x_log = cumtrapz(self.acc_body_x_log, self.time_log, initial=0.0)\r\n self.vel_body_y_log = cumtrapz(self.acc_body_y_log, self.time_log, initial=0.0)\r\n self.vel_body_z_log = cumtrapz(self.acc_body_z_log, self.time_log, initial=0.0)\r\n self.vel_body_log = np.c_[self.vel_body_x_log, self.vel_body_y_log, self.vel_body_z_log]\r\n\r\n self.acc_ENU_log = np.array([DCM.dot(acc_body) for DCM, acc_body in zip(DCM_Body2ENU_log, np.array(self.acc_body_log))])\r\n self.acc_ENU_x_log = self.acc_ENU_log[:, 0]\r\n self.acc_ENU_y_log = self.acc_ENU_log[:, 1]\r\n self.acc_ENU_z_log = 
self.acc_ENU_log[:, 2] - self.g0 # ToDo:重力加速度可変?\r\n\r\n self.vel_ENU_x_log = cumtrapz(self.acc_ENU_x_log, self.time_log, initial=0.0)\r\n self.vel_ENU_y_log = cumtrapz(self.acc_ENU_y_log, self.time_log, initial=0.0)\r\n self.vel_ENU_z_log = cumtrapz(self.acc_ENU_z_log, self.time_log, initial=0.0)\r\n self.vel_ENU_log = np.c_[self.vel_ENU_x_log, self.vel_ENU_y_log, self.vel_ENU_z_log]\r\n\r\n self.pos_ENU_x_log = cumtrapz(self.vel_ENU_x_log, self.time_log, initial=0.0)\r\n self.pos_ENU_y_log = cumtrapz(self.vel_ENU_y_log, self.time_log, initial=0.0)\r\n self.pos_ENU_z_log = cumtrapz(self.vel_ENU_z_log, self.time_log, initial=0.0)\r\n self.pos_ENU_log = np.c_[self.pos_ENU_x_log, self.pos_ENU_y_log, self.pos_ENU_z_log]\r\n\r\n self.index_landing = int(20.0 * self.freq)\r\n # self.index_landing = len(self.pos_ENU_z_log)\r\n\r\n output_array = np.c_[self.time_log, self.acc_body_log, self.vel_body_log, self.acc_ENU_log, self.vel_ENU_log, self.pos_ENU_log, np.rad2deg(self.gyro_body_log), self.quat_log, self.attitude_log]\r\n header = 'time[s]' ',acc_body_axial[m/s2],acc_body_side[m/s2],acc_body_upper[m/s2]' \\\r\n ',vel_body_axial[m/s],vel_body_side[m/s],vel_body_upper[m/s]'\\\r\n ',acc_ENU_East[m/s2],acc_ENU_North[m/s2],acc_ENU_Up[m/s2]'\\\r\n ',vel_ENU_East[m/s],vel_ENU_North[m/s],vel_ENU_Up[m/s]'\\\r\n ',pos_ENU_East[m],pos_ENU_North[m],pos_ENU_Up[m]'\\\r\n ',gyro_body_axial[deg/s],gyro_body_side[deg/s],gyro_body_upper[deg/s]'\\\r\n ',quatrnion1[-],quatrnion2[-],quatrnion3[-],quatrnion4[-]'\\\r\n ',roll[deg],elevation[deg],yaw[deg]'\r\n np.savetxt(self.result_name + '_flight_log.csv', output_array, delimiter=',', fmt='%0.5f', header=header, comments='')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.vel_body_x_log[:self.index_landing], label='X:Body Axis')\r\n plt.plot(self.time_log[:self.index_landing], self.vel_body_y_log[:self.index_landing], label='Y:Body Side')\r\n plt.plot(self.time_log[:self.index_landing], self.vel_body_z_log[:self.index_landing], label='Z:Body Upper')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Velocity Body [m/s]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Vel_body.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.quat_log[:self.index_landing, 0], label='p1')\r\n plt.plot(self.time_log[:self.index_landing], self.quat_log[:self.index_landing, 1], label='p2')\r\n plt.plot(self.time_log[:self.index_landing], self.quat_log[:self.index_landing, 2], label='p3')\r\n plt.plot(self.time_log[:self.index_landing], self.quat_log[:self.index_landing, 3], label='p4')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Quatrnion [-]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Quatrnion.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.attitude_log[:self.index_landing, 0], label='Azimuth')\r\n plt.plot(self.time_log[:self.index_landing], self.attitude_log[:self.index_landing, 1], label='Elevation')\r\n plt.plot(self.time_log[:self.index_landing], self.attitude_log[:self.index_landing, 2], label='Roll')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Angle [deg]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Aittitude.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.acc_ENU_x_log[:self.index_landing], label='X:East')\r\n plt.plot(self.time_log[:self.index_landing], self.acc_ENU_y_log[:self.index_landing], label='Y:North')\r\n plt.plot(self.time_log[:self.index_landing], 
self.acc_ENU_z_log[:self.index_landing], label='Z:Up')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Acceleration ENU [m/s2]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Acc_ENU.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.vel_ENU_x_log[:self.index_landing], label='X:East')\r\n plt.plot(self.time_log[:self.index_landing], self.vel_ENU_y_log[:self.index_landing], label='Y:North')\r\n plt.plot(self.time_log[:self.index_landing], self.vel_ENU_z_log[:self.index_landing], label='Z:Up')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Velocity ENU [m/s]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Vel_ENU.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ENU_x_log[:self.index_landing], label='X:East')\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ENU_y_log[:self.index_landing], label='Y:North')\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ENU_z_log[:self.index_landing], label='Z:Up')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Position ENU [m]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Pos_ENU.png')\r\n\r\n def complementaly_fileter(self, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z, elv0, azi0, roll0):\r\n '''\r\n Input coordinate (strap down INS sensor)\\n\r\n +X:Side\\n\r\n +Y:Launch direction\\n\r\n -Y:Launcher\\n\r\n +Z:Altitude(Body Axis)\\n\r\n -Z:Ground\\n\r\n Acc[G-Abs]/Gyro[rad/s]\r\n '''\r\n coordinate = Coordinate()\r\n\r\n self.acc_body_x_G_log = acc_z # [G]\r\n self.acc_body_y_G_log = -acc_x\r\n self.acc_body_z_G_log = -acc_y \r\n\r\n acc_body_x_log = acc_z * self.g0 # [m/s2]\r\n acc_body_y_log = -acc_x * self.g0\r\n acc_body_z_log = -acc_y * self.g0\r\n gyro_body_x_log = gyro_z - np.average(gyro_z[:int(self.freq)]) # [rad/s]\r\n gyro_body_y_log = -gyro_x + np.average(gyro_x[:int(self.freq)])\r\n gyro_body_z_log = -gyro_y + np.average(gyro_y[:int(self.freq)])\r\n\r\n self.index_liftoff = self.search_liftoff(acc_body_x_log)\r\n time_log = np.linspace(-self.index_liftoff / self.freq, (len(acc_body_x_log) - self.index_liftoff) / self.freq, len(acc_body_x_log))\r\n\r\n self.time_log = time_log[self.index_liftoff:]\r\n self.acc_body_x_log = acc_body_x_log[self.index_liftoff:]\r\n self.acc_body_y_log = acc_body_y_log[self.index_liftoff:]\r\n self.acc_body_z_log = acc_body_z_log[self.index_liftoff:]\r\n self.acc_body_log = np.c_[self.acc_body_x_log, self.acc_body_y_log, self.acc_body_z_log]\r\n self.gyro_body_x_log = gyro_body_x_log[self.index_liftoff:]\r\n self.gyro_body_y_log = gyro_body_y_log[self.index_liftoff:]\r\n self.gyro_body_z_log = gyro_body_z_log[self.index_liftoff:]\r\n self.gyro_body_log = np.c_[self.gyro_body_x_log, self.gyro_body_y_log, self.gyro_body_z_log]\r\n\r\n gyro_x_polate = interpolate.interp1d(self.time_log, self.gyro_body_x_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n gyro_y_polate = interpolate.interp1d(self.time_log, self.gyro_body_y_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n gyro_z_polate = interpolate.interp1d(self.time_log, self.gyro_body_z_log, kind='linear', bounds_error=False, fill_value=(0.0, 0.0))\r\n\r\n def kinematic(quat, t):\r\n p = gyro_x_polate(t)\r\n q = gyro_y_polate(t)\r\n r = gyro_z_polate(t)\r\n # quat = coordinate.quat_normalize(quat)\r\n tersor_0 = [0.0, r, -q, p]\r\n tersor_1 = [-r, 0.0, p, q]\r\n tersor_2 = [q, -p, 0.0, r]\r\n tersor_3 = [-p, -q, -r, 0.0]\r\n tersor = np.array([tersor_0, tersor_1, tersor_2, tersor_3])\r\n quatdot = 
0.5 * tersor.dot(quat)\r\n return quatdot\r\n\r\n quat_init = coordinate.euler2quat(azi0, elv0, roll0)\r\n self.quat_log = odeint(kinematic, quat_init, self.time_log)\r\n DCM_ENU2Body_log = np.array(list(map(coordinate.DCM_ENU2Body_quat, self.quat_log)))\r\n DCM_Body2ENU_log = np.array([DCM.transpose() for DCM in DCM_ENU2Body_log])\r\n self.attitude_log = np.array([coordinate.quat2euler(DCM) for DCM in DCM_ENU2Body_log]) # [deg]\r\n\r\n self.gravity_body_x_log = DCM_ENU2Body_log.dot([0.0, 0.0, self.g0])[:, 0]\r\n\r\n self.vel_body_x_log = cumtrapz(self.acc_body_x_log, self.time_log, initial=0.0)\r\n self.vel_body_y_log = cumtrapz(self.acc_body_y_log, self.time_log, initial=0.0)\r\n self.vel_body_z_log = cumtrapz(self.acc_body_z_log, self.time_log, initial=0.0)\r\n self.vel_body_log = np.c_[self.vel_body_x_log, self.vel_body_y_log, self.vel_body_z_log]\r\n\r\n self.acc_ENU_log = np.array([DCM.dot(acc_body) for DCM, acc_body in zip(DCM_Body2ENU_log, np.array(self.acc_body_log))])\r\n self.acc_ENU_x_log = self.acc_ENU_log[:, 0]\r\n self.acc_ENU_y_log = self.acc_ENU_log[:, 1]\r\n self.acc_ENU_z_log = self.acc_ENU_log[:, 2] - self.g0 # ToDo:重力加速度可変?\r\n\r\n self.vel_ENU_x_log = cumtrapz(self.acc_ENU_x_log, self.time_log, initial=0.0)\r\n self.vel_ENU_y_log = cumtrapz(self.acc_ENU_y_log, self.time_log, initial=0.0)\r\n self.vel_ENU_z_log = cumtrapz(self.acc_ENU_z_log, self.time_log, initial=0.0)\r\n self.vel_ENU_log = np.c_[self.vel_ENU_x_log, self.vel_ENU_y_log, self.vel_ENU_z_log]\r\n\r\n self.pos_ENU_x_log = cumtrapz(self.vel_ENU_x_log, self.time_log, initial=0.0)\r\n self.pos_ENU_y_log = cumtrapz(self.vel_ENU_y_log, self.time_log, initial=0.0)\r\n self.pos_ENU_z_log = cumtrapz(self.vel_ENU_z_log, self.time_log, initial=0.0)\r\n self.pos_ENU_log = np.c_[self.pos_ENU_x_log, self.pos_ENU_y_log, self.pos_ENU_z_log]\r\n\r\n\r\n def extend_flight_path_earth(self, launch_point_LLH):\r\n lat, lon, h = pm.enu2geodetic(self.pos_ENU_log[:self.index_landing,0], self.pos_ENU_log[:self.index_landing,1], self.pos_ENU_log[:self.index_landing,2], launch_point_LLH[0], launch_point_LLH[1], launch_point_LLH[2]) # lat, lon, h\r\n self.pos_LLH_log = np.c_[lat, lon, h]\r\n ecef_x, ecef_y, ecef_z = pm.enu2ecef(self.pos_ENU_log[:self.index_landing,0], self.pos_ENU_log[:self.index_landing,1], self.pos_ENU_log[:self.index_landing,2], launch_point_LLH[0], launch_point_LLH[1], launch_point_LLH[2])\r\n self.pos_ECEF_log = np.c_[ecef_x, ecef_y, ecef_z]\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ECEF_log[:self.index_landing, 0] /1e3, label='X')\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ECEF_log[:self.index_landing, 1] /1e3, label='Y')\r\n plt.plot(self.time_log[:self.index_landing], self.pos_ECEF_log[:self.index_landing, 2] /1e3, label='Z')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Position ECEF [km]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Pos_ECEF.png')\r\n\r\n kml = simplekml.Kml(open=1)\r\n Log_LLH = []\r\n for i in range(len(self.pos_LLH_log[:,0])):\r\n if 0 == i % 10:\r\n Log_LLH.append([self.pos_LLH_log[i,1], self.pos_LLH_log[i,0], self.pos_LLH_log[i,2]])\r\n line = kml.newlinestring()\r\n line.style.linestyle.width = 4\r\n line.style.linestyle.color = simplekml.Color.red\r\n line.extrude = 1\r\n line.altitudemode = simplekml.AltitudeMode.absolute\r\n line.coords = Log_LLH\r\n line.style.linestyle.colormode = simplekml.ColorMode.random\r\n kml.save(self.result_name + '_trajectory.kml')\r\n\r\n\r\n def 
extend_pressure_altitude_analysis(self, Pair_log, Pair_0, Tair_0):\r\n '''\r\n Input: Pair_log[kPa], Pair_0[kPa],Tair_0[degC]\r\n '''\r\n dPair_log = Pair_log - np.average(Pair_log[:int(self.freq*0.5)])\r\n self.Pair_log = Pair_0 + dPair_log[self.index_liftoff:]\r\n self.alt_pressure_log = (((Pair_0 / self.Pair_log) ** (1.0 / 5.25607)) - 1) * (Tair_0 + 273.15) / (self.temp_slope / 1e3)\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.alt_pressure_log[:self.index_landing])\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Altitude [m]')\r\n plt.grid()\r\n plt.savefig(self.result_name + '_PressureAlt.png')\r\n\r\n np.savetxt(self.result_name + '_PressureAlt_log.csv', np.c_[self.time_log, self.Pair_log, self.alt_pressure_log], delimiter=',', comments='', fmt='%0.5f', header='time[sec],air pressure[kPa],altitude[m]')\r\n\r\n def extend_thrust_analysis(self, mass_log, mach_array_interpolate, Cd_array_interpolate, Tair_0):\r\n self.Tair_log = (Tair_0 + 273.15) - self.pos_ENU_z_log * (self.temp_slope / 1e3) # [K]\r\n self.rho_log = self.Pair_log * 1e3 / (self.R_air * self.Tair_log)\r\n self.Cs_log = np.sqrt(self.gamma_air * self.R_air * self.Tair_log)\r\n self.mach_log = self.vel_body_x_log / self.Cs_log\r\n\r\n Cd = interpolate.interp1d(mach_array_interpolate, Cd_array_interpolate, kind='linear', bounds_error=False, fill_value=(Cd_array_interpolate[0], Cd_array_interpolate[-1]))\r\n self.Cd_log = Cd(self.mach_log)\r\n\r\n self.drag_log = 0.5 * self.rho_log * self.vel_body_x_log ** 2 * self.Cd_log * self.A\r\n self.mg_axial_log = mass_log * self.gravity_body_x_log\r\n self.F_axial = self.acc_body_x_log * mass_log\r\n self.thrust = self.F_axial + self.drag_log - self.mg_axial_log\r\n\r\n output_array = np.c_[self.time_log, self.mach_log, self.Cd_log, self.drag_log, self.F_axial, self.thrust]\r\n header = 'time[s],mach[-],Cd[-],drag[N],axial[N],thrust[N]'\r\n np.savetxt(self.result_name + '_force_log.csv', output_array, delimiter=',', fmt='%0.5f', header=header, comments='')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.mach_log[:self.index_landing])\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Mach number [-]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Mach.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.drag_log[:self.index_landing], label='Drag')\r\n plt.plot(self.time_log[:self.index_landing], self.thrust[:self.index_landing], label='Thrust')\r\n plt.plot(self.time_log[:self.index_landing], self.F_axial[:self.index_landing], label='Axial')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Force [N]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Force.png')\r\n\r\n plt.figure()\r\n plt.plot(self.time_log[:self.index_landing], self.thrust[:self.index_landing], label='Thrust')\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Thrust [N]')\r\n plt.grid()\r\n plt.legend()\r\n plt.savefig(self.result_name + '_Thrust.png')\r\n\r\nclass Coordinate:\r\n def DCM_ENU2Body_euler(self, azimuth, elevation, roll):\r\n '''\r\n Input: Azimuth[rad],Elevation[rad],Roll[rad]\r\n '''\r\n DCM_0 = [np.cos(azimuth) * np.cos(elevation), np.sin(azimuth) * np.cos(elevation), -np.sin(elevation)]\r\n DCM_1 = [-np.sin(azimuth) * np.cos(roll) + np.cos(azimuth) * np.sin(elevation) * np.sin(roll), np.cos(azimuth) * np.cos(roll) + np.sin(azimuth) * np.sin(elevation) * np.sin(roll), np.cos(elevation) * np.sin(roll)]\r\n DCM_2 = [np.sin(azimuth) * np.sin(roll) + np.cos(azimuth) * np.sin(elevation) * np.cos(roll), 
-np.cos(azimuth) * np.sin(roll) + np.sin(azimuth) * np.sin(elevation) * np.cos(roll), np.cos(elevation) * np.cos(roll)]\r\n DCM_ENU2Body_euler = np.array([DCM_0, DCM_1, DCM_2])\r\n return DCM_ENU2Body_euler\r\n\r\n def quat_normalize(self, quat):\r\n norm = np.linalg.norm(quat)\r\n quat = quat / norm\r\n return quat\r\n\r\n def DCM_ENU2Body_quat(self, quat):\r\n q0 = quat[0]\r\n q1 = quat[1]\r\n q2 = quat[2]\r\n q3 = quat[3]\r\n\r\n DCM_0 = [q0 * q0 - q1*q1 - q2*q2 + q3*q3, 2.0 * (q0 * q1 + q2 * q3) , 2.0 * (q0 * q2 - q1 * q3)]\r\n DCM_1 = [2.0 * (q0 * q1 - q2 * q3) , q1*q1 - q0*q0 - q2*q2 + q3*q3, 2.0 * (q1 * q2 + q0 * q3)]\r\n DCM_2 = [2.0 * (q0 * q2 + q1 * q3) , 2.0 * (q1 * q2 - q0 * q3) , q2*q2 - q0*q0 - q1*q1 + q3*q3]\r\n DCM_ENU2Body_quat = np.array([DCM_0, DCM_1, DCM_2])\r\n return DCM_ENU2Body_quat\r\n\r\n def euler2quat(self, azimuth, elevation, roll=0.0):\r\n '''\r\n Input: Azimuth[deg],Elevation[deg],Roll[deg]\r\n '''\r\n azimuth = np.radians(azimuth)\r\n elevation = np.radians(elevation)\r\n roll = np.radians(roll)\r\n\r\n DCM = self.DCM_ENU2Body_euler(azimuth, elevation, roll)\r\n q0 = 0.5 * np.sqrt(1.0 + DCM[0,0] - DCM[1,1] - DCM[2,2])\r\n q1 = 0.5 * np.sqrt(1.0 - DCM[0,0] + DCM[1,1] - DCM[2,2])\r\n q2 = 0.5 * np.sqrt(1.0 - DCM[0,0] - DCM[1,1] + DCM[2,2])\r\n q3 = 0.5 * np.sqrt(1.0 + DCM[0,0] + DCM[1,1] + DCM[2,2])\r\n\r\n quat_max_index = np.argmax([q0, q1, q2, q3])\r\n if quat_max_index == 0:\r\n q0 = 0.5 * np.sqrt(1.0 + DCM[0, 0] - DCM[1,1] - DCM[2,2])\r\n q1 = (DCM[0, 1] + DCM[1, 0]) / (4.0 * q0)\r\n q2 = (DCM[2, 0] + DCM[0, 2]) / (4.0 * q0)\r\n q3 = (DCM[1, 2] - DCM[2, 1]) / (4.0 * q0)\r\n elif quat_max_index == 1:\r\n q1 = 0.5 * np.sqrt(1.0 - DCM[0, 0] + DCM[1,1] - DCM[2,2])\r\n q0 = (DCM[0, 1] + DCM[1, 0]) / (4.0 * q1)\r\n q2 = (DCM[1, 2] + DCM[2, 1]) / (4.0 * q1)\r\n q3 = (DCM[2, 0] - DCM[0, 2]) / (4.0 * q1)\r\n elif quat_max_index == 2:\r\n q2 = 0.5 * np.sqrt(1.0 - DCM[0, 0] - DCM[1,1] + DCM[2,2])\r\n q0 = (DCM[2, 0] + DCM[0, 2]) / (4.0 * q2)\r\n q1 = (DCM[1, 2] + DCM[2, 1]) / (4.0 * q2)\r\n q3 = (DCM[0, 1] - DCM[1, 0]) / (4.0 * q2)\r\n elif quat_max_index == 3:\r\n q3 = 0.5 * np.sqrt(1.0 + DCM[0, 0] + DCM[1,1] + DCM[2,2])\r\n q0 = (DCM[1, 2] - DCM[2, 1]) / (4.0 * q3)\r\n q1 = (DCM[2, 0] - DCM[0, 2]) / (4.0 * q3)\r\n q2 = (DCM[0, 1] - DCM[1, 0]) / (4.0 * q3) \r\n\r\n quat = np.array([q0, q1, q2, q3])\r\n quat = self.quat_normalize(quat)\r\n\r\n return quat\r\n\r\n def quat2euler(self, DCM_NED2Body):\r\n DCM = DCM_NED2Body\r\n azimuth = np.rad2deg(np.arctan2(DCM[0, 1], DCM[0, 0]))\r\n elevation = np.rad2deg(-np.arcsin(DCM[0, 2]))\r\n roll = np.rad2deg(np.arctan2(DCM[1, 2], DCM[2, 2]))\r\n\r\n return azimuth, elevation, roll\r\n\r\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.radians", "numpy.sqrt", "numpy.rad2deg", "scipy.integrate.odeint", "matplotlib.pyplot.plot", "numpy.arctan2", "scipy.integrate.cumtrapz", "numpy.arcsin", "numpy.sin", "scipy.interpolate.interp1d", "numpy.argmax", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.degrees", "numpy.linalg.norm", "numpy.cos", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ] ]
oxu2/flyingsquid
[ "07c38f259d962fa2b3581793045af4687f91be93" ]
[ "examples/tutorials/tutorial_helpers.py" ]
[ "import numpy as np\nfrom numpy.random import seed, rand\nimport itertools\n\ndef exponential_family (lam, y, theta, theta_y):\n # without normalization\n return np.exp(theta_y * y + y * np.dot(theta, lam))\n\n# create vector describing cumulative distribution of lambda_1, ... lambda_m, Y\ndef make_pdf(m, v, theta, theta_y, lst):\n p = np.zeros(len(lst))\n for i in range(len(lst)):\n labels = lst[i] \n p[i] = exponential_family(labels[0:m], labels[v-1], theta, theta_y)\n \n return p/sum(p)\n\ndef make_cdf(pdf):\n return np.cumsum(pdf)\n\n# draw a set of lambda_1, ... lambda_m, Y based on the distribution\ndef sample(lst, cdf):\n r = np.random.random_sample()\n smaller = np.where(cdf < r)[0]\n if len(smaller) == 0:\n i = 0\n else:\n i = smaller.max() + 1\n return lst[i]\n\ndef generate_data(n, theta, m, theta_y=0):\n v = m+1\n \n lst = list(map(list, itertools.product([-1, 1], repeat=v)))\n pdf = make_pdf(m, v, theta, theta_y, lst)\n cdf = make_cdf(pdf)\n\n sample_matrix = np.zeros((n,v))\n for i in range(n):\n sample_matrix[i,:] = sample(lst,cdf)\n \n return sample_matrix\n\ndef synthetic_data_basics():\n seed(0)\n \n n_train = 10000\n n_dev = 500\n \n m = 5\n theta = [1.5,.5,.2,.2,.05]\n abstain_rate = [.8, .88, .28, .38, .45]\n \n train_data = generate_data(n_train, theta, m)\n dev_data = generate_data(n_dev, theta, m)\n \n L_train = train_data[:,:-1]\n L_dev = dev_data[:,:-1]\n Y_dev = dev_data[:,-1]\n \n train_values = rand(n_train * m).reshape(L_train.shape)\n dev_values = rand(n_dev * m).reshape(L_dev.shape)\n \n L_train[train_values < (abstain_rate,) * n_train] = 0\n L_dev[dev_values < (abstain_rate,) * n_dev] = 0\n \n return L_train, L_dev, Y_dev\n\ndef print_statistics(L_dev, Y_dev):\n m = L_dev.shape[1]\n \n for i in range(m):\n acc = np.sum(L_dev[:,i] == Y_dev)/np.sum(L_dev[:,i] != 0)\n abstains = np.sum(L_dev[:,i] == 0)/Y_dev.shape[0]\n \n print('LF {}: Accuracy {}%, Abstain rate {}%'.format(\n i, int(acc * 100), int((abstains) * 100)))" ]
[ [ "numpy.dot", "numpy.random.seed", "numpy.cumsum", "numpy.random.random_sample", "numpy.random.rand", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
matthias95/CNN_ImageSegmentation
[ "3eff992a2789af9a33a6e24e6f3ff5d3e600b395" ]
[ "cnn_image_segmentation/resnet_segmentation_model.py" ]
[ "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\n\ndef getBuildInResNet50(trainable=False, pretrained=True):\n resnet50 = tf.keras.applications.resnet50.ResNet50(include_top=False, weights=('imagenet' if pretrained else None)) # 'imagenet'\n \n for layer in resnet50.layers:\n layer.trainable = trainable\n\n layers = [ resnet50.get_layer('conv2_block3_out'), resnet50.get_layer('conv3_block4_out'), resnet50.get_layer('conv4_block6_out'), resnet50.get_layer('conv5_block3_out')]\n return resnet50, layers\n \n\ndef residualBlock(inputs, filters, stride=1):\n with tf.name_scope('residualBlock') as scope:\n convs = tf.keras.Sequential([\n tf.keras.layers.Conv2D(filters=filters, kernel_size=(1,1), strides=(stride,stride), padding='SAME'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), padding='SAME'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Conv2D(filters=filters * 4, kernel_size=(1,1), strides=(1,1), padding='SAME'),\n tf.keras.layers.BatchNormalization()\n ])\n\n\n if (stride != 1) or (inputs.shape[-1] != (filters * 4)):\n skip = tf.keras.layers.Conv2D(filters=filters * 4, kernel_size=(1,1), strides=(stride,stride), padding='SAME')(inputs)\n skip = tf.keras.layers.BatchNormalization()(skip)\n return tf.keras.layers.ReLU()(convs(inputs) + skip)\n\n return tf.keras.layers.ReLU()(convs(inputs) + inputs)\n\ndef residualBlockV2(inputs, filters, stride=1):\n\n x = inputs\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.Conv2D(filters=filters, kernel_size=(1,1), strides=(stride,stride), padding='SAME')(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3,3), strides=(1,1), padding='SAME')(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.Conv2D(filters=filters * 4, kernel_size=(1,1), strides=(1,1), padding='SAME')(x)\n\n\n\n if (stride != 1) or (inputs.shape[-1] != (filters * 4)):\n return x + tf.keras.layers.Conv2D(filters=filters * 4, kernel_size=(1,1), strides=(stride,stride), padding='SAME')(inputs)\n\n return x + inputs\n\n\ndef getResNet50(residualBlock=residualBlock):\n inputs = tf.keras.Input((None, None, 3))\n\n x = inputs \n\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=(7,7), strides=(2,2), padding='SAME')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=(2,2))(x)\n\n x = residualBlock(x, 64)\n x = residualBlock(x, 64)\n x = residualBlock(x, 64)\n\n x = residualBlock(x, 128, stride=2)\n x = residualBlock(x, 128)\n x = residualBlock(x, 128)\n x = residualBlock(x, 128)\n\n x = residualBlock(x, 256, stride=2)\n x = residualBlock(x, 256)\n x = residualBlock(x, 256)\n x = residualBlock(x, 256)\n x = residualBlock(x, 256)\n x = residualBlock(x, 256)\n\n x = residualBlock(x, 512, stride=2)\n x = residualBlock(x, 512)\n x = residualBlock(x, 512)\n \n resnet50 = tf.keras.Model(inputs=inputs, outputs=x)\n \n print('Using: ' + residualBlock.__name__)\n \n if residualBlock.__name__ == 'residualBlock':\n layers = [resnet50.get_layer('re_lu_9'), resnet50.get_layer('re_lu_21'), resnet50.get_layer('re_lu_39'), resnet50.get_layer('re_lu_48')]\n elif residualBlock.__name__ == 'residualBlockV2':\n layers = [resnet50.get_layer('re_lu_10'), 
resnet50.get_layer('re_lu_22'), resnet50.get_layer('re_lu_40'), resnet50.get_layer('re_lu_48')]\n \n else:\n print('Invalid residualBlock')\n \n return resnet50, layers\n\ndef oneByOneConvAndResize(inputs, size):\n diemnsionalityReduction = tf.keras.layers.Conv2D(filters=1, kernel_size=[1,1], padding='SAME', activation=None)\n return tf.image.resize(diemnsionalityReduction(inputs), size, method=tf.image.ResizeMethod.BILINEAR)\n\ndef getSegmentationModel(resnet50, layers):\n shape = tf.shape(resnet50.input)\n\n addedLayers = tf.keras.layers.Add()([oneByOneConvAndResize(layer.output, [shape[1], shape[2]]) for layer in layers])\n addedLayers = tf.reshape(addedLayers, [shape[0],shape[1], shape[2]])\n output = tf.keras.activations.sigmoid(addedLayers)\n\n segmentationModel = tf.keras.Model(inputs=resnet50.input, outputs={'logits': addedLayers, 'sigmoid': output})\n return segmentationModel\n\nclass SegmentationModel(tf.Module):\n def __init__(self, model):\n super(SegmentationModel, self).__init__()\n self.model = model\n\n @tf.function(input_signature=[tf.TensorSpec([None,None,3], tf.float32)])\n def __call__(self, x):\n x = x + np.array([[[-103.939, -116.779, -123.68 ]]], dtype=np.float32)\n x = tf.expand_dims(x,0)\n return self.model(x, training=False)['sigmoid'][0]\n" ]
[ [ "tensorflow.keras.applications.resnet50.ResNet50", "tensorflow.keras.layers.ReLU", "tensorflow.keras.Input", "tensorflow.shape", "tensorflow.keras.layers.Conv2D", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.keras.Model", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.name_scope", "tensorflow.keras.layers.Add", "numpy.array", "tensorflow.keras.activations.sigmoid", "tensorflow.TensorSpec" ] ]
rainyuxia0112/K-Nearest-Neighbors-Classifier-using-sql-and-datalog
[ "4391734464d8f548bfb64af0893a73f3ee6f7c1e" ]
[ "knn/datapreprocessing.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 14 18:33:50 2019\n\n@author: rain\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom urllib import request\n\ndef get_data(url, name): \n \"\"\"\n url - website\n name - csv name\n \"\"\"\n rq = request.urlopen(url)\n res = rq.read()\n \n # Save the string to a file\n csvstr = str(res).strip(\"b'\")\n \n lines = csvstr.split(\"\\\\n\")\n f = open(\"data.csv\", \"w\")\n for line in lines:\n f.write(line + \"\\n\")\n f.close()\n \n import pandas as pd\n knn = pd.read_csv('data.csv')\n knn.iloc[:, -1] = knn.iloc[:, -1].map(lambda x: int(x[0]))\n data = knn.iloc[:, :2]\n data_label = knn.iloc[:, -1]\n data_label = data_label.rename(\"label\")\n data = pd.concat([data, data_label], axis = 1) \n data.to_csv(name+'.csv', header=False)\n \n\nif __name__ == '__main__':\n urls = ['http://archive.ics.uci.edu/ml/machine-learning-databases/hill-valley/Hill_Valley_without_noise_Testing.data', \n 'http://archive.ics.uci.edu/ml/machine-learning-databases/hill-valley/Hill_Valley_without_noise_Training.data']\n names = ['arc', 'rc1']\n for url, name in zip(urls, names):\n get_data(url, name)\n " ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
semihyagcioglu/allennlp
[ "cb179a6d71285d7dff9120d980e6656f48aab489" ]
[ "allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py" ]
[ "\"\"\"\nThe BidirectionalTransformerEncoder from Calypso.\nThis is basically the transformer from http://nlp.seas.harvard.edu/2018/04/03/attention.html\nso credit to them.\n\nThis code should be considered \"private\" in that we have several\ntransformer implementations and may end up deleting this one.\nIf you use it, consider yourself warned.\n\"\"\"\n# pylint: disable=arguments-differ,invalid-name,no-self-use\nfrom typing import Tuple, Callable\nimport math\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom allennlp.common.checks import ExperimentalFeatureWarning\nfrom allennlp.modules.layer_norm import LayerNorm\nfrom allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder\nfrom allennlp.nn import util\n\n\ndef attention(query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = None,\n dropout: Callable = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute 'Scaled Dot Product Attention'\"\"\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim=-1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\ndef subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor:\n \"\"\"Mask out subsequent positions.\"\"\"\n attn_shape = (1, size, size)\n mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n mask = (torch.from_numpy(mask) == 0)\n return mask.to(device)\n\n\nclass PositionalEncoding(torch.nn.Module):\n \"Implement the Positional Encoding function.\"\n def __init__(self, input_dim: int, max_len: int = 5000) -> None:\n super().__init__()\n\n # Compute the positional encodings once in log space.\n positional_encoding = torch.zeros(max_len, input_dim, requires_grad=False)\n position = torch.arange(0, max_len).unsqueeze(1).float()\n div_term = torch.exp(torch.arange(0, input_dim, 2).float() * -(math.log(10000.0) / input_dim))\n positional_encoding[:, 0::2] = torch.sin(position * div_term)\n positional_encoding[:, 1::2] = torch.cos(position * div_term)\n positional_encoding = positional_encoding.unsqueeze(0)\n self.register_buffer('positional_encoding', positional_encoding)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # pylint: disable=arguments-differ\n return x + self.positional_encoding[:, :x.size(1)]\n\n\nclass PositionwiseFeedForward(torch.nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, input_dim: int, ff_dim: int, dropout: float = 0.1) -> None:\n super().__init__()\n self.w_1 = torch.nn.Linear(input_dim, ff_dim)\n self.w_2 = torch.nn.Linear(ff_dim, input_dim)\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # pylint: disable=arguments-differ\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\n\nclass TransformerEncoder(torch.nn.Module):\n \"Core encoder is a stack of N layers\"\n def __init__(self, layer: torch.nn.Module, num_layers: int, return_all_layers: bool = False) -> None:\n super().__init__()\n self.layers = util.clone(layer, num_layers)\n self.norm = LayerNorm(layer.size)\n self.return_all_layers = return_all_layers\n\n def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn.\"\n all_layers = []\n for layer in self.layers:\n x = layer(x, mask)\n if self.return_all_layers:\n all_layers.append(x)\n\n if self.return_all_layers:\n all_layers[-1] = 
self.norm(all_layers[-1])\n return all_layers\n return self.norm(x)\n\n\nclass SublayerConnection(torch.nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity the norm is first as opposed to last.\n \"\"\"\n def __init__(self, size: int, dropout: float) -> None:\n super().__init__()\n self.norm = LayerNorm(size)\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(self, x: torch.Tensor, sublayer: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:\n \"Apply residual connection to any sublayer with the same size.\"\n return x + self.dropout(sublayer(self.norm(x)))\n\n\nclass EncoderLayer(torch.nn.Module):\n \"Encoder is made up of self-attn and feed forward (defined below)\"\n def __init__(self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward: torch.nn.Module,\n dropout: float) -> None:\n super().__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = util.clone(SublayerConnection(size, dropout), 2)\n self.size = size\n\n def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"Follow Figure 1 (left) for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass MultiHeadedAttention(torch.nn.Module):\n def __init__(self, num_heads: int, input_dim: int, dropout: float = 0.1) -> None:\n super().__init__()\n assert input_dim % num_heads == 0, \"input_dim must be a multiple of num_heads\"\n # We assume d_v always equals d_k\n self.d_k = input_dim // num_heads\n self.num_heads = num_heads\n # These linear layers are\n # [query_projection, key_projection, value_projection, concatenated_heads_projection]\n self.linears = util.clone(torch.nn.Linear(input_dim, input_dim), 4)\n self.dropout = torch.nn.Dropout(p=dropout)\n\n def forward(self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = None) -> torch.Tensor:\n if mask is not None:\n # Same mask applied to all h heads.\n # Shape (batch_size, num_heads, timesteps, timesteps)\n mask = mask.unsqueeze(1).expand([-1, self.num_heads, -1, -1])\n\n nbatches = query.size(0)\n\n # 1) Do all the linear projections in batch from d_model => h x d_k\n query, key, value = [layer(x).view(nbatches, -1, self.num_heads, self.d_k).transpose(1, 2)\n for layer, x in zip(self.linears, (query, key, value))]\n\n # 2) Apply attention on all the projected vectors in batch.\n x, _ = attention(query, key, value, mask=mask, dropout=self.dropout)\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.num_heads * self.d_k)\n return self.linears[-1](x)\n\n\ndef make_model(num_layers: int = 6,\n input_size: int = 512, # Attention size\n hidden_size: int = 2048, # FF layer size\n heads: int = 8,\n dropout: float = 0.1,\n return_all_layers: bool = False) -> TransformerEncoder:\n \"Helper: Construct a model from hyperparameters.\"\n attn = MultiHeadedAttention(heads, input_size, dropout)\n ff = PositionwiseFeedForward(input_size, hidden_size, dropout)\n model = TransformerEncoder(EncoderLayer(input_size, attn, ff, dropout),\n num_layers,\n return_all_layers=return_all_layers)\n\n # Initialize parameters with Glorot / fan_avg.\n for p in model.parameters():\n if p.dim() > 1:\n torch.nn.init.xavier_uniform(p)\n return model\n\n\nclass BidirectionalLanguageModelTransformer(Seq2SeqEncoder):\n def __init__(self,\n input_dim: int,\n hidden_dim: int,\n num_layers: int,\n dropout: float = 
0.1,\n input_dropout: float = None,\n return_all_layers: bool = False) -> None:\n\n warnings.warn(\"This particular transformer implementation is a provisional feature \"\n \"that's intended for AI2 internal use and might be deleted at any time. \"\n \"If you use it, consider yourself warned!\",\n ExperimentalFeatureWarning)\n\n super().__init__()\n\n self._return_all_layers = return_all_layers\n self.transformer_layers = num_layers\n self.num_layers = num_layers\n\n self._forward_transformer = make_model(input_size=input_dim,\n hidden_size=hidden_dim,\n num_layers=num_layers,\n dropout=dropout,\n return_all_layers=return_all_layers)\n self._backward_transformer = make_model(input_size=input_dim,\n hidden_size=hidden_dim,\n num_layers=num_layers,\n dropout=dropout,\n return_all_layers=return_all_layers)\n self._position = PositionalEncoding(input_dim)\n\n self.input_dim = input_dim\n self.output_dim = 2 * input_dim\n\n if input_dropout:\n self._dropout = torch.nn.Dropout(input_dropout)\n else:\n self._dropout = lambda x: x\n\n self.should_log_activations = False\n\n def get_attention_masks(self, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns 2 masks of shape (batch_size, timesteps, timesteps) representing\n 1) non-padded elements, and\n 2) elements of the sequence which are permitted to be involved in attention at a given timestep.\n \"\"\"\n device = mask.device\n # Forward case:\n timesteps = mask.size(1)\n # Shape (1, timesteps, timesteps)\n subsequent = subsequent_mask(timesteps, device).int()\n # Broadcasted logical and - we want zero\n # elements where either we have padding from the mask,\n # or we aren't allowed to use the timesteps.\n # Shape (batch_size, timesteps, timesteps)\n forward_mask = mask.unsqueeze(-1) & subsequent\n # Backward case - exactly the same, but transposed.\n backward_mask = forward_mask.transpose(1, 2)\n\n return forward_mask, backward_mask\n\n def forward(self, token_embeddings: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n forward_mask, backward_mask = self.get_attention_masks(mask.int())\n token_embeddings = self._position(token_embeddings)\n token_embeddings = self._dropout(token_embeddings)\n forward_output = self._forward_transformer(token_embeddings, forward_mask)\n backward_output = self._backward_transformer(token_embeddings, backward_mask)\n\n if self._return_all_layers:\n to_return = []\n for forward, backward in zip(forward_output, backward_output):\n to_return.append(torch.cat([forward, backward], -1))\n return to_return\n\n return torch.cat([forward_output, backward_output], -1)\n\n def get_regularization_penalty(self):\n return 0.\n\n def get_input_dim(self) -> int:\n return self.input_dim\n\n def get_output_dim(self) -> int:\n return self.output_dim\n\n def is_bidirectional(self) -> bool:\n return True\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.Dropout", "torch.sin", "torch.zeros", "torch.cat", "torch.from_numpy", "numpy.ones", "torch.matmul", "torch.nn.Linear", "torch.arange", "torch.nn.init.xavier_uniform", "torch.cos" ] ]
magnusross/Deep-LFM
[ "9f6a6c22a1af0ef949607603352b1d3f07c1bb97" ]
[ "tests/test_features.py" ]
[ "from deepLFM import features\nimport torch\n\n\ndef test_I1():\n ans = torch.complex(torch.Tensor([2.53379]), torch.Tensor([0.514595]))\n ours = features.I1(\n torch.Tensor([0.1]),\n torch.Tensor([0.2]),\n torch.Tensor([0.3]),\n torch.Tensor([0.4]),\n torch.Tensor([0.5]),\n )\n assert torch.isclose(ans, ours)\n\n\ndef test_I2():\n ans = torch.complex(torch.Tensor([2.03472]), torch.Tensor([-0.101821]))\n ours = features.I2(\n torch.Tensor([0.1]),\n torch.Tensor([0.2]),\n torch.Tensor([0.3]),\n torch.Tensor([0.4]),\n torch.Tensor([0.5]),\n )\n assert torch.isclose(ans, ours)\n" ]
[ [ "torch.Tensor", "torch.isclose" ] ]
koki0702/deep-learning-from-zero
[ "0aac172fa272160ab691a2b66248fcabb0f82186" ]
[ "ch06/batch_norm_test.py" ]
[ "import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom common.multi_layer_net_extend import MultiLayerNetExtend\nfrom common.optimizer import SGD, Adam\n\n\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\n\nx_train = x_train[:1000]\nt_train = t_train[:1000]\n\nmax_epochs = 20\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.01\n\n\ndef __train(weight_init_std):\n bn_network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10, \n weight_init_std=weight_init_std, use_batchnorm=True)\n network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10,\n weight_init_std=weight_init_std)\n optimizer = SGD(lr=learning_rate)\n \n train_acc_list = []\n bn_train_acc_list = []\n \n iter_per_epoch = max(train_size / batch_size, 1)\n epoch_cnt = 0\n \n for i in range(1000000000):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n \n for _network in (bn_network, network):\n grads = _network.gradient(x_batch, t_batch)\n optimizer.update(_network.params, grads)\n \n if i % iter_per_epoch == 0:\n train_acc = network.accuracy(x_train, t_train)\n bn_train_acc = bn_network.accuracy(x_train, t_train)\n train_acc_list.append(train_acc)\n bn_train_acc_list.append(bn_train_acc)\n \n print(\"epoch:\" + str(epoch_cnt) + \" | \" + str(train_acc) + \" - \" + str(bn_train_acc))\n \n epoch_cnt += 1\n if epoch_cnt >= max_epochs:\n break\n \n return train_acc_list, bn_train_acc_list\n\n\nweight_scale_list = np.logspace(0, -4, num=16)\nx = np.arange(max_epochs)\n\nfor i, w in enumerate(weight_scale_list):\n print(\"============== \" + str(i+1) + \"/16\" + \" ==============\")\n train_acc_list, bn_train_acc_list = __train(w)\n \n plt.subplot(4, 4, i+1)\n plt.title(\"W:\" + str(w))\n if i == 15:\n plt.plot(x, bn_train_acc_list, label='Batch Normalization', markevery=2)\n plt.plot(x, train_acc_list, linestyle=\"--\", label='Normal(without BatchNorm)', markevery=2)\n else:\n plt.plot(x, bn_train_acc_list, markevery=2)\n plt.plot(x, train_acc_list, linestyle=\"--\", markevery=2)\n\n plt.ylim(0, 1.0)\n if i % 4:\n plt.yticks([])\n else:\n plt.ylabel(\"accuracy\")\n if i < 12:\n plt.xticks([])\n else:\n plt.xlabel(\"epochs\")\n plt.legend(loc='lower right')\n \nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.random.choice", "numpy.logspace", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
uwdata/boba
[ "80ff10ffd9a2ae99002bc7e88d173869b86c736c" ]
[ "example/fertility/template.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n# --- (BOBA_CONFIG)\n{\n \"graph\": [\n \"NMO1->ECL1->A\",\n \"NMO2->ECL2->A\",\n \"NMO1->A\",\n \"NMO2->A\",\n \"A->B\",\n \"A->EC->B\"\n ],\n \"decisions\": [\n {\"var\": \"fertility_bounds\", \"options\": [\n [[7, 14], [17, 25], [17, 25]],\n [[6, 14], [17, 27], [17, 27]],\n [[9, 17], [18, 25], [18, 25]],\n [[8, 14], [1, 7], [15, 28]],\n [[9, 17], [1, 8], [18, 28]]\n ]},\n {\"var\": \"relationship_bounds\",\n \"options\": [[2, 3], [1, 2], [1, 3]]}\n ],\n \"before_execute\": \"cp ../durante_etal_2013_study1.txt ./code/\"\n}\n# --- (END)\n\nif __name__ == '__main__':\n # read data file\n df = pd.read_csv('durante_etal_2013_study1.txt', delimiter='\\t')\n\n # remove NA\n df = df.dropna(subset=['rel1', 'rel2', 'rel3'])\n\n # create religiosity score\n df['rel_comp'] = np.around((df.rel1 + df.rel2 + df.rel3) / 3, decimals=2)\n\n # next menstrual onset (nmo) assessment\n df.last_period_start = pd.to_datetime(df.last_period_start)\n df.period_before_last_start = pd.to_datetime(df.period_before_last_start)\n df.date_testing = pd.to_datetime(df.date_testing)\n\n # --- (NMO1)\n # first nmo option: based on computed cycle length\n cl = df.last_period_start - df.period_before_last_start\n next_onset = df.last_period_start + cl\n df['computed_cycle_length'] = (cl / np.timedelta64(1, 'D')).astype(int)\n\n # --- (NMO2)\n # second nmo option: based on reported cycle length\n df = df.dropna(subset=['reported_cycle_length'])\n next_onset = df.last_period_start + df.reported_cycle_length.apply(\n lambda a: pd.Timedelta(days=a))\n\n # --- (ECL1)\n # exclusion based on computed cycle length\n df = df[(df.computed_cycle_length >= 25) & (df.computed_cycle_length <= 35)]\n\n # --- (ECL2)\n # exclusion based on reported cycle length\n df = df[(df.reported_cycle_length >= 25) & (df.reported_cycle_length <= 35)]\n\n # --- (A)\n # compute cycle day\n df['cycle_day'] = pd.Timedelta('28 days') - (next_onset - df.date_testing)\n df.cycle_day = (df.cycle_day / np.timedelta64(1, 'D')).astype(int)\n df.cycle_day = np.clip(df.cycle_day, 1, 28)\n\n # fertility assessment\n high_bounds = {{fertility_bounds}}[0]\n low_bounds1 = {{fertility_bounds}}[1]\n low_bounds2 = {{fertility_bounds}}[2]\n df.loc[(high_bounds[0] <= df.cycle_day) & (df.cycle_day <= high_bounds[1]),\n 'fertility'] = 'High'\n df.loc[(low_bounds1[0] <= df.cycle_day) & (df.cycle_day <= low_bounds1[1]),\n 'fertility'] = 'Low'\n df.loc[(low_bounds2[0] <= df.cycle_day) & (df.cycle_day <= low_bounds2[1]),\n 'fertility'] = 'Low'\n\n # relationship status assessment\n # single = response options 1 and 2; relationship = response options 3 and 4\n df.loc[df.relationship <= {{relationship_bounds}}[0],\n 'relationship_status'] = 'Single'\n df.loc[df.relationship >= {{relationship_bounds}}[1],\n 'relationship_status'] = 'Relationship'\n\n # --- (EC)\n # exclusion based on certainty ratings\n df = df[(df.sure1 >= 6) & (df.sure2 >= 6)]\n\n # --- (B)\n # perform an ANOVA on the processed data set\n lm = smf.ols('rel_comp ~ relationship_status * fertility', data=df).fit()\n table = sm.stats.anova_lm(lm, typ=2)\n print(table)\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "numpy.clip", "numpy.around", "pandas.Timedelta", "numpy.timedelta64" ] ]
yangbang18/video-classification-3d-cnn
[ "47be83a234475dee109d62d448eb441c128b0895" ]
[ "models/resnext.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\n\n__all__ = ['ResNeXt', 'resnet50', 'resnet101']\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(in_planes, out_planes, kernel_size=3,\n stride=stride, padding=1, bias=False)\n\n\ndef downsample_basic_block(x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(out.size(0), planes - out.size(1),\n out.size(2), out.size(3),\n out.size(4)).zero_()\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = Variable(torch.cat([out.data, zero_pads], dim=1))\n\n return out\n\n\nclass ResNeXtBottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):\n super(ResNeXtBottleneck, self).__init__()\n mid_planes = cardinality * int(planes / 32)\n self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(mid_planes)\n self.conv2 = nn.Conv3d(mid_planes, mid_planes, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn2 = nn.BatchNorm3d(mid_planes)\n self.conv3 = nn.Conv3d(mid_planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNeXt(nn.Module):\n\n def __init__(self, block, layers, sample_size, sample_duration, shortcut_type='B', cardinality=32, num_classes=400, last_fc=True):\n self.last_fc = last_fc\n\n self.inplanes = 64\n super(ResNeXt, self).__init__()\n self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2),\n padding=(3, 3, 3), bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type, cardinality)\n self.layer2 = self._make_layer(block, 256, layers[1], shortcut_type, cardinality, stride=2)\n self.layer3 = self._make_layer(block, 512, layers[2], shortcut_type, cardinality, stride=2)\n self.layer4 = self._make_layer(block, 1024, layers[3], shortcut_type, cardinality, stride=2)\n last_duration = math.ceil(sample_duration / 16)\n last_size = math.ceil(sample_size / 32)\n self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)\n self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, cardinality, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm3d(planes * block.expansion)\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, cardinality, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, cardinality))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n if self.last_fc:\n x = self.fc(x)\n\n return x\n\ndef get_fine_tuning_parameters(model, ft_begin_index):\n if ft_begin_index == 0:\n return model.parameters()\n\n ft_module_names = []\n for i in range(ft_begin_index, 5):\n ft_module_names.append('layer{}'.format(i))\n ft_module_names.append('fc')\n\n parameters = []\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n parameters.append({'params': v})\n break\n else:\n parameters.append({'params': v, 'lr': 0.0})\n\n return parameters\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-152 model.\n \"\"\"\n model = ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], **kwargs)\n return model" ]
[ [ "torch.nn.AvgPool3d", "torch.nn.Sequential", "torch.cat", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.functional.avg_pool3d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ] ]
jareducherek/tutor-grad-mlp
[ "5998137fcb0a8ec1c76b6f98cb74f8b2039f3426" ]
[ "mlp.py" ]
[ "import numpy as np\n\nclass DenseLayer:\n def __init__(\n self, \n n_units, \n input_size=None, \n activation=None, \n name=None):\n self.n_units = n_units\n self.input_size = input_size\n self.W = None\n self.name = name\n self.A = None\n self.activation = activation\n self.fn, self.df = self._select_activation_fn(activation) \n\n def init_weights(self):\n np.random.seed(42)\n self.W = (np.random.randn(self.n_units, self.input_size + 1) - 0.5) * 2 / self.input_size\n\n def __call__(self, X):\n m_examples = X.shape[0]\n X_extended = np.hstack([np.ones((m_examples, 1)), X])\n Z = np.einsum('bi,oi->bo', X_extended, self.W)\n A = self.fn(Z)\n self.A = A\n return A\n \n def _select_activation_fn(self, activation):\n if activation == 'relu':\n fn = lambda x: np.where(x < 0, 0.0, x)\n df = lambda x: np.where(x < 0, 0.0, 1.0)\n elif activation == 'sigmoid':\n fn = lambda x: 1 / (1 + np.exp(-x))\n df = lambda x: x * (1 - x)\n elif activation == 'tanh':\n fn = lambda x: (np.exp(x) - np.exp(-1)) / (np.exp(x) + np.exp(-x))\n df = lambda x: 1 - x**2\n elif activation == 'softmax':\n fn = lambda x: np.exp(x) / np.sum(np.exp(x), axis=1).reshape(-1, 1)\n df = lambda x: np.apply_along_axis(np.diagflat, axis=1, arr=x) - np.einsum('ij,ik->ijk', x, x)\n elif activation is None:\n fn = lambda x: x\n df = lambda x: 1.0\n else:\n NotImplementedError(f\"Function {activation} cannot be used.\")\n return fn, df\n\n \nclass SequentialModel:\n def __init__(self, layers, lr=0.01, momentum=0.8, loss='mse'):\n input_size = layers[0].n_units\n layers[0].init_weights()\n for layer in layers[1:]:\n layer.input_size = input_size\n input_size = layer.n_units\n layer.init_weights()\n self.layers = layers\n self.lr = lr\n self.momentum = momentum\n self.prev_dWs = {}\n self.loss_fns = self._select_loss_function(loss)\n\n def __repr__(self):\n return f\"SequentialModel n_layer: {len(self.layers)}\"\n\n def forward(self, X):\n out = self.layers[0](X)\n for layer in self.layers[1:]:\n out = layer(out)\n return out\n\n def _select_loss_function(self, loss):\n if loss == 'mse':\n fn = lambda y_pred, y_true: np.sum(0.5*(y_pred - y_true) ** 2) / (2*y_true.shape[0])\n df = lambda y_pred, y_true: y_pred - y_true\n else:\n raise NotImplementedError(f\"Function {activation} cannot be used.\")\n return fn, df\n \n def loss(self, y_pred, y_true):\n return self.loss_fns[0](y_pred, y_true)\n \n def loss_grad(self, y_pred, y_true):\n return self.loss_fns[1](y_pred, y_true)\n \n def _extend(self, vec):\n return np.hstack([np.ones((vec.shape[0], 1)), vec])\n\n def backprop(self, delta, layer_W, layer_a):\n da = self.layers[layer_a].df(self.layers[layer_a].A) # the derivative of the activation fn\n if len(da.shape) > 2:\n raise NotImplementedError(f\"Function {self.layers[layer_a].activation} not implemented for backprop currently.\")\n return np.einsum('bz,za,ba->ba', delta, self.layers[layer_W].W[..., 1:], da)\n \n def backward(self, X, y_pred, y_true):\n # backprop through loss\n delta = self.loss_grad(y_pred, y_true)\n # backprop through last activation\n delta = np.einsum('ba,baz->bz', delta, self.layers[-1].df(self.layers[-1].A))\n\n dWs = {}\n # begin backprop loop. 
\n # a: activated neurons before weight i\n # delta: backprop difference term of preactivated neurons after weight i\n for i in range(-1, -len(self.layers), -1):\n a = self.layers[i-1].A\n dWs[i] = np.einsum('ba,bz->za', self._extend(a), delta) / delta.shape[0]\n delta = self.backprop(delta, i, i-1)\n\n # final update using input X\n dWs[-len(self.layers)] = np.einsum('bi,bz->zi', self._extend(X), delta)\n\n # update all weights\n for k, dW in dWs.items():\n self.layers[k].W = self.layers[k].W - self.lr*(dW + self.momentum*self.prev_dWs.get(k, 0))\n \n # update previous updates for momentum term\n self.prev_dWs = dWs\n \n def train(self, X, y, iters):\n for i in range(iters):\n y_pred = self.forward(X)\n if (i%100 == 0):\n print(\"Iteration: \", i, \"Loss: \", self.loss(y_pred, y))\n self.backward(X, y_pred, y)\n \n def evaluate(self, X, y):\n \"\"\"\n this method is to evaluate our model on unseen samples\n it computes the confusion matrix and the accuracy\n\n X is a numpy array of shape (num_train, D) containing the testing images\n consisting of num_train samples each of dimension D.\n y is a numpy array of shape (num_train, D) containing the testing labels\n consisting of num_train samples each of dimension D.\n \"\"\"\n outputs = self.forward(X)\n nclasses = np.shape(y)[1]\n\n # 1-of-N encoding\n outputs = np.argmax(outputs, 1)\n targets = np.argmax(y, 1)\n\n cm = np.zeros((nclasses, nclasses))\n for i in range(nclasses):\n for j in range(nclasses):\n cm[i, j] = np.sum(np.where(outputs == i, 1, 0) * np.where(targets == j, 1, 0))\n\n print(\"Confusion Matrix:\")\n print(cm)\n print(f\"Accuracy: {np.trace(cm) / np.sum(cm) * 100:0.4f}\")\n\n return cm" ]
[ [ "numpy.sum", "numpy.einsum", "numpy.random.seed", "numpy.trace", "numpy.ones", "numpy.argmax", "numpy.shape", "numpy.random.randn", "numpy.apply_along_axis", "numpy.exp", "numpy.zeros", "numpy.where" ] ]
zlpure/cs224w
[ "03fc4d179e430454632e1eeaf457626b3ba18a4e" ]
[ "hw1-bundle/q2.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 7 18:34:38 2020\n\n@author: zlpure\n\"\"\"\nimport snap\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef calcFeaturesNode(Node, Graph):\n \"\"\"\n return: 1. the degree of v, i.e., deg(v);\n 2. the number of edges in the egonet of v, where egonet of v is defined as the subgraph of G\n induced by v and its neighborhood;\n 3. the number of edges that connect the egonet of v and the rest of the graph, i.e., the number\n of edges that enter or leave the egonet of v.\n \"\"\"\n feature = snap.TIntV()\n \n feature_1 = Node.GetOutDeg()\n \n neighbor_list = list(Node.GetOutEdges()) #注意:NI.GetOutEdges()是返回节点邻居的序号!\n NIdV = snap.TIntV()\n for item in neighbor_list:\n NIdV.Add(item)\n NIdV.Add(Node.GetId())\n SubGraph = snap.GetSubGraph(Graph, NIdV) #注意:subgraph没有算半条边!\n feature_2 = snap.CntUniqUndirEdges(SubGraph)\n \n Graph_copy = snap.ConvertGraph(type(Graph), Graph) ##复制Graph\n snap.DelNodes(Graph_copy, NIdV)\n feature_3 = Graph.GetEdges() - snap.CntUniqUndirEdges(SubGraph) - snap.CntUniqUndirEdges(Graph_copy)\n \n feature.Add(feature_1)\n feature.Add(feature_2)\n feature.Add(feature_3)\n \n return feature\n\n\ndef calcCosSimilarity(feature_u, feature_v):\n xy, xx, yy = 0, 0, 0\n for i in range(feature_u.Len()):\n xy += feature_u[i] * feature_v[i]\n xx += feature_u[i] ** 2\n yy += feature_v[i] ** 2\n \n if xx == 0. or yy == 0.:\n return 0. \n return xy / (np.sqrt(xx) * np.sqrt(yy))\n\n\n\ndef calcTopkSimilarity(Node, Graph, k=5):\n res = snap.TIntV()\n dict_sim = {}\n feature_u = calcFeaturesNode(Node, Graph)\n for NI in Graph.Nodes():\n feature_v = calcFeaturesNode(NI, Graph)\n sim = calcCosSimilarity(feature_u, feature_v)\n dict_sim[NI.GetId()] = sim\n \n res_list = sorted(dict_sim.items(),key = lambda x:x[1],reverse = True)[1:k+1]\n res_list = list(zip(*res_list))[0]\n print (res_list)\n \n for item in res_list:\n res.Add(item)\n \n return res\n \n\ndef calcRecursiveFeaturesNode(Node, Graph, k=2):\n if k <= 0:\n return np.array(list(calcFeaturesNode(Node, Graph)))\n \n feature = np.zeros(3**k,)\n for NI in Node.GetOutEdges():\n feature_1 = calcRecursiveFeaturesNode(Graph.GetNI(NI), Graph, k-1)\n feature += feature_1\n \n return np.concatenate((np.array(list(calcRecursiveFeaturesNode(Node, Graph, k-1))), feature / feature.shape[0], feature))\n\n\n\ndef calcTopkRecursiveSimilarity(Node, Graph, k=5):\n res = snap.TIntV()\n dict_sim = {}\n feature_u = calcRecursiveFeaturesNode(Node, Graph)\n for NI in Graph.Nodes():\n feature_v = calcRecursiveFeaturesNode(NI, Graph)\n if np.linalg.norm(feature_u)==0 or np.linalg.norm(feature_v)==0:\n sim = 0.\n else:\n sim = np.dot(feature_u, feature_v.T)/np.linalg.norm(feature_u)/np.linalg.norm(feature_v)\n dict_sim[NI.GetId()] = sim\n \n res_list = sorted(np.nan_to_num(list(dict_sim.items())),key = lambda x:x[1],reverse = True)[1:k+1]\n ##去掉Nan, np.nan_to_num()使用0代替数组x中的nan元素,使用有限的数字代替inf元素\n res_list = list(zip(*res_list))[0]\n res_list = list(map(int, res_list))\n print (res_list)\n \n for item in res_list:\n res.Add(item)\n \n return res\n\n\ndef getSubgraph(Node, Graph, hop=3):\n NodeVecAll = snap.TIntV()\n \n for i in range(1, hop+1): \n NodeVec = snap.TIntV()\n snap.GetNodesAtHop(Graph, Node.GetId(), i, NodeVec, False)\n for item in NodeVec:\n NodeVecAll.Add(item)\n NodeVecAll.Add(Node.GetId())\n \n SubGraph = snap.GetSubGraph(Graph, NodeVecAll)\n return SubGraph\n\n\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.sqrt", "numpy.linalg.norm" ] ]
jmhayesesq/Open-Chem
[ "e612d5cd471079c64e61ceda946c3dc7cf095bd8" ]
[ "openchem/models/vanilla_model.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\n\nfrom sklearn.ensemble import RandomForestRegressor as RFR\nfrom sklearn.ensemble import RandomForestClassifier as RFC\nfrom sklearn.svm import SVC\nfrom sklearn.svm import SVR\nfrom sklearn.externals import joblib\nfrom sklearn import metrics\n\nfrom data import get_fp, get_desc, normalize_desc, cross_validation_split\n\nfrom mordred import Calculator, descriptors\n\n\nclass RandomForestQSAR(object):\n def __init__(self, model_type='classifier', feature_type='fingerprints', n_estimators=100, n_ensemble=5):\n super(RandomForestQSAR, self).__init__()\n self.n_estimators = n_estimators\n self.n_ensemble = n_ensemble\n self.model = []\n self.model_type = model_type\n if self.model_type == 'classifier':\n for i in range(n_ensemble):\n self.model.append(RFC(n_estimators=n_estimators))\n elif self.model_type == 'regressor':\n for i in range(n_ensemble):\n self.model.append(RFR(n_estimators=n_estimators))\n else:\n raise ValueError('invalid value for argument')\n self.feature_type = feature_type\n if self.feature_type == 'descriptors':\n self.calc = Calculator(descriptors, ignore_3D=True)\n self.desc_mean = [0] * self.n_ensemble\n\n def load_model(self, path):\n self.model = []\n for i in range(self.n_ensemble):\n m = joblib.load(path + str(i) + '.pkl')\n self.model.append(m)\n if self.feature_type == 'descriptors':\n arr = np.load(path + 'desc_mean.npy', 'rb')\n self.desc_mean = arr\n\n def save_model(self, path):\n assert self.n_ensemble == len(self.model)\n for i in range(self.n_ensemble):\n joblib.dump(self.model[i], path + str(i) + '.pkl')\n if self.feature_type == 'descriptors':\n np.save(path + 'desc_mean.npy', self.desc_mean)\n\n def fit_model(self, data):\n eval_metrics = []\n if self.feature_type == 'fingerprints':\n fps = get_fp(data.smiles)\n elif self.feature_type == 'descriptors':\n fps, _, _ = get_desc(data.smiles, self.calc)\n if self.model_type == 'classifier':\n cross_val_data, cross_val_labels = \\\n cross_validation_split(fps, data.binary_labels)\n elif self.model_type == 'regressor':\n cross_val_data, cross_val_labels = \\\n cross_validation_split(fps, data.property)\n for i in range(self.n_ensemble):\n train_sm = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):])\n test_sm = cross_val_data[i]\n train_labels = np.concatenate(cross_val_labels[:i] + cross_val_labels[(i + 1):])\n test_labels = cross_val_labels[i]\n if self.feature_type == 'descriptors':\n train_sm, desc_mean = normalize_desc(train_sm)\n self.desc_mean[i] = desc_mean\n test_sm, _ = normalize_desc(test_sm, desc_mean)\n self.model[i].fit(train_sm, train_labels.ravel())\n predicted = self.model[i].predict(test_sm)\n if self.model_type == 'classifier':\n fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)\n eval_metrics.append(metrics.auc(fpr, tpr))\n metrics_type = 'AUC'\n elif self.model_type == 'regressor':\n r2 = metrics.r2_score(test_labels, predicted)\n eval_metrics.append(r2)\n metrics_type = 'R^2 score'\n\n return eval_metrics, metrics_type\n\n def predict(self, smiles, average=True):\n if self.feature_type == 'fingerprints':\n fps = get_fp(smiles)\n assert len(smiles) == len(fps)\n clean_smiles = []\n clean_fps = []\n nan_smiles = []\n for i in range(len(fps)):\n if np.isnan(sum(fps[i])):\n nan_smiles.append(smiles[i])\n else:\n clean_smiles.append(smiles[i])\n clean_fps.append(fps[i])\n clean_fps = np.array(clean_fps)\n elif self.feature_type == 'descriptors':\n clean_fps, clean_smiles, 
nan_smiles = get_desc(smiles, self.calc)\n prediction = []\n if len(clean_fps) > 0:\n for i in range(self.n_ensemble):\n m = self.model[i]\n if self.feature_type == 'descriptors':\n clean_fps, _ = normalize_desc(clean_fps, self.desc_mean[i])\n prediction.append(m.predict(clean_fps))\n prediction = np.array(prediction)\n if average:\n prediction = prediction.mean(axis=0)\n assert len(clean_smiles) == len(prediction)\n\n return clean_smiles, prediction, nan_smiles\n\n\nclass SVMQSAR(object):\n def __init__(self, model_type='classifier', n_ensemble=5):\n super(SVMQSAR, self).__init__()\n self.n_ensemble = n_ensemble\n self.model = []\n self.model_type = model_type\n if self.model_type == 'classifier':\n for i in range(n_ensemble):\n self.model.append(SVC())\n elif self.model_type == 'regressor':\n for i in range(n_ensemble):\n self.model.append(SVR())\n else:\n raise ValueError('invalid value for argument')\n\n def load_model(self, path):\n self.model = []\n for i in range(self.n_ensemble):\n m = joblib.load(path + str(i) + '.pkl')\n self.model.append(m)\n\n def save_model(self, path):\n assert self.n_ensemble == len(self.model)\n for i in range(self.n_ensemble):\n joblib.dump(self.model[i], path + str(i) + '.pkl')\n\n def fit_model(self, data, cross_val_data, cross_val_labels):\n eval_metrics = []\n for i in range(self.n_ensemble):\n train_sm = np.concatenate(cross_val_data[:i] + cross_val_data[(i + 1):])\n test_sm = cross_val_data[i]\n train_labels = np.concatenate(cross_val_labels[:i] + cross_val_labels[(i + 1):])\n test_labels = cross_val_labels[i]\n fp_train = get_fp(train_sm)\n fp_test = get_fp(test_sm)\n self.model[i].fit(fp_train, train_labels.ravel())\n predicted = self.model[i].predict(fp_test)\n if self.model_type == 'classifier':\n fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)\n eval_metrics.append(metrics.auc(fpr, tpr))\n metrics_type = 'AUC'\n elif self.model_type == 'regressor':\n r2 = metrics.r2_score(test_labels, predicted)\n eval_metrics.append(r2)\n metrics_type = 'R^2 score'\n return eval_metrics, metrics_type\n\n def predict(self, smiles, average=True):\n fps = get_fp(smiles)\n assert len(smiles) == len(fps)\n clean_smiles = []\n clean_fps = []\n nan_smiles = []\n for i in range(len(fps)):\n if np.isnan(sum(fps[i])):\n nan_smiles.append(smiles[i])\n else:\n clean_smiles.append(smiles[i])\n clean_fps.append(fps[i])\n clean_fps = np.array(clean_fps)\n prediction = []\n if len(clean_fps) > 0:\n for m in self.model:\n prediction.append(m.predict(clean_fps))\n prediction = np.array(prediction)\n if average:\n prediction = prediction.mean(axis=0)\n assert len(clean_smiles) == len(prediction)\n return clean_smiles, prediction, nan_smiles\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "sklearn.metrics.r2_score", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.auc", "sklearn.metrics.roc_curve", "numpy.save", "numpy.concatenate", "sklearn.svm.SVR", "sklearn.svm.SVC", "numpy.load", "numpy.array" ] ]
avr248/Serving
[ "bd12b303d3e490278dd94461fa8f70dc24c81ec0" ]
[ "examples/Pipeline/PaddleDetection/faster_rcnn/web_service.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom paddle_serving_server.web_service import WebService, Op\nimport logging\nimport numpy as np\nimport sys\nimport cv2\nfrom paddle_serving_app.reader import *\nimport base64\n\n\nclass FasterRCNNOp(Op):\n def init_op(self):\n self.img_preprocess = Sequential([\n BGR2RGB(), Div(255.0),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),\n Resize(640, 640), Transpose((2, 0, 1))\n ])\n self.img_postprocess = RCNNPostprocess(\"label_list.txt\", \"output\")\n\n def preprocess(self, input_dicts, data_id, log_id):\n (_, input_dict), = input_dicts.items()\n imgs = []\n #print(\"keys\", input_dict.keys())\n for key in input_dict.keys():\n data = base64.b64decode(input_dict[key].encode('utf8'))\n data = np.fromstring(data, np.uint8)\n im = cv2.imdecode(data, cv2.IMREAD_COLOR)\n im = self.img_preprocess(im)\n imgs.append({\n \"image\": im[np.newaxis, :],\n \"im_shape\":\n np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],\n \"scale_factor\": np.array([1.0, 1.0]).reshape(-1)[np.newaxis, :],\n })\n feed_dict = {\n \"image\": np.concatenate(\n [x[\"image\"] for x in imgs], axis=0),\n \"im_shape\": np.concatenate(\n [x[\"im_shape\"] for x in imgs], axis=0),\n \"scale_factor\": np.concatenate(\n [x[\"scale_factor\"] for x in imgs], axis=0)\n }\n #for key in feed_dict.keys():\n # print(key, feed_dict[key].shape)\n return feed_dict, False, None, \"\"\n\n def postprocess(self, input_dicts, fetch_dict, data_id, log_id):\n #print(fetch_dict)\n res_dict = {\n \"bbox_result\":\n str(self.img_postprocess(\n fetch_dict, visualize=False))\n }\n return res_dict, None, \"\"\n\n\nclass FasterRCNNService(WebService):\n def get_pipeline_response(self, read_op):\n faster_rcnn_op = FasterRCNNOp(name=\"faster_rcnn\", input_ops=[read_op])\n return faster_rcnn_op\n\n\nfasterrcnn_service = FasterRCNNService(name=\"faster_rcnn\")\nfasterrcnn_service.prepare_pipeline_config(\"config.yml\")\nfasterrcnn_service.run_service()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.fromstring" ] ]
AmarisAI/tia
[ "a7043b6383e557aeea8fc7112bbffd6e36a230e9" ]
[ "tia/util/mplot.py" ]
[ "\"\"\"\nCommon matplotlib utilities\n\"\"\"\nimport uuid\nimport os\n\nfrom matplotlib.ticker import FuncFormatter\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas\n\nimport tia.util.fmt as fmt\nfrom tia.util.decorator import DeferredExecutionMixin\n\n\nclass _CustomDateFormatter(DateFormatter):\n \"\"\"Extend so I can use with pandas Period objects \"\"\"\n\n def __call__(self, x, pos=0):\n if not hasattr(x, 'strftime'):\n x = pandas.to_datetime(x)\n x = x.strftime(self.fmt)\n return x\n\n\nclass _AxisFormat(DeferredExecutionMixin):\n def __init__(self, parent):\n super(_AxisFormat, self).__init__()\n self.parent = parent\n\n @property\n def X(self):\n \"\"\"Provide ability for user to switch from X to Y and vice versa\"\"\"\n return self.parent.X\n\n @property\n def Y(self):\n \"\"\"Provide ability for user to switch from X to Y and vice versa\"\"\"\n return self.parent.Y\n\n @property\n def axes(self):\n return self.parent.axes\n\n def percent(self, precision=2):\n fct = fmt.new_percent_formatter(precision=precision)\n wrapper = lambda x, pos: fct(x)\n self.axis.set_major_formatter(FuncFormatter(wrapper))\n return self\n\n def thousands(self, precision=1):\n fct = fmt.new_thousands_formatter(precision=precision)\n wrapper = lambda x, pos: fct(x)\n self.axis.set_major_formatter(FuncFormatter(wrapper))\n return self\n\n def millions(self, precision=1):\n fct = fmt.new_millions_formatter(precision=precision)\n wrapper = lambda x, pos: fct(x)\n self.axis.set_major_formatter(FuncFormatter(wrapper))\n return self\n\n def date(self, fmt='%Y-%m-%d'):\n fmtfct = DateFormatter(fmt)\n self.axis.set_major_formatter(fmtfct)\n return self\n\n def apply_format(self, fmtfct=lambda x: x):\n wrapper = lambda x, pos: fmtfct(x)\n self.axis.set_major_formatter(FuncFormatter(wrapper))\n return self\n\n def apply(self, axes=None):\n self.parent.apply(axes=axes)\n\n\nclass _YAxisFormat(_AxisFormat):\n @property\n def axis(self):\n return self.axes.yaxis\n\n def rotate(self, rot=40, ha='right'):\n rotate_labels(self.axes, which='y', rot=rot, ha=ha)\n return self\n\n def label(self, txt, **kwargs):\n self.axes.set_ylabel(txt, **kwargs)\n return self\n\n\nclass _XAxisFormat(_AxisFormat):\n @property\n def axis(self):\n return self.axes.xaxis\n\n def rotate(self, rot=40, ha='right'):\n rotate_labels(self.axes, which='x', rot=rot, ha=ha)\n return self\n\n def label(self, txt, **kwargs):\n self.axes.set_xlabel(txt, **kwargs)\n return self\n\n\nclass AxesFormat(DeferredExecutionMixin):\n def __init__(self):\n super(AxesFormat, self).__init__()\n self.X = _XAxisFormat(self)\n self.Y = _YAxisFormat(self)\n self.axes = None\n\n def apply(self, axes=None):\n self.axes = axes or plt.gca()\n self.X()\n self.Y()\n self()\n\n def tight_layout(self, pad=1.08, h_pad=None, w_pad=None, rect=None):\n plt.tight_layout(pad, h_pad, w_pad, rect)\n return self\n\n\nclass FigureHelper(object):\n def __init__(self, basedir=None, ext='.pdf', dpi=None):\n if not basedir:\n import tempfile\n\n basedir = tempfile.gettempdir()\n self.basedir = basedir\n self.last = None\n self.ext = ext\n self.fnmap = {}\n\n self.ax = None\n self.axiter = None\n self.figure = None\n self.dpi = dpi or 100\n\n def keys(self):\n return self.fnmap.keys()\n\n def next_ax(self):\n self.ax = self.axiter.next()\n return self.ax\n\n def __getitem__(self, item):\n return self.fnmap[item]\n\n def savefig(self, fn=None, dpi=None, clear=1, ext=None, key=None):\n ext = ext or self.ext\n ext = 
ext.startswith('.') and ext or '.' + ext\n fn = fn or uuid.uuid1()\n key = key or ''\n fn = '%s%s%s' % (key, fn, ext)\n fn = os.path.join(self.basedir, fn)\n\n figure = self.figure\n use_plt = 0\n if figure is None:\n figure = plt.gcf()\n use_plt = 1\n\n figure.savefig(fn, dpi=dpi or self.dpi)\n if clear:\n use_plt and plt.close() or figure.clf()\n if key:\n self.fnmap[key] = fn\n self.last = fn\n return fn\n\n def subplots(self, *params, **kwargs):\n f, ax = plt.subplots(*params, **kwargs)\n\n def axes_iter(axes):\n if not hasattr(axes, '__iter__'):\n return iter(list([axes]))\n else:\n if not hasattr(axes[0], '__iter__'):\n return iter(axes)\n else:\n # array of arrays\n return iter([y for x in axes for y in x])\n\n self.axiter = axes_iter(ax)\n self.figure = f\n return self.next_ax()\n\n\ndef rotate_labels(ax, which='x', rot=40, ha='right'):\n which = which.upper()\n\n def _apply(lbls):\n for lbl in lbls:\n lbl.set_ha(ha)\n lbl.set_rotation(rot)\n\n 'X' in which and _apply(ax.get_xticklabels())\n 'Y' in which and _apply(ax.get_yticklabels())\n\n\nclass GridHelper(object):\n @staticmethod\n def build(numobjs, ncols, **subplot_kwargs):\n nrows = int(np.ceil(float(numobjs) / float(ncols)))\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, **subplot_kwargs)\n\n if nrows == 1:\n axes = [axes]\n if ncols == 1:\n axes = [[ax] for ax in axes]\n return GridHelper(axes, nrows, ncols, fig=fig)\n\n def __init__(self, axarr, nrows, ncols, fig=None):\n self.axarr = axarr\n self.nrows = nrows\n self.ncols = ncols\n self.fig = fig\n\n def __iter__(self):\n import itertools\n\n flat = list(itertools.chain.from_iterable(self.axarr))\n return iter(flat)\n\n def get_axes(self, idx):\n \"\"\" Allow for simple indexing \"\"\"\n cidx = 0\n if idx > 0:\n cidx = idx % self.ncols\n ridx = idx / self.ncols\n return self.axarr[ridx][cidx]\n\n def get_last_row(self):\n return self.axarr[self.nrows - 1]\n\n def get_first_col(self):\n \"\"\" Return the array of Axes objects for the first column \"\"\"\n return [ax[0] for ax in self.axarr]\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.tight_layout", "pandas.to_datetime", "matplotlib.pyplot.subplots", "matplotlib.pyplot.gcf", "matplotlib.pyplot.close", "matplotlib.ticker.FuncFormatter" ] ]
Jokerming-MJ/CDLab
[ "bfe1262f14dea2d386108e949b7987a7d21d421f" ]
[ "src/models/siamunet_diff.py" ]
[ "# Implementation of\n# Daudt, R. C., Le Saux, B., & Boulch, A. \"Fully convolutional siamese networks for change detection\". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.\n\n# Adapted from https://github.com/rcdaudt/fully_convolutional_change_detection/blob/master/siamunet_diff.py\n\n## Original head information\n# Rodrigo Caye Daudt\n# https://rcdaudt.github.io/\n# Daudt, R. C., Le Saux, B., & Boulch, A. \"Fully convolutional siamese networks for change detection\". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ._blocks import Conv3x3, MaxPool2x2, ConvTransposed3x3\nfrom ._utils import Identity\n\n\nclass SiamUNet_diff(nn.Module):\n def __init__(self, in_ch, out_ch, use_dropout=False):\n super().__init__()\n\n self.use_dropout = use_dropout\n\n self.conv11 = Conv3x3(in_ch, 16, norm=True, act=True)\n self.do11 = self.make_dropout()\n self.conv12 = Conv3x3(16, 16, norm=True, act=True)\n self.do12 = self.make_dropout()\n self.pool1 = MaxPool2x2()\n\n self.conv21 = Conv3x3(16, 32, norm=True, act=True)\n self.do21 = self.make_dropout()\n self.conv22 = Conv3x3(32, 32, norm=True, act=True)\n self.do22 = self.make_dropout()\n self.pool2 = MaxPool2x2()\n\n self.conv31 = Conv3x3(32, 64, norm=True, act=True)\n self.do31 = self.make_dropout()\n self.conv32 = Conv3x3(64, 64, norm=True, act=True)\n self.do32 = self.make_dropout()\n self.conv33 = Conv3x3(64, 64, norm=True, act=True)\n self.do33 = self.make_dropout()\n self.pool3 = MaxPool2x2()\n\n self.conv41 = Conv3x3(64, 128, norm=True, act=True)\n self.do41 = self.make_dropout()\n self.conv42 = Conv3x3(128, 128, norm=True, act=True)\n self.do42 = self.make_dropout()\n self.conv43 = Conv3x3(128, 128, norm=True, act=True)\n self.do43 = self.make_dropout()\n self.pool4 = MaxPool2x2()\n\n self.upconv4 = ConvTransposed3x3(128, 128, output_padding=1)\n\n self.conv43d = Conv3x3(256, 128, norm=True, act=True)\n self.do43d = self.make_dropout()\n self.conv42d = Conv3x3(128, 128, norm=True, act=True)\n self.do42d = self.make_dropout()\n self.conv41d = Conv3x3(128, 64, norm=True, act=True)\n self.do41d = self.make_dropout()\n\n self.upconv3 = ConvTransposed3x3(64, 64, output_padding=1)\n\n self.conv33d = Conv3x3(128, 64, norm=True, act=True)\n self.do33d = self.make_dropout()\n self.conv32d = Conv3x3(64, 64, norm=True, act=True)\n self.do32d = self.make_dropout()\n self.conv31d = Conv3x3(64, 32, norm=True, act=True)\n self.do31d = self.make_dropout()\n\n self.upconv2 = ConvTransposed3x3(32, 32, output_padding=1)\n\n self.conv22d = Conv3x3(64, 32, norm=True, act=True)\n self.do22d = self.make_dropout()\n self.conv21d = Conv3x3(32, 16, norm=True, act=True)\n self.do21d = self.make_dropout()\n\n self.upconv1 = ConvTransposed3x3(16, 16, output_padding=1)\n\n self.conv12d = Conv3x3(32, 16, norm=True, act=True)\n self.do12d = self.make_dropout()\n self.conv11d = Conv3x3(16, out_ch)\n\n def forward(self, t1, t2):\n # Encode 1\n # Stage 1\n x11 = self.do11(self.conv11(t1))\n x12_1 = self.do12(self.conv12(x11))\n x1p = self.pool1(x12_1)\n\n # Stage 2\n x21 = self.do21(self.conv21(x1p))\n x22_1 = self.do22(self.conv22(x21))\n x2p = self.pool2(x22_1)\n\n # Stage 3\n x31 = self.do31(self.conv31(x2p))\n x32 = self.do32(self.conv32(x31))\n x33_1 = self.do33(self.conv33(x32))\n x3p = self.pool3(x33_1)\n\n # Stage 4\n x41 = self.do41(self.conv41(x3p))\n x42 = self.do42(self.conv42(x41))\n x43_1 = 
self.do43(self.conv43(x42))\n x4p = self.pool4(x43_1)\n\n # Encode 2\n # Stage 1\n x11 = self.do11(self.conv11(t2))\n x12_2 = self.do12(self.conv12(x11))\n x1p = self.pool1(x12_2)\n\n # Stage 2\n x21 = self.do21(self.conv21(x1p))\n x22_2 = self.do22(self.conv22(x21))\n x2p = self.pool2(x22_2)\n\n # Stage 3\n x31 = self.do31(self.conv31(x2p))\n x32 = self.do32(self.conv32(x31))\n x33_2 = self.do33(self.conv33(x32))\n x3p = self.pool3(x33_2)\n\n # Stage 4\n x41 = self.do41(self.conv41(x3p))\n x42 = self.do42(self.conv42(x41))\n x43_2 = self.do43(self.conv43(x42))\n x4p = self.pool4(x43_2)\n \n # Decode\n # Stage 4d\n x4d = self.upconv4(x4p)\n pad4 = (0, x43_1.shape[3]-x4d.shape[3], 0, x43_1.shape[2]-x4d.shape[2])\n x4d = torch.cat([F.pad(x4d, pad=pad4, mode='replicate'), torch.abs(x43_1-x43_2)], 1)\n x43d = self.do43d(self.conv43d(x4d))\n x42d = self.do42d(self.conv42d(x43d))\n x41d = self.do41d(self.conv41d(x42d))\n\n # Stage 3d\n x3d = self.upconv3(x41d)\n pad3 = (0, x33_1.shape[3]-x3d.shape[3], 0, x33_1.shape[2]-x3d.shape[2])\n x3d = torch.cat([F.pad(x3d, pad=pad3, mode='replicate'), torch.abs(x33_1-x33_2)], 1)\n x33d = self.do33d(self.conv33d(x3d))\n x32d = self.do32d(self.conv32d(x33d))\n x31d = self.do31d(self.conv31d(x32d))\n\n # Stage 2d\n x2d = self.upconv2(x31d)\n pad2 = (0, x22_1.shape[3]-x2d.shape[3], 0, x22_1.shape[2]-x2d.shape[2])\n x2d = torch.cat([F.pad(x2d, pad=pad2, mode='replicate'), torch.abs(x22_1-x22_2)], 1)\n x22d = self.do22d(self.conv22d(x2d))\n x21d = self.do21d(self.conv21d(x22d))\n\n # Stage 1d\n x1d = self.upconv1(x21d)\n pad1 = (0, x12_1.shape[3]-x1d.shape[3], 0, x12_1.shape[2]-x1d.shape[2])\n x1d = torch.cat([F.pad(x1d, pad=pad1, mode='replicate'), torch.abs(x12_1-x12_2)], 1)\n x12d = self.do12d(self.conv12d(x1d))\n x11d = self.conv11d(x12d)\n\n return x11d\n\n def make_dropout(self):\n if self.use_dropout:\n return nn.Dropout2d(p=0.2)\n else:\n return Identity()" ]
[ [ "torch.abs", "torch.nn.Dropout2d", "torch.nn.functional.pad" ] ]
IcarusWizard/Deep-Generative-Models
[ "4117c11ad944bdeff106a80adbb3642a076af64e" ]
[ "degmo/gan/wgan.py" ]
[ "import torch\nfrom torch.functional import F\nimport numpy as np\n\nfrom .modules import ConvGenerator, ConvDiscriminator, MLPGenerator, MLPDiscriminator\nfrom .utils import weights_init\nfrom .trainer import AdversarialTrainer\n\nclass WGAN(torch.nn.Module):\n def __init__(self, c, h, w, mode, latent_dim,\n discriminator_features, discriminator_hidden_layers,\n generator_features, generator_hidden_layers,\n use_norm_discriminator, use_norm_generator):\n super().__init__()\n\n self.input_size = (c, h, w)\n self.latent_dim = latent_dim\n\n if mode == 'mlp':\n self.generator = MLPGenerator(c, h, w, latent_dim, generator_features, generator_hidden_layers)\n\n self.discriminator = MLPDiscriminator(c, h, w, discriminator_features, discriminator_hidden_layers)\n \n elif mode == 'conv':\n assert h == w, \"only support square images\"\n\n self.generator = ConvGenerator(c, h, w, latent_dim, generator_features, use_norm_generator)\n\n self.discriminator = ConvDiscriminator(c, h, w, discriminator_features, use_norm_discriminator)\n\n self.generator.generator.apply(weights_init)\n self.discriminator.discriminator.apply(weights_init)\n\n def generate(self, samples):\n device = next(self.parameters()).device\n dtype = next(self.parameters()).dtype\n\n z = torch.randn(samples, self.latent_dim, dtype=dtype, device=device)\n\n return self.generator(z)\n\n def discriminate(self, x):\n return self.discriminator(x)\n\n def z2x(self, z):\n return self.generator(z)\n\n def get_discriminator_loss(self, real, fake):\n return torch.mean(self.discriminate(real)) - torch.mean(self.discriminate(fake))\n\n def get_generator_loss(self, fake):\n return torch.mean(self.discriminate(fake))\n\n def get_trainer(self):\n return AdversarialTrainer" ]
[ [ "torch.randn" ] ]
jni/gala
[ "975ed783a6cb3c0afe24a921afdacf2f27184fcf" ]
[ "gala/features/convex_hull.py" ]
[ "# python standard library\nimport logging\nimport itertools as it\n\n# numpy/scipy\nimport numpy as np\nfrom scipy import ndimage as nd\nfrom scipy.misc import factorial\nfrom numpy.linalg import det\ntry:\n from scipy.spatial import Delaunay\nexcept ImportError:\n logging.warning('Unable to load scipy.spatial.Delaunay. '+\n 'Convex hull features not available.')\n\n# local imports\nfrom . import base\n\nclass Manager(base.Null):\n def __init__(self, *args, **kwargs):\n super(Manager, self).__init__()\n\n def write_fm(self, json_fm={}):\n if 'feature_list' not in json_fm:\n json_fm['feature_list'] = []\n json_fm['feature_list'].append('convex-hull')\n json_fm['convex-hull'] = {}\n return json_fm\n\n def convex_hull_ind(self, g, n1, n2=None):\n m = np.zeros_like(g.watershed); \n if n2 is not None:\n m.ravel()[g.boundary(n1, n2)] = 1\n else:\n m.ravel()[list(g.extent(n1))] = 1\n m = m - nd.binary_erosion(m) #Only need border\n ind = np.array(np.nonzero(m)).T\n return ind\n\n\n def convex_hull_vol(self, ind, g):\n # Compute the convex hull of the region\n try:\n tri = Delaunay(ind)\n except:\n # Just triangulate bounding box\n mins = ind.min(axis=0)\n maxes = ind.max(axis=0)\n maxes[maxes==mins] += 1\n ind = np.array(list(it.product(*tuple(np.array([mins,maxes]).T))))\n tri = Delaunay(ind)\n vol = 0\n for simplex in tri.vertices:\n pts = tri.points[simplex].T\n pts = pts - np.repeat(pts[:,0][:, np.newaxis], pts.shape[1], axis=1)\n pts = pts[:,1:]\n vol += abs(1/float(factorial(pts.shape[0])) * det(pts))\n return vol,tri \n\n def create_node_cache(self, g, n):\n vol, tri = self.convex_hull_vol(self.convex_hull_ind(g,n), g)\n return np.array([tri,vol])\n\n def create_edge_cache(self, g, n1, n2):\n vol, tri = self.convex_hull_vol(self.convex_hull_ind(g,n1,n2), g)\n return np.array([tri,vol])\n\n def update_node_cache(self, g, n1, n2, dst, src):\n tri1 = src[0]\n tri2 = dst[0]\n ind1 = tri1.points[np.unique(tri1.convex_hull.ravel())]\n ind2 = tri2.points[np.unique(tri2.convex_hull.ravel())]\n allind = np.concatenate((ind1,ind2))\n vol, tri = self.convex_hull_vol(allind, g)\n dst = np.array([tri,vol])\n\n def update_edge_cache(self, g, e1, e2, dst, src):\n tri1 = src[0]\n tri2 = dst[0]\n ind1 = tri1.points[np.unique(tri1.convex_hull.ravel())]\n ind2 = tri2.points[np.unique(tri2.convex_hull.ravel())]\n allind = np.concatenate((ind1,ind2))\n vol, tri = self.convex_hull_vol(allind, g)\n dst = np.array([tri,vol])\n\n def compute_node_features(self, g, n, cache=None):\n if cache is None: \n cache = g.node[n][self.default_cache]\n convex_vol = cache[1]\n features = []\n features.append(convex_vol)\n features.append(convex_vol/float(g.node[n]['size']))\n return np.array(features)\n\n def compute_edge_features(self, g, n1, n2, cache=None):\n if cache is None: \n cache = g[n1][n2][self.default_cache]\n convex_vol = cache[1]\n\n features = []\n features.append(convex_vol)\n features.append(convex_vol / len(g.boundary(n1, n2)))\n return np.array(features)\n\n def compute_difference_features(self,g, n1, n2, cache1=None, cache2=None):\n if cache1 is None:\n cache1 = g.node[n1][self.default_cache]\n tri1 = cache1[0]\n convex_vol1 = cache1[1]\n\n if cache2 is None:\n cache2 = g.node[n2][self.default_cache]\n tri2 = cache2[0]\n convex_vol2 = cache2[1]\n\n ind1 = tri1.points[np.unique(tri1.convex_hull.ravel())]\n ind2 = tri2.points[np.unique(tri2.convex_hull.ravel())]\n allind = np.concatenate((ind1,ind2))\n convex_vol_both, tri_both = self.convex_hull_vol(allind, g)\n\n vol1 = float(g.node[n1]['size'])\n vol2 
= float(g.node[n2]['size'])\n volborder = float(len(g.boundary(n1, n2)))\n volboth = vol1+vol2\n\n features = []\n features.append(abs(convex_vol1/vol1 - convex_vol2/vol2))\n features.append(abs(convex_vol1/vol1 - convex_vol_both/volboth))\n features.append(abs(convex_vol2/vol2 - convex_vol_both/volboth))\n features.append(abs(convex_vol_both/volboth))\n features.append((convex_vol1*vol2)/(convex_vol2*vol1))\n features.append(volborder/vol1)\n features.append(volborder/vol2)\n features.append(volborder/volboth)\n\n return np.array(features)\n" ]
[ [ "scipy.ndimage.binary_erosion", "numpy.nonzero", "scipy.spatial.Delaunay", "scipy.misc.factorial", "numpy.concatenate", "numpy.linalg.det", "numpy.zeros_like", "numpy.repeat", "numpy.array" ] ]
shinya1024/rate_equation_test
[ "044edd9665554f1b665de39994c654160160914b" ]
[ "exabibunn4.py" ]
[ "from scipy.integrate import solve_ivp\n\ndef upward_cannon(t,y):\n return [y[1],-0.5]\ndef hit_ground(t,y):\n return y[0]\n\nhit_ground.terminal = True\nhit_ground.direction = -1\ndef apex(t,y):\n return y[1]\n\nsol = solve_ivp(upward_cannon,[0,100],[0,10],events = (hit_ground,apex),dense_output = True)\n\nprint(sol.t_events)\nprint(sol.t)\nprint(sol.sol(sol.t_events[1][0]))\nprint(sol.y_events)\n" ]
[ [ "scipy.integrate.solve_ivp" ] ]
nextfortune/modelbricks
[ "5aee91cf5acb439cafcac89d96bee9c8a71f65cc" ]
[ "tests/test_transformlayer.py" ]
[ "\"\"\"Unit Test for TransformLayer\"\"\"\nimport unittest\nimport tensorflow as tf\nfrom modelbricks.layers.layers import TransformLayer\n\nimport common_base_test as cbt\n\nclass TestTransformLayer(cbt.TestBase):\n \"\"\"Test Case for Transform Layer\"\"\"\n def setUp(self):\n super().setUp()\n\n self.dim = {0: 'non_sequential', 1:'sequential'}\n\n #pylint: disable=W0612\n for inputs, labels in self.dataset.take(1):\n self.trans_from = TransformLayer(self.feature_columns, self.dim)\n self.trans_from.build(inputs)\n\n def test_transformlayer_outputshape(self):\n \"\"\"test Transformer Layer output shape\"\"\"\n #pylint: disable=W0612\n for input_x, labels in self.dataset.take(1):\n output = self.trans_from(input_x)\n\n expected_output_shape = tf.TensorShape([1,38])\n\n self.assertEqual(expected_output_shape, output.shape)\n" ]
[ [ "tensorflow.TensorShape" ] ]
kumagallium/pyklab
[ "67bb8f1f088d7e6f217c3e16583c51416589b073" ]
[ "pyklab/develop/structures.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\nimport plotly.graph_objects as go\nfrom scipy.spatial import Delaunay\nimport pymatgen.core as mg\nfrom pymatgen.ext.matproj import MPRester\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.analysis.local_env import IsayevNN, MinimumDistanceNN, CrystalNN, CutOffDictNN, EconNN, JmolNN, MinimumOKeeffeNN, MinimumVIRENN, VoronoiNN\nimport itertools\nfrom itertools import product\nfrom bokeh.sampledata.periodic_table import elements\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom .element_color_schemes import ElementColorSchemes\n\npmg = MPRester(\"UP0x1rTAXR52g7pi\")\nelcolor = dict(zip(elements[\"atomic number\"].values, elements[\"CPK\"].values))\n\nclass Structure():\n\n def __init__(self, structure_dirpath=\"structures/\"):\n self.structure_dirpath = structure_dirpath\n self.element_colors = ElementColorSchemes.get_element_color_schemes()\n\n def set_sig_fig(self, val):\n try:\n return '{:.3g}'.format(float(val))\n except:\n return val\n\n def get_structure(self, mpid, is_primitive, scale, structure=\"\"):\n if structure == \"\":\n structure_tmp = pmg.get_structure_by_material_id(mpid)\n else:\n structure_tmp = structure.copy()\n\n sa_structure = SpacegroupAnalyzer(structure_tmp)\n #print(sa_structure.get_space_group_symbol())\n if is_primitive:\n #structure = structure.get_primitive_structure()\n structure = sa_structure.get_primitive_standard_structure()\n structure.make_supercell([scale, scale, scale])\n else:\n #structure = structure.get_primitive_structure().get_reduced_structure()\n structure = sa_structure.get_refined_structure()\n structure.make_supercell([scale, scale, scale])\n return structure\n\n def get_mpdata_from_composition(self, composition):\n properties = [\"task_id\", \"pretty_formula\", \"spacegroup.symbol\", \"formation_energy_per_atom\", \"e_above_hull\", \"band_gap\"]\n mpdata = pmg.query(criteria=composition, properties=properties)\n df_mpdata = pd.DataFrame(mpdata)\n df_mpdata = df_mpdata.rename(columns={\"task_id\": \"mpid\"})\n df_mpdata = df_mpdata.applymap(self.set_sig_fig)\n df_mpdata = df_mpdata.sort_values(\"e_above_hull\")\n\n return df_mpdata\n\n def my_round(self, val, digit=2):\n p = 10 ** digit\n return (val * p * 2 + 1) // 2 / p\n\n def get_round(self, arr,digit=2):\n res = np.array([self.my_round(val,digit) for val in arr])\n return res\n\n def get_delaunay_ctn(self, mpid=\"mp-19717\", scale=1, is_primitive=False, structure=\"\"):\n structure_tmp = self.get_structure(mpid, is_primitive, scale, structure=structure)\n\n structure_tmp.make_supercell([5, 5, 5])\n xyz_list = [site[\"xyz\"] for site in structure_tmp.as_dict()[\"sites\"]] # Information on each site in the crystal structure\n label_list = [site[\"label\"] for site in structure_tmp.as_dict()[\"sites\"]] \n matrix = structure_tmp.lattice.matrix\n a, b, c = self.get_round(structure_tmp.lattice.abc)\n\n tri = Delaunay(xyz_list)\n\n simplices_all = tri.simplices\n points_all = tri.points\n\n tol = 0.05 #Error in atomic coordinates[angstrom]\n include_idxs = []\n for i, point in enumerate(points_all):\n abc_mat = self.get_round(structure_tmp.lattice.get_vector_along_lattice_directions(point))\n if (abc_mat[0]>=(a*2/5)-tol) and (abc_mat[1]>=(b*2/5)-tol) and (abc_mat[2]>=(c*2/5)-tol) and (abc_mat[0]<=(a*3/5)+tol) and (abc_mat[1]<=(b*3/5)+tol) and (abc_mat[2]<=(c*3/5)+tol):\n include_idxs.append(i)\n \n ijklist = []\n pidxs = []\n for tet in simplices_all:\n if len(set(tet)&set(include_idxs)) > 0:\n tet = 
np.sort(tet)\n i = tet[0]\n j = tet[1]\n k = tet[2]\n w = tet[3]\n\n ijklist.append((i, j, k, w)) \n pidxs.extend((i, j, k, w))\n pidxs = list(set(pidxs))\n \n atom_idx_dict = dict(zip(set(np.array(label_list)), range(len(set(np.array(label_list))))))\n viz_points = []\n atoms_radius = []\n atoms_color = []\n atom_idxs = []\n atom_species = []\n pidx_dict = {}\n for i, pidx in enumerate(np.sort(pidxs)):\n viz_points.append(points_all[pidx])\n if mg.Element(label_list[pidx]).atomic_radius != None:\n atoms_radius.append(mg.Element(label_list[pidx]).atomic_radius*(10/scale))\n else:\n atoms_radius.append(10/scale)\n #atoms_color.append(elements[elements[\"symbol\"]==label_list[pidx]][\"CPK\"].values[0])\n atoms_color.append(self.element_colors[\"VESTA\"][label_list[pidx]])\n atom_idxs.append(atom_idx_dict[label_list[pidx]])\n atom_species.append(label_list[pidx])\n pidx_dict[pidx] = i\n \n viz_ijk = []\n for ijk in ijklist:\n ijk_tmp = []\n for tmp in ijk:\n ijk_tmp.append(pidx_dict[tmp])\n viz_ijk.append(tuple(ijk_tmp))\n\n pts = np.array(viz_points)\n ijk = np.array(list(set(viz_ijk)))\n\n return {\"pts\": pts, \"ijk\": ijk, \"matrix\":matrix, \"atom_species\": atom_species, \"atoms_radius\": atoms_radius, \"atoms_color\": atoms_color, \"atom_idxs\": atom_idxs}\n\n def get_delaunay_ctn_multipro(self, mpid, is_primitive, scale, structure):\n delaunay_dict = {}\n delaunay_data = list(self.get_delaunay_ctn(mpid=mpid, is_primitive=is_primitive, scale=scale, structure=structure).values())\n delaunay_dict[mpid] = delaunay_data\n return delaunay_dict\n\n def get_delaunay_cfn(self, mpid=\"mp-19717\", scale=1, is_primitive=False, structure=\"\"):\n structure_tmp = self.get_structure(mpid, is_primitive, scale, structure=structure)\n\n structure_tmp.make_supercell([5, 5, 5])\n xyz_list = [site[\"xyz\"] for site in structure_tmp.as_dict()[\"sites\"]] # Information on each site in the crystal structure\n label_list = [site[\"label\"] for site in structure_tmp.as_dict()[\"sites\"]] \n matrix = structure_tmp.lattice.matrix\n a, b, c = self.get_round(structure_tmp.lattice.abc)\n\n tri = Delaunay(xyz_list)\n\n simplices_all = tri.simplices\n points_all = tri.points\n\n tol = 0.05 #Error in atomic coordinates[angstrom]\n include_idxs = []\n for i, point in enumerate(points_all):\n abc_mat = self.get_round(structure_tmp.lattice.get_vector_along_lattice_directions(point))\n if (abc_mat[0]>=(a*2/5)-tol) and (abc_mat[1]>=(b*2/5)-tol) and (abc_mat[2]>=(c*2/5)-tol) and (abc_mat[0]<=(a*3/5)+tol) and (abc_mat[1]<=(b*3/5)+tol) and (abc_mat[2]<=(c*3/5)+tol):\n include_idxs.append(i)\n \n ijklist = []\n pidxs = []\n for tet in simplices_all:\n if len(set(tet)&set(include_idxs)) > 0:\n for comb in itertools.combinations(tet, 3):\n comb = np.sort(comb)\n i = comb[0]\n j = comb[1]\n k = comb[2]\n\n ijklist.append((i, j, k)) \n pidxs.extend((i, j, k))\n pidxs = list(set(pidxs))\n \n atom_idx_dict = dict(zip(set(np.array(label_list)), range(len(set(np.array(label_list))))))\n viz_points = []\n atoms_radius = []\n atoms_color = []\n atom_idxs = []\n atom_species = []\n pidx_dict = {}\n for i, pidx in enumerate(np.sort(pidxs)):\n viz_points.append(points_all[pidx])\n if mg.Element(label_list[pidx]).atomic_radius != None:\n atoms_radius.append(mg.Element(label_list[pidx]).atomic_radius*(10/scale))\n else:\n atoms_radius.append(10/scale)\n #atoms_color.append(elements[elements[\"symbol\"]==label_list[pidx]][\"CPK\"].values[0])\n atoms_color.append(self.element_colors[\"VESTA\"][label_list[pidx]])\n 
atom_idxs.append(atom_idx_dict[label_list[pidx]])\n atom_species.append(label_list[pidx])\n pidx_dict[pidx] = i\n \n viz_ijk = []\n for ijk in ijklist:\n ijk_tmp = []\n for tmp in ijk:\n ijk_tmp.append(pidx_dict[tmp])\n viz_ijk.append(tuple(ijk_tmp))\n\n pts = np.array(viz_points)\n ijk = np.array(list(set(viz_ijk)))\n\n return {\"pts\": pts, \"ijk\": ijk, \"matrix\":matrix, \"atom_species\": atom_species, \"atoms_radius\": atoms_radius, \"atoms_color\": atoms_color, \"atom_idxs\": atom_idxs}\n\n def get_delaunay_multipro(self, mpid, is_primitive, scale, structure):\n delaunay_dict = {}\n delaunay_data = list(self.get_delaunay_cfn(mpid=mpid, is_primitive=is_primitive, scale=scale, structure=structure).values())\n delaunay_dict[mpid] = delaunay_data\n return delaunay_dict\n\n def show_delaunay(self, mpid=\"mp-19717\", scale=1, is_primitive=False, structure=\"\"):\n pts, ijk, matrix, atom_species, atoms_radius, atoms_color, atom_idxs = self.get_delaunay_cfn(mpid=mpid, scale=scale, is_primitive=is_primitive, structure=structure).values()\n\n x, y, z = pts.T\n i, j, k = ijk.T\n\n xyz = list(product([2/5,3/5], repeat=3))\n xyz = [np.dot(np.array(xyz_tmp),matrix) for xyz_tmp in xyz]\n\n xx,yy,zz = np.array([xyz[0],xyz[1],xyz[3],xyz[2],xyz[0]\n ,xyz[4],xyz[5],xyz[1]\n ,xyz[3],xyz[7],xyz[5]\n ,xyz[5],xyz[7],xyz[6],xyz[4]\n ,xyz[6],xyz[2]]).T\n fig = go.Figure(data=[go.Mesh3d(x=np.array(x), y=np.array(y), z=np.array(z),\n color='lightblue',\n opacity=0.2,\n flatshading=True,\n contour=dict(show=False),\n hoverinfo=\"text\",\n i = i,\n j = j,\n k = k),\n go.Scatter3d(x=x, y=y, z=z, mode='markers',\n #hoverinfo=\"text\",\n #hovertext=atom_species,\n marker=dict(\n size=atoms_radius,\n color=atoms_color,\n opacity=0.8\n )\n ),\n go.Scatter3d(x=xx,\n y=yy,\n z=zz,\n #hoverinfo=\"text\",\n mode='lines',\n name='',\n line=dict(color= 'rgb(70,70,70)', width=2))]\n )\n\n fig.update_layout(\n margin=dict(l=0, r=0, t=10, b=0),\n autosize=False,\n width=700,\n height=700,\n scene=dict(\n xaxis=dict(showgrid=False, showbackground=True, showticklabels=False, title=\"x\"),\n yaxis=dict(showgrid=False, showbackground=True ,showticklabels=False, title=\"y\"),\n zaxis=dict(showgrid=False, showbackground=True, showticklabels=False, title=\"z\")\n )\n )\n fig.update_scenes(camera_projection=dict(type=\"orthographic\"))\n fig.show()\n\n def get_delaunay_to_offformat(self, mpid=\"mp-19717\", scale=1, is_primitive=False, structure=\"\", nodes=False):\n pts, ijks, _, atom_species, _, _, _ = self.get_delaunay_cfn(mpid=mpid, scale=scale, is_primitive=is_primitive, structure=structure).values()\n\n offtext = \"OFF\\n\"\n offtext += str(len(pts)) + \" \" + str(len(ijks)) + \" 0\\n\"\n for pt in pts:\n offtext += \" \".join(map(str, pt)) + \"\\n\"\n for ijk in ijks:\n offtext += str(len(ijk)) + \" \" + \" \".join(map(str, ijk)) + \"\\n\"\n\n if nodes:\n offtext += \"\\n\".join(map(str, atom_species))\n\n if not os.path.exists(self.structure_dirpath):\n os.mkdir(self.structure_dirpath)\n if not os.path.exists(self.structure_dirpath+\"mp_delaunay_offformat/\"):\n os.mkdir(self.structure_dirpath+\"mp_delaunay_offformat/\")\n # Refactor: Add naming process when mpid is missing\n with open(self.structure_dirpath+\"mp_delaunay_offformat/\"+mpid+\".off\", mode='w') as f:\n f.write(offtext)\n\n return offtext\n\n\n def get_delaunay_to_objformat(self, mpid=\"mp-19717\", scale=1, is_primitive=False, structure=\"\"):\n pts, ijks, _, atom_species, _, _, _ = self.get_delaunay_cfn(mpid=mpid, scale=scale, is_primitive=is_primitive, 
structure=structure).values()\n\n        objtext =\"####\\n#\\n# OBJ File Generated by Pyklab\\n#\\n####\\n\"+ \\\n                    \"# Object \"+mpid+\".obj\\n\"+ \\\n                    \"#\\n# Vertices: \"+str(len(pts))+\"\\n\"+ \\\n                    \"# Faces: \"+str(len(ijks))+\"\\n#\\n####\\n\"\n        for pt in pts:\n            objtext += \"v \" + \" \".join(map(str, pt)) + \"\\n\"\n        objtext += \"\\n\"\n        for ijk in ijks:\n            ijk = map(lambda x: x+1, ijk)\n            objtext += \"f \" + \" \".join(map(str, ijk)) + \"\\n\"\n        objtext += \"\\n# End of File\"\n\n        if not os.path.exists(self.structure_dirpath):\n            os.mkdir(self.structure_dirpath)\n        if not os.path.exists(self.structure_dirpath+\"mp_delaunay_objformat/\"):\n            os.mkdir(self.structure_dirpath+\"mp_delaunay_objformat/\")\n        # Refactor: Add naming process when mpid is missing\n        with open(self.structure_dirpath+\"mp_delaunay_objformat/\"+mpid+\".obj\", mode='w') as f:\n            f.write(objtext)\n\n        return objtext\n\n    def create_crystal_graph(self, structure, graphtype=\"IsayevNN\"):\n        #https://pymatgen.org/pymatgen.analysis.local_env.html\n        #IsayevNN: https://www.nature.com/articles/ncomms15679.pdf\n        if graphtype == \"IsayevNN\":\n            nn = IsayevNN(cutoff=6, allow_pathological=True)\n        elif graphtype == \"MinimumDistanceNN\":\n            nn = MinimumDistanceNN(cutoff=5)\n        elif graphtype == \"CrystalNN\":\n            nn = CrystalNN()\n        elif graphtype == \"CutOffDictNN\":\n            nn = CutOffDictNN()\n        elif graphtype == \"EconNN\":\n            nn = EconNN()\n        elif graphtype == \"JmolNN\":\n            nn = JmolNN()\n        elif graphtype == \"MinimumOKeeffeNN\":\n            nn = MinimumOKeeffeNN()\n        elif graphtype == \"MinimumVIRENN\":\n            nn = MinimumVIRENN()\n        elif graphtype == \"VoronoiNN\":\n            nn = VoronoiNN()\n\n        originalsites = {}\n        originalsites_inv = {}\n        for site in structure.sites:\n            originalsites[site] = nn._get_original_site(structure, site)\n            originalsites_inv[nn._get_original_site(structure, site)] = site\n\n        nodes = {}  # initialize nodes\n        edges = {}  # initialize edges\n        adj = []  # initialize adjacency matrix\n        weights = []  # initialize weight matrix\n        distances = []  # initialize interatomic distance matrix\n        # each site in the original structure\n        for i, basesite in enumerate(nn.get_all_nn_info(structure)):\n            orisite1 = originalsites_inv[i]\n            nodes[i] = orisite1.as_dict()[\"species\"][0][\"element\"]\n            sitenum = structure.num_sites  # number of sites in the original crystal structure\n            adj.append([0]*sitenum)  # initialize adjacency matrix row\n            weights.append([0]*sitenum)  # initialize weight matrix row\n            distances.append([0]*sitenum)  # initialize distance matrix row\n            # uniquesite = []\n            # each neighboring site\n            for neighbor in basesite:\n                # search for the original site matching this neighbor\n                # for orisite2 in list(originalsites.keys()):\n                for orisite2 in list(originalsites.keys())[i+1:]:\n                    # https://pymatgen.org/pymatgen.core.sites.html\n                    # check whether the two sites are identical\n                    if neighbor[\"site\"].is_periodic_image(orisite2):\n                        adj[i][originalsites[orisite2]] += 1\n                        weights[i][originalsites[orisite2]] += neighbor[\"weight\"]\n                        distances[i][originalsites[orisite2]] += orisite1.distance(neighbor[\"site\"])\n                        edges.setdefault(i, [])\n                        edges[i].append(originalsites[orisite2])\n                        break\n\n        return nodes, edges, adj, weights, distances\n\n    def view_graph(self, graph, node2atom):\n        g_nodes = [mg.Composition(node).elements[0].symbol for node in graph.nodes]\n        pos = nx.spring_layout(graph)  # ,k=10)\n        if len(graph.edges) > 0:\n            edge_labels = {}\n            u, v, d = np.array(list(graph.edges(data=True))).T\n            sites = list(zip(u, v))\n            for st in sites:\n                edge_labels.setdefault(st, 0)\n                edge_labels[st] += 1\n            nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, font_size=5)\n        else:\n            print(\"No edges\")\n\n        nx.draw_networkx(graph, pos, font_size=5, width=0.5, node_color=[elcolor[node2atom[node]] for node in g_nodes],\n                         node_size=[mg.Element(node).atomic_radius*100 
for node in g_nodes])\n\n def visualize_crystal_graph(self, nodes, edges, distances):\n G = nx.MultiGraph()\n node2atom = {}\n atomcount = {}\n renamenodes = {}\n for siteidx, el in nodes.items():\n atomcount.setdefault(el, 0)\n atomcount[el] += 1\n renamenodes[siteidx] = el + str(atomcount[el])\n G.add_node(renamenodes[siteidx])\n node2atom[el] = mg.Element(el).number\n\n for siteidx, edge in edges.items():\n for i, e in enumerate(edge):\n G.add_edge(renamenodes[siteidx], renamenodes[e], length=distances[siteidx][e])\n\n fig = plt.figure(figsize=(3, 3), dpi=300, facecolor='w', edgecolor='k')\n ax = fig.add_subplot(1, 1, 1)\n # Remove axis ticks\n ax.tick_params(labelbottom=\"off\", bottom=\"off\")\n ax.tick_params(labelleft=\"off\", left=\"off\")\n # Remove labels\n ax.set_xticklabels([])\n # Remove axis\n plt.gca().spines['right'].set_visible(False)\n plt.gca().spines['left'].set_visible(False)\n plt.gca().spines['top'].set_visible(False)\n plt.gca().spines['bottom'].set_visible(False)\n plt.grid(False)\n\n self.view_graph(G, node2atom)\n plt.show()\n\n def check_edges(self, adj, searchlist, connects):\n new_searchlist = []\n for sl in searchlist:\n connects[sl] = 1\n save_idxs = np.array(connects)^np.array([1]*len(adj))\n idxs = np.array(adj[sl]>0, dtype=int)\n searchidx = idxs & save_idxs\n new_searchlist.extend(np.where(np.array(searchidx) > 0)[0])\n new_searchlist = list(set(new_searchlist))\n \n if len(new_searchlist) <= 0:\n return np.sum(connects) == len(adj)\n \n return self.check_edges(adj, new_searchlist, connects)\n\n def is_cg_mpid(self, mpid, structure_tmp, is_primitive, scale, graphtype):\n try:\n structure = self.get_structure(mpid,is_primitive, scale,structure_tmp)\n _, _, adj, _, _ = self.create_crystal_graph(structure, graphtype)\n adj = np.array(adj)+np.array(adj).T\n if len(adj) > 1:\n connect_idxs = [0]*len(adj)\n startnode = [0]\n if self.check_edges(adj, startnode, connect_idxs):\n return True, mpid\n else:\n return False, mpid\n else:\n return False, mpid\n except:\n return False, mpid\n\n def get_space_group_number(self, mpid, structure_tmp):\n return SpacegroupAnalyzer(structure_tmp).get_space_group_number()\n\n def get_crystal_system_number(self, mpid, structure_tmp):\n cs_dict = {'trigonal': 0,\n 'monoclinic': 1,\n 'tetragonal': 2,\n 'triclinic': 3,\n 'cubic': 4,\n 'orthorhombic': 5,\n 'hexagonal': 6}\n return cs_dict[SpacegroupAnalyzer(structure_tmp).get_crystal_system()]" ]
[ [ "matplotlib.pyplot.gca", "scipy.spatial.Delaunay", "pandas.DataFrame", "numpy.sort", "matplotlib.pyplot.grid", "numpy.array", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
d00d/quantNotebooks
[ "a3d954ef543697f04b9960009ab9cc0b990dc578" ]
[ "Notebooks/strategies/from quantopian.algorithm import attach_pipeline,.py" ]
[ "from quantopian.algorithm import attach_pipeline, pipeline_output\nfrom quantopian.pipeline import Pipeline\nfrom quantopian.pipeline.data.builtin import USEquityPricing\nfrom quantopian.pipeline.factors import CustomFactor, SimpleMovingAverage\nfrom quantopian.pipeline.data import morningstar\n\nimport pandas as pd\nimport numpy as np\n\nv = morningstar.valuation\n\n# --- Liquidity Factor --- \nclass AvgDailyDollarVolumeTraded(CustomFactor):\n \n inputs = [USEquityPricing.close, USEquityPricing.volume]\n window_length = 20\n \n def compute(self, today, assets, out, close_price, volume):\n out[:] = np.mean(close_price * volume, axis=0)\n \n# --- Value & Growth Factor --- \nclass Value(CustomFactor):\n \n #EV_To_Sales_SalesGrowth_12M\n inputs = [morningstar.income_statement.total_revenue, v.enterprise_value]\n window_length = 252\n\n def compute(self, today, assets, out, sales, ev):\n out[:] = ev[-1] / ((sales[-1] * 4)/(((sales[-1] * 4) - (sales[0]) * 4) / (sales[0] * 4)))\n \n# --- Momentum Factor ---\n# --- 9/13: Modified Momentum factor to include (I/S)*LT scheme (I=50d, S=20d, LT=140d)\nclass Momentum(CustomFactor):\n \n inputs = [USEquityPricing.close] \n window_length = 140\n \n def compute(self, today, assets, out, close): \n out[:] = ((close[-1] / close[-50]) / (close[-1] / (close[-20]))* close[-1])\n\n# --- Quality Factor --- \nclass Quality(CustomFactor):\n \n inputs = [morningstar.operation_ratios.roe]\n window_length = 1\n \n def compute(self, today, assets, out, roe): \n out[:] = roe[-1]\n \n# --- Volatility Factor --- \n#-- 9/13 High Alpha Mean Reversion on 12M & 3M volatility\nclass Volatility(CustomFactor):\n \n inputs = [USEquityPricing.close]\n window_length = 252\n \n def compute(self, today, assets, out, close): \n close = pd.DataFrame(data=close, columns=assets) \n # Since we are going to rank largest is best we need to invert the sdev.\n out[:] = 1 / np.log(close).diff().std()\n \n\n# Compute final rank and assign long and short baskets.\ndef before_trading_start(context, data):\n results = pipeline_output('factors').dropna()\n ranks = results.rank().mean(axis=1).order()\n \n context.shorts = 1 / ranks.head(200)\n context.shorts /= context.shorts.sum()\n \n context.longs = ranks.tail(200)\n context.longs /= context.longs.sum()\n \n update_universe(context.longs.index + context.shorts.index)\n\n \n# Put any initialization logic here. The context object will be passed to\n# the other methods in your algorithm.\ndef initialize(context):\n pipe = Pipeline()\n pipe = attach_pipeline(pipe, name='factors')\n \n pipe.add(Value(), \"value\")\n pipe.add(Momentum(), \"momentum\")\n pipe.add(Quality(), \"quality\")\n pipe.add(Volatility(), \"volatility\")\n \n sma_200 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=200)\n dollar_volume = AvgDailyDollarVolumeTraded()\n \n # Screen out penny stocks and low liquidity securities.\n pipe.set_screen((sma_200 > 5) & (dollar_volume > 10**7))\n \n context.spy = sid(8554)\n context.shorts = None\n context.longs = None\n \n schedule_function(rebalance, date_rules.month_start())\n schedule_function(cancel_open_orders, date_rules.every_day(),\n time_rules.market_close())\n\n \n# Will be called on every trade event for the securities you specify. 
\ndef handle_data(context, data):\n record(lever=context.account.leverage,\n exposure=context.account.net_leverage,\n num_pos=len(context.portfolio.positions),\n oo=len(get_open_orders()))\n\n \ndef cancel_open_orders(context, data):\n for security in get_open_orders():\n for order in get_open_orders(security):\n cancel_order(order)\n \n \ndef rebalance(context, data):\n for security in context.shorts.index:\n if get_open_orders(security):\n continue\n if security in data:\n order_target_percent(security, -context.shorts[security])\n \n for security in context.longs.index:\n if get_open_orders(security):\n continue\n if security in data:\n order_target_percent(security, context.longs[security])\n \n for security in context.portfolio.positions:\n if get_open_orders(security):\n continue\n if security in data:\n if security not in (context.longs.index + context.shorts.index):\n order_target_percent(security, 0) \n \n \n " ]
[ [ "numpy.log", "numpy.mean", "pandas.DataFrame" ] ]
gregreen/deep-potential
[ "115897f8cc51c2ea2af0662449ff280ac1aa4e11" ]
[ "scripts/serializers_tf.py" ]
[ "#!/usr/bin/env python\n\nfrom __future__ import print_function, division\n\nimport tensorflow as tf\nprint(f'Tensorflow version {tf.__version__}')\nfrom tensorflow import keras\nimport numpy as np\nimport re\n\n\ndef weights_as_list(layer):\n \"\"\"\n Returns a (possibly nested) list containing\n the weights in a tf.keras.Layer.\n \"\"\"\n return [w.tolist() for w in layer.get_weights()]\n\n\ndef set_weights_w_list(layer, weights):\n \"\"\"\n Sets the weights of a tf.keras.Layer using the provided\n weights. The weights are in a (possibly nested) list, in\n the form provided by `weights_as_list`.\n \"\"\"\n layer.set_weights([np.array(w, dtype='f4') for w in weights])\n\n\ndef serialize_variable(v):\n \"\"\"\n Returns a JSON-serializable dictionary representing a\n tf.Variable.\n \"\"\"\n return dict(\n dtype=v.dtype.name,\n shape=list(v.shape),\n values=v.numpy().tolist(),\n name=re.sub('\\:[0-9]+$', '', v.name),\n trainable=v.trainable\n )\n\n\ndef deserialize_variable(d):\n \"\"\"\n Returns a tf.Variable, constructed using a dictionary\n of the form returned by `serialize_variable`.\n \"\"\"\n return tf.Variable(\n np.array(d['values'], dtype=d['dtype']),\n name=d['name'],\n trainable=d['trainable']\n )\n\n" ]
[ [ "numpy.array" ] ]
mostafanorouzi/Video-Synopsis
[ "baf871eb962f63c62e98b86120f71358b9291cee" ]
[ "Video_synopsis_Video1.py" ]
[ "import cv2\nimport numpy as np\nimport copy\nimport Car\nfrom Car import isOverLabCar, getBG, gradientline, setColor , isOverLabRects\n\nvideo_path = 'Video1.avi'\n\n# Open video file\ncap = cv2.VideoCapture(video_path)\n\n# Define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\ncapture_size = (int(cap.get(3)), int(cap.get(4)))\nout = cv2.VideoWriter('result_video1.avi', fourcc, 20, capture_size)\n\n# Create the background substractor\nfgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)\n\n# Structuring elements for morphographic filters\nkernelOp = np.ones((3,3),np.uint8)\nkernelOp2 = np.ones((5,5),np.uint8)\nkernelCl = np.ones((11,11),np.uint8)\n\n# Variables\ncars = []\nfont = cv2.FONT_HERSHEY_SIMPLEX\npid = 1\nfps = cap.get(cv2.CAP_PROP_FPS)\nframeId = 0\nw = cap.get(3)\nh = cap.get(4)\nframeArea = h*w\nareaTH = frameArea/600\nup_limit = int(1.6*(h/5))\ndown_limit = int(3*(h/5))\n\n# create background\nimg_b = getBG(video_path, 40)\n\nwhile (cap.isOpened()):\n # read a frame\n ret, orginalframe = cap.read()\n frame = copy.copy(orginalframe)\n\n # time : second\n t_sec = frameId / fps\n\n # Use the substractor\n fgmask = fgbg.apply(frame)\n\n try:\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2))\n kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))\n bg = cv2.erode(fgmask, kernel, iterations=1)\n # Fill any small holes\n closing = cv2.morphologyEx(bg, cv2.MORPH_CLOSE, kernel)\n # Remove noise\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel2)\n # Dilate to merge adjacent blobs\n dilation = cv2.dilate(opening, kernel2, iterations=4)\n # threshold to remove furthur noise\n dilation[dilation < 240] = 0\n bg = dilation\n mask = bg\n except:\n # if there are no more frames to show...\n print('EOF')\n break\n\n # RETR_EXTERNAL returns only extreme outer flags. 
All child contours are left behind.\n _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n for cnt in contours0:\n area = cv2.contourArea(cnt)\n if area > areaTH:\n #################\n # TRACKING #\n #################\n M = cv2.moments(cnt)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n x, y, w, h = cv2.boundingRect(cnt)\n\n tube = Car.tube(cx,cy,x, y, w, h, t_sec, orginalframe)\n #calcute color of car\n setColor(tube)\n new = True\n if cy in range(up_limit, down_limit):\n for i in cars:\n if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h and abs(i.frameId - frameId) < 3:\n # the object is close to one that was already detected before\n new = False\n i.updateCoords(tube, frameId)\n break\n if new == True:\n p = Car.MyCar(pid, tube, frameId)\n cars.append(p)\n pid += 1\n\n frameId += 1\n#finish loop\n\n\nfor i in range(1,len(cars)) :\n if cars[i].lentube()>15 :\n for j in range(i) :\n if cars[j].lentube() > 15:\n while isOverLabCar(cars[i],cars[j]) :\n cars[i].startFrame+=1\n\nn=0\n\n\n#create video synopsis\nwhile True:\n finish = True\n for i in cars :\n if i.lentube()>20:\n finish = False\n if finish :\n break\n frame = np.array(img_b)\n rects = []\n for i in range(len(cars)):\n if cars[i].lentube()>15 and cars[i].startFrame>=n :\n temp = cars[i].begin()\n canAdd = True\n new_rect = (temp.x,temp.y,temp.w,temp.h)\n for rect in rects :\n if isOverLabRects(rect, new_rect) :\n canAdd = False\n if canAdd :\n rects.append(new_rect)\n frame[temp.y:temp.y + temp.w, temp.x:temp.x + temp.h] = temp.target\n cv2.putText(frame, str(temp.t_sec), (temp.x, temp.y), font, 0.3, (0, 0, 0), 1, cv2.LINE_AA)\n cars[i].pop_front()\n out.write(frame)\n cv2.imshow('result', frame)\n k = cv2.waitKey(100) & 0xff\n if k == 27:\n break\n\ncap.release() # release video file\nout.release()\ncv2.destroyAllWindows() # close all openCV windows\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
michellab/bgflow
[ "46c1f6035a7baabcbaee015603d08b8ce63d9717" ]
[ "tests/distribution/energy/test_linlogcut.py" ]
[ "\nimport pytest\nimport torch\nfrom bgflow import Energy, LinLogCutEnergy\n\n\nclass StrongRepulsion(Energy):\n def __init__(self):\n super().__init__([2, 2])\n\n def _energy(self, x):\n dist = torch.cdist(x, x)\n return (dist ** -12)[..., 0, 1][:, None]\n\n\ndef test_linlogcut(ctx):\n lj = StrongRepulsion()\n llc = LinLogCutEnergy(lj, high_energy=1e3, max_energy=1e10)\n x = torch.tensor([\n [[0., 0.], [0.0, 0.0]], # > max energy\n [[0., 0.], [0.0, 0.3]], # < max_energy, > high_energy\n [[0., 0.], [0.0, 1.]], # < high_energy\n ], **ctx)\n raw = lj.energy(x)[:, 0]\n cut = llc.energy(x)[:, 0]\n\n # first energy is clamped\n assert not (raw <= 1e10).all()\n assert (cut <= 1e10).all()\n assert cut[0].item() == pytest.approx(1e10, abs=1e-5)\n # second energy is softened, but force points in the right direction\n assert 1e3 < cut[1].item() < 1e10\n assert llc.force(x)[1][1, 1] > 0.0\n assert llc.force(x)[1][0, 1] < 0.0\n # third energy is unchanged\n assert torch.allclose(raw[2], cut[2], atol=1e-5)\n" ]
[ [ "torch.allclose", "torch.cdist", "torch.tensor" ] ]
zhoufu945/Transport-Mode-GPS-CNN
[ "8db5f83f593a004a7af280bfd6668cc6032e8338" ]
[ "Keras_Data_Creation.py" ]
[ "import numpy as np\r\nimport pickle\r\nimport os\r\nfrom scipy.signal import savgol_filter\r\n\r\nfilename = '../Combined Trajectory_Label_Geolife/Revised_InstanceCreation+NoJerkOutlier+Smoothing.pickle'\r\n# Each of the following variables contain multiple lists, where each list belongs to a user\r\nwith open(filename, 'rb') as f:\r\n Total_RelativeDistance, Total_Speed, Total_Acceleration, Total_Jerk, Total_BearingRate, Total_Label,\\\r\n Total_InstanceNumber, Total_Instance_InSequence, Total_Delta_Time, Total_Velocity_Change = pickle.load(f, encoding='latin1')\r\n\r\n# Create the data in the Keras form\r\n# Threshold: Is the max of number of GPS point in an instance\r\n\r\nThreshold = 200\r\nZero_Instance = [i for i, item in enumerate(Total_Instance_InSequence) if item == 0]\r\nNumber_of_Instance = len(Total_Instance_InSequence) - len(Zero_Instance)\r\nTotalInput = np.zeros((Number_of_Instance, 1, Threshold, 4), dtype=float)\r\nFinalLabel = np.zeros((Number_of_Instance, 1), dtype=int)\r\ncounter = 0\r\n\r\nfor k in range(len(Total_InstanceNumber)):\r\n # Create Keras shape with 4 channels for each user\r\n # There are 4 channels(in order: RelativeDistance, Speed, Acceleration, BearingRate)\r\n RD = Total_RelativeDistance[k]\r\n SP = Total_Speed[k]\r\n AC = Total_Acceleration[k]\r\n J = Total_Jerk[k]\r\n BR = Total_BearingRate[k]\r\n LA = Total_Label[k]\r\n # IN: the instances and number of GPS points in each instance for each user k\r\n IN = Total_InstanceNumber[k]\r\n\r\n for i in range(len(IN)):\r\n end = IN[i]\r\n if end == 0 or sum(RD[i]) == 0:\r\n continue\r\n TotalInput[counter, 0, 0:end, 0] = SP[i]\r\n TotalInput[counter, 0, 0:end, 1] = AC[i]\r\n TotalInput[counter, 0, 0:end, 2] = J[i]\r\n TotalInput[counter, 0, 0:end, 3] = BR[i]\r\n FinalLabel[counter, 0] = LA[i]\r\n counter += 1\r\n\r\nTotalInput = TotalInput[:counter, :, :, :]\r\nFinalLabel = FinalLabel[:counter, 0]\r\n\r\nwith open('Revised_KerasData_NoSmoothing.pickle', 'wb') as f: # Python 3: open(..., 'wb')\r\n pickle.dump([TotalInput, FinalLabel], f)\r\n\r\n" ]
[ [ "numpy.zeros" ] ]
MitchellAcoustics/rsd-engineeringcourse
[ "43769a849e02983f3fb334eb10d6d8e9ec259eac" ]
[ "ch01data/greengraph/map.py" ]
[ "import numpy as np\nfrom io import BytesIO\nimport imageio as img\nimport requests\n\n\nclass Map:\n def __init__(\n self, lat, long, satellite=True, zoom=10, size=(400, 400), sensor=False\n ):\n base = \"https://static-maps.yandex.ru/1.x/?\"\n\n params = dict(\n z=zoom,\n size=str(size[0]) + \",\" + str(size[1]),\n ll=str(long) + \",\" + str(lat),\n l=\"sat\" if satellite else \"map\",\n lang=\"en_US\",\n )\n\n self.image = requests.get(\n base, params=params\n ).content # Fetch our PNG image data\n content = BytesIO(self.image)\n self.pixels = img.imread(content) # Parse our PNG image as a numpy array\n\n def green(self, threshold):\n # Use NumPy to build an element-by-element logical array\n greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]\n greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]\n green = np.logical_and(greener_than_red, greener_than_blue)\n return green\n\n def count_green(self, threshold=1.1):\n return np.sum(self.green(threshold))\n\n def show_green(data, threshold=1.1):\n green = self.green(threshold)\n out = green[:, :, np.newaxis] * array([0, 1, 0])[np.newaxis, np.newaxis, :]\n buffer = BytesIO()\n result = img.imwrite(buffer, out, format=\"png\")\n return buffer.getvalue()\n" ]
[ [ "numpy.logical_and" ] ]
alibh95/PyBaMM
[ "351183b2e8bd6fd11da998625d0fc029d8f99c1b" ]
[ "pybamm/simulation.py" ]
[ "#\n# Simulation class\n#\nimport pickle\nimport pybamm\nimport numpy as np\nimport copy\nimport warnings\nimport sys\n\n\ndef is_notebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == \"ZMQInteractiveShell\": # pragma: no cover\n # Jupyter notebook or qtconsole\n cfg = get_ipython().config\n nb = len(cfg[\"InteractiveShell\"].keys()) == 0\n return nb\n elif shell == \"TerminalInteractiveShell\": # pragma: no cover\n return False # Terminal running IPython\n elif shell == \"Shell\": # pragma: no cover\n return True # Google Colab notebook\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n\ndef constant_current_constant_voltage_constant_power(variables):\n I = variables[\"Current [A]\"]\n V = variables[\"Terminal voltage [V]\"]\n s_I = pybamm.InputParameter(\"Current switch\")\n s_V = pybamm.InputParameter(\"Voltage switch\")\n s_P = pybamm.InputParameter(\"Power switch\")\n n_cells = pybamm.Parameter(\"Number of cells connected in series to make a battery\")\n return (\n s_I * (I - pybamm.InputParameter(\"Current input [A]\"))\n + s_V * (V - pybamm.InputParameter(\"Voltage input [V]\") / n_cells)\n + s_P * (V * I - pybamm.InputParameter(\"Power input [W]\") / n_cells)\n )\n\n\ndef constant_voltage(variables, V_applied):\n V = variables[\"Terminal voltage [V]\"]\n n_cells = pybamm.Parameter(\"Number of cells connected in series to make a battery\")\n return V - V_applied / n_cells\n\n\ndef constant_power(variables, P_applied):\n I = variables[\"Current [A]\"]\n V = variables[\"Terminal voltage [V]\"]\n n_cells = pybamm.Parameter(\"Number of cells connected in series to make a battery\")\n return V * I - P_applied / n_cells\n\n\nclass Simulation:\n \"\"\"A Simulation class for easy building and running of PyBaMM simulations.\n\n Parameters\n ----------\n model : :class:`pybamm.BaseModel`\n The model to be simulated\n experiment : :class:`pybamm.Experiment` (optional)\n The experimental conditions under which to solve the model\n geometry: :class:`pybamm.Geometry` (optional)\n The geometry upon which to solve the model\n parameter_values: :class:`pybamm.ParameterValues` (optional)\n Parameters and their corresponding numerical values.\n submesh_types: dict (optional)\n A dictionary of the types of submesh to use on each subdomain\n var_pts: dict (optional)\n A dictionary of the number of points used by each spatial variable\n spatial_methods: dict (optional)\n A dictionary of the types of spatial method to use on each\n domain (e.g. pybamm.FiniteVolume)\n solver: :class:`pybamm.BaseSolver` (optional)\n The solver to use to solve the model.\n output_variables: list (optional)\n A list of variables to plot automatically\n C_rate: float (optional)\n The C-rate at which you would like to run a constant current (dis)charge.\n \"\"\"\n\n def __init__(\n self,\n model,\n experiment=None,\n geometry=None,\n parameter_values=None,\n submesh_types=None,\n var_pts=None,\n spatial_methods=None,\n solver=None,\n output_variables=None,\n C_rate=None,\n ):\n self.parameter_values = parameter_values or model.default_parameter_values\n\n if isinstance(model, pybamm.lithium_ion.BasicDFNHalfCell):\n raise NotImplementedError(\n \"BasicDFNHalfCell is not compatible with Simulations yet.\"\n )\n\n if experiment is None:\n # Check to see if the current is provided as data (i.e. 
drive cycle)\n            current = self._parameter_values.get(\"Current function [A]\")\n            if isinstance(current, pybamm.Interpolant):\n                self.operating_mode = \"drive cycle\"\n            elif isinstance(current, tuple):\n                raise NotImplementedError(\n                    \"Drive cycle from data has been deprecated. \"\n                    + \"Define an Interpolant instead.\"\n                )\n            else:\n                self.operating_mode = \"without experiment\"\n                if C_rate:\n                    self.C_rate = C_rate\n                    self._parameter_values.update(\n                        {\n                            \"Current function [A]\": self.C_rate\n                            * self._parameter_values[\"Nominal cell capacity [A.h]\"]\n                        }\n                    )\n\n            self._unprocessed_model = model\n            self.model = model\n        else:\n            self.set_up_experiment(model, experiment)\n\n        self.geometry = geometry or self.model.default_geometry\n        self.submesh_types = submesh_types or self.model.default_submesh_types\n        self.var_pts = var_pts or self.model.default_var_pts\n        self.spatial_methods = spatial_methods or self.model.default_spatial_methods\n        self.solver = solver or self.model.default_solver\n        self.output_variables = output_variables\n\n        # Initialize empty built states\n        self._model_with_set_params = None\n        self._built_model = None\n        self._mesh = None\n        self._disc = None\n        self._solution = None\n\n        # ignore runtime warnings in notebooks\n        if is_notebook():  # pragma: no cover\n            import warnings\n\n            warnings.filterwarnings(\"ignore\")\n\n    def set_up_experiment(self, model, experiment):\n        \"\"\"\n        Set up a simulation to run with an experiment. This creates a dictionary of\n        inputs (current/voltage/power, running time, stopping condition) for each\n        operating condition in the experiment. The model will then be solved by\n        integrating the model successively with each group of inputs, one group at a\n        time.\n        \"\"\"\n        self.operating_mode = \"with experiment\"\n\n        if not isinstance(experiment, pybamm.Experiment):\n            raise TypeError(\"experiment must be a pybamm `Experiment` instance\")\n\n        # Save the experiment\n        self.experiment = experiment\n        # Update parameter values with experiment parameters\n        self._parameter_values.update(experiment.parameters)\n        # Create a new submodel for each set of operating conditions and update\n        # parameters and events accordingly\n        self._experiment_inputs = []\n        self._experiment_times = []\n        for op, events in zip(experiment.operating_conditions, experiment.events):\n            if isinstance(op[0], np.ndarray):\n                # If an ndarray is received, create an interpolant\n                timescale = self._parameter_values.evaluate(model.timescale)\n                drive_cycle_interpolant = pybamm.Interpolant(\n                    op[0][:, 0], op[0][:, 1], timescale * pybamm.t\n                )\n                if op[1] == \"A\":\n                    operating_inputs = {\n                        \"Current switch\": 1,\n                        \"Voltage switch\": 0,\n                        \"Power switch\": 0,\n                        \"Current input [A]\": drive_cycle_interpolant,\n                        \"Voltage input [V]\": 0,  # doesn't matter\n                        \"Power input [W]\": 0,  # doesn't matter\n                    }\n                elif op[1] == \"V\":\n                    operating_inputs = {\n                        \"Current switch\": 0,\n                        \"Voltage switch\": 1,\n                        \"Power switch\": 0,\n                        \"Current input [A]\": 0,  # doesn't matter\n                        \"Voltage input [V]\": drive_cycle_interpolant,\n                        \"Power input [W]\": 0,  # doesn't matter\n                    }\n                elif op[1] == \"W\":\n                    operating_inputs = {\n                        \"Current switch\": 0,\n                        \"Voltage switch\": 0,\n                        \"Power switch\": 1,\n                        \"Current input [A]\": 0,  # doesn't matter\n                        \"Voltage input [V]\": 0,  # doesn't matter\n                        \"Power input [W]\": drive_cycle_interpolant,\n                    }\n            else:\n                if op[1] in [\"A\", \"C\"]:\n                    # Update inputs for constant current\n                    if op[1] == \"A\":\n                        I = op[0]\n                    else:\n                        # Scale C-rate with capacity to obtain current\n                        capacity = 
self._parameter_values[\"Nominal cell capacity [A.h]\"]\n I = op[0] * capacity\n operating_inputs = {\n \"Current switch\": 1,\n \"Voltage switch\": 0,\n \"Power switch\": 0,\n \"Current input [A]\": I,\n \"Voltage input [V]\": 0, # doesn't matter\n \"Power input [W]\": 0, # doesn't matter\n }\n elif op[1] == \"V\":\n # Update inputs for constant voltage\n V = op[0]\n operating_inputs = {\n \"Current switch\": 0,\n \"Voltage switch\": 1,\n \"Power switch\": 0,\n \"Current input [A]\": 0, # doesn't matter\n \"Voltage input [V]\": V,\n \"Power input [W]\": 0, # doesn't matter\n }\n elif op[1] == \"W\":\n # Update inputs for constant power\n P = op[0]\n operating_inputs = {\n \"Current switch\": 0,\n \"Voltage switch\": 0,\n \"Power switch\": 1,\n \"Current input [A]\": 0, # doesn't matter\n \"Voltage input [V]\": 0, # doesn't matter\n \"Power input [W]\": P,\n }\n # Update period\n operating_inputs[\"period\"] = op[3]\n # Update events\n if events is None:\n # make current and voltage values that won't be hit\n operating_inputs.update(\n {\"Current cut-off [A]\": -1e10, \"Voltage cut-off [V]\": -1e10}\n )\n elif events[1] in [\"A\", \"C\"]:\n # update current cut-off, make voltage a value that won't be hit\n if events[1] == \"A\":\n I = events[0]\n else:\n # Scale C-rate with capacity to obtain current\n capacity = self._parameter_values[\"Nominal cell capacity [A.h]\"]\n I = events[0] * capacity\n operating_inputs.update(\n {\"Current cut-off [A]\": I, \"Voltage cut-off [V]\": -1e10}\n )\n elif events[1] == \"V\":\n # update voltage cut-off, make current a value that won't be hit\n V = events[0]\n operating_inputs.update(\n {\"Current cut-off [A]\": -1e10, \"Voltage cut-off [V]\": V}\n )\n\n self._experiment_inputs.append(operating_inputs)\n # Add time to the experiment times\n dt = op[2]\n if dt is None:\n # max simulation time: 1 week\n dt = 7 * 24 * 3600\n self._experiment_times.append(dt)\n\n # Set up model for experiment\n if experiment.use_simulation_setup_type == \"old\":\n self.set_up_model_for_experiment_old(model)\n elif experiment.use_simulation_setup_type == \"new\":\n self.set_up_model_for_experiment_new(model)\n\n def set_up_model_for_experiment_old(self, model):\n \"\"\"\n Set up self.model to be able to run the experiment (old version).\n In this version, a single model is created which can then be called with\n different inputs for current-control, voltage-control, or power-control.\n\n This reduces set-up time since only one model needs to be processed, but\n increases simulation time since the model formulation is inefficient\n \"\"\"\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # create the FunctionControl submodel and extract variables\n external_circuit_variables = pybamm.external_circuit.FunctionControl(\n model.param, None\n ).get_fundamental_variables()\n\n # Perform the replacement\n symbol_replacement_map = {\n model.variables[name]: variable\n for name, variable in external_circuit_variables.items()\n }\n replacer = pybamm.SymbolReplacer(symbol_replacement_map)\n new_model = replacer.process_model(model, inplace=False)\n\n # Update the algebraic equation and initial conditions for FunctionControl\n # This creates an algebraic equation for the current to allow current, voltage,\n # or power control, together with the appropriate guess for the\n # initial condition.\n # 
External circuit submodels are always equations on the current\n        # The external circuit function should fix either the current, or the voltage,\n        # or a combination (e.g. I*V for power control)\n        i_cell = new_model.variables[\"Total current density\"]\n        new_model.initial_conditions[i_cell] = new_model.param.current_with_time\n        new_model.algebraic[i_cell] = constant_current_constant_voltage_constant_power(\n            new_model.variables\n        )\n\n        # Remove upper and lower voltage cut-offs that are *not* part of the experiment\n        new_model.events = [\n            event\n            for event in model.events\n            if event.name not in [\"Minimum voltage\", \"Maximum voltage\"]\n        ]\n        # add current and voltage events to the model\n        # current events both negative and positive to catch specification\n        new_model.events.extend(\n            [\n                pybamm.Event(\n                    \"Current cut-off (positive) [A] [experiment]\",\n                    new_model.variables[\"Current [A]\"]\n                    - abs(pybamm.InputParameter(\"Current cut-off [A]\")),\n                ),\n                pybamm.Event(\n                    \"Current cut-off (negative) [A] [experiment]\",\n                    new_model.variables[\"Current [A]\"]\n                    + abs(pybamm.InputParameter(\"Current cut-off [A]\")),\n                ),\n                pybamm.Event(\n                    \"Voltage cut-off [V] [experiment]\",\n                    new_model.variables[\"Terminal voltage [V]\"]\n                    - pybamm.InputParameter(\"Voltage cut-off [V]\")\n                    / model.param.n_cells,\n                ),\n            ]\n        )\n\n        self.model = new_model\n\n        self.op_conds_to_model_and_param = {\n            op_cond[-1]: (new_model, self.parameter_values)\n            for op_cond in set(self.experiment.operating_conditions)\n        }\n        self.op_conds_to_built_models = None\n\n    def set_up_model_for_experiment_new(self, model):\n        \"\"\"\n        Set up self.model to be able to run the experiment (new version).\n        In this version, a new model is created for each step.\n\n        This increases set-up time since several models need to be processed, but\n        reduces simulation time since the model formulation is efficient.\n        \"\"\"\n        self.op_conds_to_model_and_param = {}\n        self.op_conds_to_built_models = None\n        for op_cond, op_inputs in zip(\n            self.experiment.operating_conditions, self._experiment_inputs\n        ):\n            # Create model for this operating condition if it has not already been\n            # seen before\n            if op_cond[-1] not in self.op_conds_to_model_and_param:\n                if op_inputs[\"Current switch\"] == 1:\n                    # Current control\n                    # Make a new copy of the model (we will update events later)\n                    new_model = model.new_copy()\n                else:\n                    # Voltage or power control\n                    # Create a new model where the current density is now a variable\n                    # To do so, we replace all instances of the current density in the\n                    # model with a current density variable, which is obtained from the\n                    # FunctionControl submodel\n                    # create the FunctionControl submodel and extract variables\n                    external_circuit_variables = (\n                        pybamm.external_circuit.FunctionControl(\n                            model.param, None\n                        ).get_fundamental_variables()\n                    )\n\n                    # Perform the replacement\n                    symbol_replacement_map = {\n                        model.variables[name]: variable\n                        for name, variable in external_circuit_variables.items()\n                    }\n                    replacer = pybamm.SymbolReplacer(symbol_replacement_map)\n                    new_model = replacer.process_model(model, inplace=False)\n\n                    # Update the algebraic equation and initial conditions for\n                    # FunctionControl\n                    # This creates an algebraic equation for the current to allow\n                    # current, voltage, or power control, together with the appropriate\n                    # guess for the initial condition.\n                    # External circuit submodels are always equations on the current\n                    # The external circuit function should fix either the current, or\n                    # the voltage, or a combination (e.g. I*V for power control)\n                    i_cell = new_model.variables[\"Total current density\"]\n                    new_model.initial_conditions[\n                        i_cell\n                    ] = new_model.param.current_with_time\n\n                    # add current events to the model\n                    # current events both negative and positive to catch specification\n                    new_model.events.extend(\n                        [\n                            pybamm.Event(\n                                \"Current cut-off (positive) [A] [experiment]\",\n                                new_model.variables[\"Current [A]\"]\n                                - abs(pybamm.InputParameter(\"Current cut-off [A]\")),\n                            ),\n                            pybamm.Event(\n                                \"Current cut-off (negative) [A] [experiment]\",\n                                new_model.variables[\"Current [A]\"]\n                                + abs(pybamm.InputParameter(\"Current cut-off [A]\")),\n                            ),\n                        ]\n                    )\n                    if op_inputs[\"Voltage switch\"] == 1:\n                        new_model.algebraic[i_cell] = constant_voltage(\n                            new_model.variables,\n                            pybamm.Parameter(\"Voltage function [V]\"),\n                        )\n                    elif op_inputs[\"Power switch\"] == 1:\n                        new_model.algebraic[i_cell] = constant_power(\n                            new_model.variables,\n                            pybamm.Parameter(\"Power function [W]\"),\n                        )\n\n                # add voltage events to the model\n                if op_inputs[\"Power switch\"] == 1 or op_inputs[\"Current switch\"] == 1:\n                    new_model.events.append(\n                        pybamm.Event(\n                            \"Voltage cut-off [V] [experiment]\",\n                            new_model.variables[\"Terminal voltage [V]\"]\n                            - op_inputs[\"Voltage cut-off [V]\"] / model.param.n_cells,\n                        )\n                    )\n\n                # Remove upper and lower voltage cut-offs that are *not* part of the\n                # experiment\n                new_model.events = [\n                    event\n                    for event in new_model.events\n                    if event.name not in [\"Minimum voltage\", \"Maximum voltage\"]\n                ]\n\n                # Update parameter values\n                new_parameter_values = self.parameter_values.copy()\n                if op_inputs[\"Current switch\"] == 1:\n                    new_parameter_values.update(\n                        {\"Current function [A]\": op_inputs[\"Current input [A]\"]}\n                    )\n                elif op_inputs[\"Voltage switch\"] == 1:\n                    new_parameter_values.update(\n                        {\"Voltage function [V]\": op_inputs[\"Voltage input [V]\"]},\n                        check_already_exists=False,\n                    )\n                elif op_inputs[\"Power switch\"] == 1:\n                    new_parameter_values.update(\n                        {\"Power function [W]\": op_inputs[\"Power input [W]\"]},\n                        check_already_exists=False,\n                    )\n\n                self.op_conds_to_model_and_param[op_cond[-1]] = (\n                    new_model,\n                    new_parameter_values,\n                )\n        self.model = model\n\n    def set_parameters(self):\n        \"\"\"\n        A method to set the parameters in the model and the associated geometry.\n        \"\"\"\n\n        if self.model_with_set_params:\n            return None\n\n        if self._parameter_values._dict_items == {}:\n            # Don't process if parameter values is empty\n            self._model_with_set_params = self._unprocessed_model\n        else:\n            self._model_with_set_params = self._parameter_values.process_model(\n                self._unprocessed_model, inplace=False\n            )\n            self._parameter_values.process_geometry(self._geometry)\n        self.model = self._model_with_set_params\n\n    def build(self, check_model=True):\n        \"\"\"\n        A method to build the model into a system of matrices and vectors suitable for\n        performing numerical computations. If the model has already been built or\n        solved then this function will have no effect.\n        This method will automatically set the parameters\n        if they have not already been set.\n\n        Parameters\n        ----------\n        check_model : bool, optional\n            If True, model checks are performed after discretisation (see\n            :meth:`pybamm.Discretisation.process_model`). 
Default is True.\n \"\"\"\n\n if self.built_model:\n return None\n elif self.model.is_discretised:\n self._model_with_set_params = self.model\n self._built_model = self.model\n else:\n self.set_parameters()\n self._mesh = pybamm.Mesh(self._geometry, self._submesh_types, self._var_pts)\n self._disc = pybamm.Discretisation(self._mesh, self._spatial_methods)\n self._built_model = self._disc.process_model(\n self._model_with_set_params, inplace=False, check_model=check_model\n )\n\n def build_for_experiment(self, check_model=True):\n \"\"\"\n Similar to :meth:`Simulation.build`, but for the case of simulating an\n experiment, where there may be several models to build\n \"\"\"\n if self.op_conds_to_built_models:\n return None\n else:\n # Can process geometry with default parameter values (only electrical\n # parameters change between parameter values)\n self._parameter_values.process_geometry(self._geometry)\n # Only needs to set up mesh and discretisation once\n self._mesh = pybamm.Mesh(self._geometry, self._submesh_types, self._var_pts)\n self._disc = pybamm.Discretisation(self._mesh, self._spatial_methods)\n # Process all the different models\n self.op_conds_to_built_models = {}\n processed_models = {}\n for op_cond, (\n unbuilt_model,\n parameter_values,\n ) in self.op_conds_to_model_and_param.items():\n if unbuilt_model in processed_models:\n built_model = processed_models[unbuilt_model]\n else:\n # It's ok to modify the models in-place as they are not accessible\n # from outside the simulation\n model_with_set_params = parameter_values.process_model(\n unbuilt_model, inplace=True\n )\n built_model = self._disc.process_model(\n model_with_set_params, inplace=True, check_model=check_model\n )\n processed_models[unbuilt_model] = built_model\n\n self.op_conds_to_built_models[op_cond] = built_model\n\n def solve(\n self,\n t_eval=None,\n solver=None,\n check_model=True,\n save_at_cycles=None,\n starting_solution=None,\n **kwargs,\n ):\n \"\"\"\n A method to solve the model. This method will automatically build\n and set the model parameters if not already done so.\n\n Parameters\n ----------\n t_eval : numeric type, optional\n The times (in seconds) at which to compute the solution. Can be\n provided as an array of times at which to return the solution, or as a\n list `[t0, tf]` where `t0` is the initial time and `tf` is the final time.\n If provided as a list the solution is returned at 100 points within the\n interval `[t0, tf]`.\n\n If not using an experiment or running a drive cycle simulation (current\n provided as data) `t_eval` *must* be provided.\n\n If running an experiment the values in `t_eval` are ignored, and the\n solution times are specified by the experiment.\n\n If None and the parameter \"Current function [A]\" is read from data\n (i.e. drive cycle simulation) the model will be solved at the times\n provided in the data.\n solver : :class:`pybamm.BaseSolver`, optional\n The solver to use to solve the model. If None, Simulation.solver is used\n check_model : bool, optional\n If True, model checks are performed after discretisation (see\n :meth:`pybamm.Discretisation.process_model`). Default is True.\n save_at_cycles : int or list of ints, optional\n Which cycles to save the full sub-solutions for. If None, all cycles are\n saved. If int, every multiple of save_at_cycles is saved. If list, every\n cycle in the list is saved.\n starting_solution : :class:`pybamm.Solution`\n The solution to start stepping from. If None (default), then self._solution\n is used. 
Must be None if not using an experiment.\n        **kwargs\n            Additional key-word arguments passed to `solver.solve`.\n            See :meth:`pybamm.BaseSolver.solve`.\n        \"\"\"\n        # Setup\n        if solver is None:\n            solver = self.solver\n\n        if self.operating_mode in [\"without experiment\", \"drive cycle\"]:\n            self.build(check_model=check_model)\n            if save_at_cycles is not None:\n                raise ValueError(\n                    \"'save_at_cycles' option can only be used if simulating an \"\n                    \"Experiment \"\n                )\n            if starting_solution is not None:\n                raise ValueError(\n                    \"starting_solution can only be provided if simulating an Experiment\"\n                )\n            if self.operating_mode == \"without experiment\":\n                if t_eval is None:\n                    raise pybamm.SolverError(\n                        \"'t_eval' must be provided if not using an experiment or \"\n                        \"simulating a drive cycle. 't_eval' can be provided as an \"\n                        \"array of times at which to return the solution, or as a \"\n                        \"list [t0, tf] where t0 is the initial time and tf is the \"\n                        \"final time. \"\n                        \"For a constant current (dis)charge the suggested 't_eval' \"\n                        \"is [0, 3700/C] where C is the C-rate. \"\n                        \"For example, run\\n\\n\"\n                        \"\\tsim.solve([0, 3700])\\n\\n\"\n                        \"for a 1C discharge.\"\n                    )\n\n            elif self.operating_mode == \"drive cycle\":\n                # For drive cycles (current provided as data) we perform additional\n                # tests on t_eval (if provided) to ensure the returned solution\n                # captures the input.\n                time_data = self._parameter_values[\"Current function [A]\"].x[0]\n                # If no t_eval is provided, we use the times provided in the data.\n                if t_eval is None:\n                    pybamm.logger.info(\"Setting t_eval as specified by the data\")\n                    t_eval = time_data\n                # If t_eval is provided we first check if it contains all of the\n                # times in the data to within 1e-12. If it doesn't, we then check\n                # that the largest gap in t_eval is smaller than the smallest gap in\n                # the time data (to ensure the resolution of t_eval is fine enough).\n                # We only raise a warning here as users may genuinely only want\n                # the solution returned at some specified points.\n                elif (\n                    set(np.round(time_data, 12)).issubset(set(np.round(t_eval, 12)))\n                ) is False:\n                    warnings.warn(\n                        \"\"\"\n                        t_eval does not contain all of the time points in the data\n                        set. Note: passing t_eval = None automatically sets t_eval\n                        to be the points in the data.\n                        \"\"\",\n                        pybamm.SolverWarning,\n                    )\n                    dt_data_min = np.min(np.diff(time_data))\n                    dt_eval_max = np.max(np.diff(t_eval))\n                    if dt_eval_max > dt_data_min + sys.float_info.epsilon:\n                        warnings.warn(\n                            \"\"\"\n                            The largest timestep in t_eval ({}) is larger than\n                            the smallest timestep in the data ({}). The returned\n                            solution may not have the correct resolution to accurately\n                            capture the input. Try refining t_eval. Alternatively,\n                            passing t_eval = None automatically sets t_eval to be the\n                            points in the data.\n                            \"\"\".format(\n                                dt_eval_max, dt_data_min\n                            ),\n                            pybamm.SolverWarning,\n                        )\n\n            self._solution = solver.solve(self.built_model, t_eval, **kwargs)\n\n        elif self.operating_mode == \"with experiment\":\n            self.build_for_experiment(check_model=check_model)\n            if t_eval is not None:\n                pybamm.logger.warning(\n                    \"Ignoring t_eval as solution times are specified by the experiment\"\n                )\n            # Re-initialize solution, e.g. 
for solving multiple times with different\n # inputs without having to build the simulation again\n self._solution = starting_solution\n # Step through all experimental conditions\n inputs = kwargs.get(\"inputs\", {})\n pybamm.logger.info(\"Start running experiment\")\n timer = pybamm.Timer()\n\n if starting_solution is None:\n starting_solution_cycles = []\n else:\n starting_solution_cycles = starting_solution.cycles.copy()\n\n cycle_offset = len(starting_solution_cycles)\n all_cycle_solutions = starting_solution_cycles\n current_solution = starting_solution\n\n idx = 0\n num_cycles = len(self.experiment.cycle_lengths)\n feasible = True # simulation will stop if experiment is infeasible\n for cycle_num, cycle_length in enumerate(\n self.experiment.cycle_lengths, start=1\n ):\n pybamm.logger.notice(\n f\"Cycle {cycle_num+cycle_offset}/{num_cycles+cycle_offset} \"\n f\"({timer.time()} elapsed) \" + \"-\" * 20\n )\n steps = []\n cycle_solution = None\n\n for step_num in range(1, cycle_length + 1):\n exp_inputs = self._experiment_inputs[idx]\n dt = self._experiment_times[idx]\n op_conds_str = self.experiment.operating_conditions_strings[idx]\n op_conds_elec = self.experiment.operating_conditions[idx][:2]\n model = self.op_conds_to_built_models[op_conds_str]\n # Use 1-indexing for printing cycle number as it is more\n # human-intuitive\n pybamm.logger.notice(\n f\"Cycle {cycle_num+cycle_offset}/{num_cycles+cycle_offset}, \"\n f\"step {step_num}/{cycle_length}: {op_conds_str}\"\n )\n inputs.update(exp_inputs)\n kwargs[\"inputs\"] = inputs\n # Make sure we take at least 2 timesteps\n npts = max(int(round(dt / exp_inputs[\"period\"])) + 1, 2)\n step_solution = solver.step(\n current_solution,\n model,\n dt,\n npts=npts,\n save=False,\n **kwargs,\n )\n steps.append(step_solution)\n current_solution = step_solution\n cycle_solution = cycle_solution + step_solution\n # Only allow events specified by experiment\n if not (\n cycle_solution is None\n or cycle_solution.termination == \"final time\"\n or \"[experiment]\" in cycle_solution.termination\n ):\n feasible = False\n break\n\n # Increment index for next iteration\n idx += 1\n\n # Break if the experiment is infeasible\n if feasible is False:\n pybamm.logger.warning(\n \"\\n\\n\\tExperiment is infeasible: '{}' \".format(\n cycle_solution.termination\n )\n + \"was triggered during '{}'. \".format(\n self.experiment.operating_conditions_strings[idx]\n )\n + \"The returned solution only contains the first \"\n \"{} cycles. \".format(cycle_num - 1 + cycle_offset)\n + \"Try reducing the current, shortening the time interval, \"\n \"or reducing the period.\\n\\n\"\n )\n break\n\n # At the final step of the inner loop we save the cycle\n self._solution = self.solution + cycle_solution\n cycle_solution.steps = steps\n all_cycle_solutions.append(cycle_solution)\n\n if self.solution is not None:\n self.solution.cycles = all_cycle_solutions\n\n pybamm.logger.notice(\n \"Finish experiment simulation, took {}\".format(timer.time())\n )\n\n return self.solution\n\n def step(\n self, dt, solver=None, npts=2, save=True, starting_solution=None, **kwargs\n ):\n \"\"\"\n A method to step the model forward one timestep. 
This method will\n automatically build and set the model parameters if not already done so.\n\n Parameters\n ----------\n dt : numeric type\n The timestep over which to step the solution\n solver : :class:`pybamm.BaseSolver`\n The solver to use to solve the model.\n npts : int, optional\n The number of points at which the solution will be returned during\n the step dt. Default is 2 (returns the solution at t0 and t0 + dt).\n save : bool\n Turn on to store the solution of all previous timesteps\n starting_solution : :class:`pybamm.Solution`\n The solution to start stepping from. If None (default), then self._solution\n is used\n **kwargs\n Additional key-word arguments passed to `solver.solve`.\n See :meth:`pybamm.BaseSolver.step`.\n \"\"\"\n if self.operating_mode in [\"without experiment\", \"drive cycle\"]:\n self.build()\n\n if solver is None:\n solver = self.solver\n\n if starting_solution is None:\n starting_solution = self._solution\n\n self._solution = solver.step(\n starting_solution, self.built_model, dt, npts=npts, save=save, **kwargs\n )\n\n return self.solution\n\n def plot(self, output_variables=None, quick_plot_vars=None, **kwargs):\n \"\"\"\n A method to quickly plot the outputs of the simulation. Creates a\n :class:`pybamm.QuickPlot` object (with keyword arguments 'kwargs') and\n then calls :meth:`pybamm.QuickPlot.dynamic_plot`.\n\n Parameters\n ----------\n output_variables: list, optional\n A list of the variables to plot.\n quick_plot_vars: list, optional\n A list of the variables to plot. Deprecated, use output_variables instead.\n **kwargs\n Additional keyword arguments passed to\n :meth:`pybamm.QuickPlot.dynamic_plot`.\n For a list of all possible keyword arguments see :class:`pybamm.QuickPlot`.\n \"\"\"\n\n if quick_plot_vars is not None:\n raise NotImplementedError(\n \"'quick_plot_vars' has been deprecated. 
Use 'output_variables' instead.\"\n )\n\n if self._solution is None:\n raise ValueError(\n \"Model has not been solved, please solve the model before plotting.\"\n )\n\n if output_variables is None:\n output_variables = self.output_variables\n\n self.quick_plot = pybamm.dynamic_plot(\n self._solution, output_variables=output_variables, **kwargs\n )\n\n return self.quick_plot\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, model):\n self._model = copy.copy(model)\n self._model_class = model.__class__\n\n @property\n def model_with_set_params(self):\n return self._model_with_set_params\n\n @property\n def built_model(self):\n return self._built_model\n\n @property\n def geometry(self):\n return self._geometry\n\n @geometry.setter\n def geometry(self, geometry):\n self._geometry = geometry.copy()\n\n @property\n def parameter_values(self):\n return self._parameter_values\n\n @parameter_values.setter\n def parameter_values(self, parameter_values):\n self._parameter_values = parameter_values.copy()\n\n @property\n def submesh_types(self):\n return self._submesh_types\n\n @submesh_types.setter\n def submesh_types(self, submesh_types):\n self._submesh_types = submesh_types.copy()\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def var_pts(self):\n return self._var_pts\n\n @var_pts.setter\n def var_pts(self, var_pts):\n self._var_pts = var_pts.copy()\n\n @property\n def spatial_methods(self):\n return self._spatial_methods\n\n @spatial_methods.setter\n def spatial_methods(self, spatial_methods):\n self._spatial_methods = spatial_methods.copy()\n\n @property\n def solver(self):\n return self._solver\n\n @solver.setter\n def solver(self, solver):\n self._solver = solver.copy()\n\n @property\n def output_variables(self):\n return self._output_variables\n\n @output_variables.setter\n def output_variables(self, output_variables):\n self._output_variables = copy.copy(output_variables)\n\n @property\n def solution(self):\n return self._solution\n\n def specs(\n self,\n geometry=None,\n parameter_values=None,\n submesh_types=None,\n var_pts=None,\n spatial_methods=None,\n solver=None,\n output_variables=None,\n C_rate=None,\n ):\n \"Deprecated method for setting specs\"\n raise NotImplementedError(\n \"The 'specs' method has been deprecated. \"\n \"Create a new simulation for each different case instead.\"\n )\n\n def save(self, filename):\n \"\"\"Save simulation using pickle\"\"\"\n if self.model.convert_to_format == \"python\":\n # We currently cannot save models in the 'python' format\n raise NotImplementedError(\n \"\"\"\n Cannot save simulation if model format is python.\n Set model.convert_to_format = 'casadi' instead.\n \"\"\"\n )\n # Clear solver problem (not pickle-able, will automatically be recomputed)\n if (\n isinstance(self._solver, pybamm.CasadiSolver)\n and self._solver.integrator_specs != {}\n ):\n self._solver.integrator_specs = {}\n if self.solution is not None:\n self.solution.clear_casadi_attributes()\n with open(filename, \"wb\") as f:\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_sim(filename):\n \"\"\"Load a saved simulation\"\"\"\n return pybamm.load(filename)\n" ]
[ [ "numpy.round", "numpy.diff" ] ]
tkoyama010/pyvista
[ "edbd004db13c2e771b8bd0553565febfafc8f7c7" ]
[ "pyvista/demos/logo.py" ]
[ "\"\"\"Generate the pyvista logo.\n\nLogos generated with:\nplot_logo(screenshot='pyvista_logo.png', window_size=(1920, 1080))\nplot_logo(screenshot='pyvista_logo_sm.png', window_size=(960, 400), off_screen=True)\n\n# different camera angle for square plot\ncpos = [(-0.3654543687422538, 1.1098808905156292, 9.073223697728247),\n (2.553950615449191, 0.34145688392081264, 0.06127122762851659),\n (0.019308531920309947, 0.996708840795678, -0.07873161547192065)]\n\nplot_logo(screenshot='pyvista_logo_sm_sq.png', window_size=(960, 960), cpos=cpos,\n off_screen=True)\n\n\"\"\"\nimport os\nfrom pyvista import examples\nimport pyvista\nfrom pyvista import _vtk\n\nimport numpy as np\n\nTHIS_PATH = os.path.dirname(os.path.realpath(__file__))\n\nLOGO_TITLE = 'PyVista'\n\n\ndef atomize(grid, shift_fac=0.1, scale=0.9):\n \"\"\"Break apart and shrink and/or scale the individual cells of a mesh.\"\"\"\n cent = grid.center\n cells = []\n for i in range(grid.n_cells):\n cell = grid.extract_cells(i)\n ccent = np.array(cell.center)\n cell.points[:] = (cell.points - ccent)*scale + ccent\n cell.points += (ccent - np.array(cent))*shift_fac\n cells.append(cell)\n\n return cells[0].merge(cells[1:])\n\n\ndef text_3d(string, depth=0.5):\n \"\"\"Create 3D text.\"\"\"\n vec_text = _vtk.vtkVectorText()\n vec_text.SetText(string)\n\n extrude = _vtk.vtkLinearExtrusionFilter()\n extrude.SetInputConnection(vec_text.GetOutputPort())\n extrude.SetExtrusionTypeToNormalExtrusion()\n extrude.SetVector(0, 0, 1)\n extrude.SetScaleFactor(depth)\n\n tri_filter = _vtk.vtkTriangleFilter()\n tri_filter.SetInputConnection(extrude.GetOutputPort())\n tri_filter.Update()\n return pyvista.wrap(tri_filter.GetOutput())\n\n\ndef logo_letters(merge=False, depth=0.3):\n \"\"\"Generate a mesh for each letter in \"PyVista\".\"\"\"\n if merge:\n mesh_letters = pyvista.PolyData()\n else:\n mesh_letters = {}\n\n # spacing between letters\n space_factor = 0.9\n width = 0\n for letter in LOGO_TITLE:\n mesh_letter = text_3d(letter, depth=depth)\n this_letter_width = mesh_letter.points[:, 0].max()\n mesh_letter.translate([width*space_factor, 0, 0.0])\n width += this_letter_width\n if merge:\n mesh_letters += mesh_letter\n else:\n mesh_letters[letter] = mesh_letter\n\n return mesh_letters\n\n\ndef logo_voxel(density=0.03):\n \"\"\"Create a voxelized PyVista logo.\"\"\"\n return pyvista.voxelize(text_3d(LOGO_TITLE, depth=0.3), density)\n\n\ndef logo_basic():\n \"\"\"Create a basic pyvista logo.\n\n Examples\n --------\n Plot the basic pyvista logo.\n\n >>> from pyvista import demos\n >>> logo = demos.logo_basic()\n >>> logo.plot(smooth_shading=True) # doctest:+SKIP\n\n Add scalars and plot the logo.\n\n >>> logo['x_coord'] = logo.points[:, 0]\n >>> logo.plot(scalars='x_coord', cmap='Spectral',\n ... 
smooth_shading=True, cpos='xy') # doctest:+SKIP\n\n \"\"\"\n return logo_letters(merge=True).compute_normals(split_vertices=True)\n\n\ndef plot_logo(window_size=None, off_screen=None, screenshot=None, cpos=None, **kwargs):\n \"\"\"Plot the stylized PyVista logo.\n\n Examples\n --------\n >>> from pyvista import demos\n >>> demos.plot_logo() # doctest:+SKIP\n\n \"\"\"\n # initialize plotter\n if window_size is None:\n window_size = [960, 400]\n plotter = pyvista.Plotter(window_size=window_size, off_screen=off_screen)\n\n mesh_letters = logo_letters()\n\n # letter 'P'\n p_mesh = mesh_letters['P'].compute_normals(split_vertices=True)\n plotter.add_mesh(p_mesh, color='#376fa0', smooth_shading=True)\n\n # letter 'y'\n p_mesh = mesh_letters['y'].compute_normals(split_vertices=True)\n plotter.add_mesh(p_mesh, color='#ffd040', smooth_shading=True)\n\n # letter 'V'\n v_grid = pyvista.voxelize(mesh_letters['V'], density=0.08)\n v_grid_atom = atomize(v_grid)\n v_grid_atom['scalars'] = v_grid_atom.points[:, 0]\n plotter.add_mesh(v_grid_atom, scalars='scalars', show_edges=True,\n cmap='winter', show_scalar_bar=False)\n\n # letter 'i'\n i_grid = pyvista.voxelize(mesh_letters['i'], density=0.1)\n\n plotter.add_mesh(i_grid.extract_surface(),\n style='points', color='r',\n render_points_as_spheres=True, point_size=8)\n plotter.add_mesh(i_grid, style='wireframe', color='k', line_width=4)\n\n # letter 's'\n mesh = mesh_letters['s']\n scalars = mesh.points[:, 0]\n plotter.add_mesh(mesh, scalars=scalars, style='wireframe', color='w',\n show_edges=True, line_width=2, cmap='gist_heat',\n backface_culling=True, render_lines_as_tubes=True)\n\n # letter 't'\n mesh = mesh_letters['t']\n scalars = mesh.points[:, 0]\n plotter.add_mesh(mesh, scalars=scalars, show_edges=True,\n cmap='autumn', lighting=True)\n\n # letter 'a'\n grid = examples.download_letter_a()\n grid.points[:, 0] += (mesh_letters['a'].center[0] - grid.center[0])\n\n # select some cells from grid\n cells = grid.cells.reshape(-1, 5)\n mask = grid.points[cells[:, 1:], 2] < 0.2\n mask = mask.all(1)\n\n a_part = grid.extract_cells(mask)\n\n cells = a_part.cells.reshape(-1, 5)\n scalars = grid.points[cells[:, 1], 1]\n plotter.add_mesh(a_part, scalars=scalars, show_edges=True, cmap='Greens')\n\n # finalize plot and show it\n plotter.set_background(kwargs.pop('background', 'white'))\n if cpos is None:\n cpos = [(0.9060226106040606, 0.7752122028710583, 5.148283455883558),\n (2.553950615449191, 0.34145688392081264, 0.06127122762851659),\n (0.019308531920309943, 0.9967088407956779, -0.07873161547192063)]\n plotter.camera_position = cpos\n\n plotter.remove_scalar_bar()\n if screenshot: # pragma: no cover\n plotter.show(cpos=cpos, auto_close=False)\n plotter.screenshot(screenshot, True)\n cpos_final = plotter.camera_position\n plotter.close()\n return cpos_final\n else:\n return plotter.show(cpos=cpos, **kwargs)\n\n\ndef logo_atomized(density=0.05, scale=0.6, depth=0.05):\n \"\"\"Generate a voxelized pyvista logo with intra-cell spacing.\"\"\"\n mesh_letters = logo_letters(depth=depth)\n grids = []\n for letter in mesh_letters.values():\n grid = pyvista.voxelize(letter, density=density)\n grids.append(atomize(grid, scale=scale))\n\n return grids[0].merge(grids[1:])\n\n\ndef _for_landing_page(jupyter_backend='ipygany', **kwargs):\n \"\"\"Plot the stylized PyVista logo for ipygany.\n\n To be shown on the landing page at index.rst\n\n \"\"\"\n mesh_letters = logo_letters()\n\n # letter 'P'\n p_mesh = mesh_letters['P'].compute_normals(split_vertices=True)\n\n # letter 
'y'\n y_mesh = mesh_letters['y'].compute_normals(split_vertices=True)\n\n # letter 'V'\n v_grid = pyvista.voxelize(mesh_letters['V'], density=0.08)\n v_grid_atom = atomize(v_grid)\n v_grid_atom['scalars'] = v_grid_atom.points[:, 0]\n\n i_grid = pyvista.voxelize(mesh_letters['i'], density=0.1)\n i_mesh = i_grid.extract_surface().triangulate().subdivide(2)\n i_mesh = i_mesh.smooth(500)\n old_center = np.array(i_mesh.center)\n i_mesh.points *= 1.07\n i_mesh.points += old_center - np.array(i_mesh.center)\n\n # letter 's'\n s_vox = pyvista.voxelize(mesh_letters['s'], density=0.04)\n s_cent = s_vox.cell_centers()\n pd = pyvista.PolyData(s_cent.points)\n\n sphere = pyvista.Sphere(theta_resolution=9, phi_resolution=9)\n s_grid = pd.glyph(factor=0.04, geom=sphere)\n\n # letter 't'\n # t_mesh = mesh_letters['t'].subdivide(5)\n # t_mesh.flip_normals()\n # t_mesh = t_mesh.compute_normals(consistent_normals=True)\n # import pyacvd\n # clus = pyacvd.Clustering(t_mesh)\n # clus.cluster(140)\n # t_cmesh = clus.create_mesh()\n\n # import _ as fe\n # src = fe.Surface(t_cmesh)\n # tgt = fe.Surface(t_mesh)\n # src.morph(tgt, settings={'local_with_centroid': True, 'local_steps': 300})\n # src.morph(tgt, settings={'local_with_centroid': True, 'local_steps': 300})\n\n # t_mesh = t_cmesh.extract_all_edges().tube(radius=0.005, n_sides=4)\n # t_mesh.extract_surface().save(...)\n\n t_mesh_filename = os.path.join(THIS_PATH, 't_mesh.ply')\n t_mesh = pyvista.read(t_mesh_filename)\n\n # letter 'a'\n grid = examples.download_letter_a()\n grid.points[:, 0] += (mesh_letters['a'].center[0] - grid.center[0])\n\n # select some cells from grid\n cells = grid.cells.reshape(-1, 5)\n mask = grid.points[cells[:, 1:], 2] < 0.2\n mask = mask.all(1)\n\n a_part = grid.extract_cells(mask)\n\n plotter = pyvista.Plotter()\n plotter.add_mesh(p_mesh, color='#376fa0')\n plotter.add_mesh(y_mesh, color='#ffd040')\n vista = v_grid_atom.merge([i_mesh, s_grid, t_mesh, a_part])\n vista['xdist'] = vista.points[:, 0]\n plotter.add_mesh(vista, cmap='viridis')\n\n # cpos = None\n # cpos = [(-0.9785294154224577, 1.2712499319005408, 10.965733716449193),\n # (2.553950615449191, 0.34145688392081264, 0.06127122762851659),\n # (0.019308531920309947, 0.996708840795678, -0.07873161547192065)]\n\n # cpos = [(0.9060226106040606, 0.7752122028710583, 5.148283455883558),\n # (2.553950615449191, 0.34145688392081264, 0.06127122762851659),\n # (0.019308531920309943, 0.9967088407956779, -0.07873161547192063)]\n\n # cpos = [(0.6861237002108157, 0.7572283207509382, 5.078581054505883),\n # (2.334051705055946, 0.3234730018006926, -0.008431173749159387),\n # (0.019308531920309947, 0.996708840795678, -0.07873161547192065)]\n\n if jupyter_backend == 'ipygany':\n x = 2.7\n cpos = [(x, 0.306, 5),\n (x, 0.306, 0.15),\n (0.0, 1.0, 0.0)]\n\n text = text_3d(\"I'm interactive!\", depth=0.1)\n text.points *= 0.15\n text.translate([4, -0.4, 0])\n\n plotter.add_mesh(text, color='black')\n\n else:\n cpos = [(0.9060226106040606, 0.7752122028710583, 5.148283455883558),\n (2.553950615449191, 0.34145688392081264, 0.06127122762851659),\n (0.019308531920309943, 0.9967088407956779, -0.07873161547192063)]\n\n plotter.background_color = 'white'\n plotter.remove_scalar_bar()\n return plotter.show(cpos=cpos, jupyter_backend=jupyter_backend,\n jupyter_kwargs=kwargs)\n\n\n# _for_landing_page()\n" ]
[ [ "numpy.array" ] ]
materialsinnovation/pydem
[ "966a9e2467469f285b16d81aac47d82e75a4927c" ]
[ "pydem.py" ]
[ "import numpy as np\nimport time\nfrom scipy.spatial import ConvexHull\nfrom scipy.spatial import Delaunay\nimport sympy\ntry:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib.text import Text\n enable_plotting = True\nexcept ImportError:\n enable_plotting = False\nimport numdifftools as nd \nimport sys\nimport itertools\nimport scipy.interpolate as si\nfrom matplotlib.colors import LinearSegmentedColormap\nimport collections\n\nhdemi_thresh = 1\nhdemi_tol = 0.005\nht_p = hdemi_thresh + hdemi_tol\nht_m = hdemi_thresh - hdemi_tol\ncdict = {'red': [(0.0, 0.0, 0.0),\n (1.0, 157/255.0, 157/255.0)],\n\n 'green': [(0.0, 0.0, 40/255.0),\n (1.0, 123/255.0, 123/255.0)],\n\n 'blue': [(0.0, 0.0, 82/255.0),\n (1.0, 72/255.0, 72/255.0)]}\ncustom_cmap = LinearSegmentedColormap('BlueGold', cdict)\npad_dist = 13\nf_size = 16\nbase_fig_size = (6,4)\nfig_dpi = 300\n\nclass Boundary(object):\n\n def __init__(self, *args, **kwargs):\n self.holes_enabled = False\n self.ignore_bounds = None\n \n def set_ignore_bounds(self, bounds):\n \"\"\"\n Allow user to set arbitrarily large distances in different directions\n\n Boundary searches will not occur in directions with non-zero values\n Shape is (2, n_dimensions). First row is for searches decreasing in the ith dimension for [0,i-1].\n\n Parameters\n ----------\n bounds : array_like\n values to return in each ignored direction (for non-zero values)\n\n Returns\n -------\n\n \"\"\"\n bounds = np.array(bounds)\n self.ignore_bounds = bounds\n \n def set_holes_enabled_value(self, holes_enabled):\n \"\"\"\n Set boolean value for holes enabled in search.\n\n Enabling this feature will check all simplices along seach path to ensure none are missing. Significantly slows code.\n\n Parameters\n ----------\n holes_enabled : bool\n If True assume holes exist within the feasible space\n\n Returns\n -------\n\n \"\"\"\n self.holes_enabled = holes_enabled\n\n def is_inner(self, points):\n \"\"\"\n\n Parameters\n ----------\n points : array_like\n List of points in n-space to check for location within this Boundary\n\n Returns\n -------\n inner_values : array\n List of values for each input point. True for those interior of Boundary, False otherwise\n\n \"\"\"\n raise NotImplementedError()\n return [False]*len(points)\n \n def bound_dist(self, point, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n point : array_like\n Single point in the n-space to compute distances to Boundary.\n kwargs\n\n Returns\n -------\n dists : ndarray\n (2,n) array of distances. [0,i] is decreasing in i+1th dimension. [1,i] is decreasing in i+1th dimension\n\n \"\"\"\n raise NotImplementedError()\n dists = np.zeros((2,point.shape[1])) - 1\n return dists\n\n def extents(self):\n \"\"\"\n\n Returns\n -------\n extents : ndarray\n (2,n) array of the minimum and maximum extents of this Boundary in each of the n dimensions\n\n \"\"\"\n raise NotImplementedError()\n\n def points(self):\n \"\"\"\n\n Returns\n -------\n points : ndarray\n (m,n) array of m points associated with this Boundary which at the very least will enclose the convex hull.\n \"\"\"\n raise NotImplementedError()\n\n def find_boundary(self, path):\n \"\"\"\n Use bisection method to determine a point between two endpoints which lies on the boundary.\n Number of bisection iterations is currently fixed at 10.\n\n Parameters\n ----------\n path : array_like\n Two (or more) points which are the start and end points of the path to search along. 
Shape (n_points, n_dims)\n\n Returns\n -------\n bound_point : approximate location of boundary. If no intermediate boundary, returns the starting location\n\n \"\"\"\n k_max = 10\n inners = self.is_inner(path)\n outsides = np.where(np.asarray(inners) == False)\n if len(outsides) < 1:\n print(\"No path points are outside the boundary, no intermediate boundary\")\n return np.atleast_2d(path[0])\n if len(outsides) == len(path):\n print(\"No path points are inside the boundary, no intermediate boundary\")\n return np.atleast_2d(path[0])\n if not inners[0]:\n path = path[::-1]\n inners = inners[::-1]\n outsides = np.where(np.asarray(inners) == False)\n if not inners[0]:\n print(\"No endpoint is inside the boundary, unsure of direction to proceed\")\n return np.atleast_2d(path[0])\n \n u_lower = 0.0\n # use linear interpolation along path to make a little easier\n tck, u = si.splprep(np.transpose(path[:outsides[0]+1]), k=min([path.shape[0]-1,3]))\n u_upper = 1.0\n \n # in binary search guarantee to be searching true->false for boundary\n u_m = (u_lower + u_upper)/2.0\n eval_point = np.atleast_2d(np.asarray(si.splev(u_m, tck)))\n k = 0\n total = 0\n while k < k_max:\n if \"simplex_neighbors\" in dir(self) and self.holes_enabled:\n iters = 0\n k_sub = 0\n temp_low = np.atleast_2d(np.asarray(si.splev(u_lower, tck)))\n while True:\n if iters >= k_max or self.simplex_neighbors(eval_point, temp_low):\n break\n u_m = (u_lower + u_m)/2.0\n eval_point = np.atleast_2d(np.asarray(si.splev(u_m, tck)))\n iters += 1\n k_sub = 1\n k -= k_sub\n if self.is_inner(eval_point)[0]:\n u_lower = u_m\n else:\n u_upper = u_m\n u_m = (u_lower + u_upper)/2.0\n eval_point = np.atleast_2d(np.asarray(si.splev(u_m, tck)))\n if total > 100:\n # print(path)\n # print(u_lower)\n # print(u_m)\n # print(u_upper)\n # print(\"I was looping a lot in find_boundary\")\n break\n k += 1\n total += 1\n return eval_point\n\n def invert_boundary(self, new_extents):\n \"\"\"\n Construct a new Boundary which is the inverse of the current Boundary, i.e. 
is_inner will be the inverse of previous evaluations.\n\n Parameters\n ----------\n new_extents : array_like\n (2, n_dims) defines the hyperrectangular region which the new Boundary object will be valid over.\n\n Returns\n -------\n inv_bound : ConcaveBoundary\n The union of this new Boundary and the input bound will contain the hyperrectangular region of new_extents\n\n \"\"\"\n new_points = self.points()\n temp = PrismaticBoundary(new_extents)\n new_points = np.concatenate((new_points, temp.points()), axis=0)\n inv_bound = ConcaveBoundary(new_points, np.zeros((0, new_points.shape[1])))\n simplex_centroids = inv_bound.simplex_centroids()\n inner_simplex = self.is_inner(simplex_centroids)\n exclude_simplices = np.arange(simplex_centroids.shape[0])\n exclude_simplices = exclude_simplices[inner_simplex]\n inv_bound.exclude_simplices(exclude_simplices)\n return inv_bound\n \n \nclass MultiBoundary(Boundary):\n \"\"\"\n Support all Boundary operations using logical operations on multiple Boundary objects.\n Currently supported operations are union, intersection, and difference.\n May be used recursively to define any number of regions.\n \"\"\"\n INTERSECT_TYPE = 1\n UNION_TYPE = 2\n DIFFERENCE_TYPE = 3\n \n def __init__(self, bound_1, bound_2, bool_type, *args, **kwargs):\n super(MultiBoundary, self).__init__(*args, **kwargs)\n self.bool_type = bool_type\n self.bound_1 = bound_1\n self.bound_2 = bound_2\n if self.bool_type == MultiBoundary.DIFFERENCE_TYPE:\n new_extents = self.extents()\n self.bound_2 = self.bound_2.invert_boundary(new_extents)\n\n def is_inner(self, points):\n inners_1 = self.bound_1.is_inner(points)\n inners_2 = self.bound_2.is_inner(points)\n if self.bool_type == MultiBoundary.INTERSECT_TYPE:\n return np.logical_and(inners_1, inners_2)\n elif self.bool_type == MultiBoundary.UNION_TYPE:\n return np.logical_or(inners_1, inners_2)\n elif self.bool_type == MultiBoundary.DIFFERENCE_TYPE:\n return np.logical_and(inners_1, inners_2)\n return [False]*len(points)\n\n def extents(self):\n ex_1 = self.bound_1.extents()\n ex_2 = self.bound_2.extents()\n mask = ex_1[1] < ex_2[1]\n ex_1[1][mask] = ex_2[1][mask]\n mask = ex_1[0] > ex_2[0]\n ex_1[0][mask] = ex_2[0][mask]\n return ex_1\n\n def points(self):\n return np.concatentate([self.bound_1.points(), self.bound_2.points()], axis=0)\n \n def bound_dist(self, point, dim=None, dire=None):\n dists_1 = self.bound_1.bound_dist(point, dim=dim, dire=dire)\n dists_2 = self.bound_2.bound_dist(point, dim=dim, dire=dire)\n \n if self.bool_type == MultiBoundary.INTERSECT_TYPE:\n mask = dists_1 > dists_2\n dists_1[mask] = dists_2[mask]\n elif self.bool_type == MultiBoundary.UNION_TYPE:\n n = point.shape[1]\n dists_1 = np.zeros(dists_1.shape)\n if dim is not None:\n space = [dim]\n else:\n space = range(n)\n if dire is not None:\n directions = [dire]\n else:\n directions = range(2)\n for d in space:\n for i in directions:\n new_point = np.copy(point)\n while True:\n if self.bound_1.is_inner(new_point):\n dists = self.bound_1.bound_dist(new_point, dim=d, dire=i)\n elif self.bound_2.is_inner(new_point):\n dists = self.bound_2.bound_dist(new_point, dim=d, dire=i)\n else:\n break\n dists = dists[i]\n if i == 0:\n dists = -dists\n new_point[0,d] += dists[d]\n dists_1[i, d] = np.max(np.abs(new_point-point))\n elif self.bool_type == MultiBoundary.DIFFERENCE_TYPE:\n mask = dists_1 > dists_2\n dists_1[mask] = dists_2[mask]\n \n return dists_1\n\n\nclass PrismaticBoundary(Boundary):\n \"\"\"\n Describe hyperrectangular regions in space for all Boundary 
functions.\n Is only defined between a minimum and maximum value along each dimension.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(PrismaticBoundary, self).__init__(*args, **kwargs)\n new_bound = np.array(args[0])\n if new_bound.shape[0] != 2:\n raise ValueError(\"Prismatic bounds must be array-like (2,dims) to store upper and lower bounds\")\n if np.any(new_bound[0] >= new_bound[1]):\n raise ValueError(\"Lower bound must be less than upper bound for all dimensions\")\n self.bound = new_bound\n\n def is_inner(self, points):\n if len(points.shape) == 1:\n points = np.reshape(points, (1,len(points)))\n if points.shape[1] != self.bound.shape[1] :\n raise ValueError(\"dimension of points (%d) must match dimension of bounds (%d)\" % (points.shape[1], self.bound.shape[1]))\n temp = np.logical_and(points > self.bound[0,:], points < self.bound[1,:])\n temp = np.all(temp, axis=1)\n return temp\n\n def extents(self):\n return np.copy(self.bound)\n\n def points(self):\n lims = self.extents()\n lims = np.transpose(lims)\n bound_vertex = np.meshgrid(*tuple(lims))\n bound_vertex = [np.atleast_2d(coords.flatten()) for coords in bound_vertex]\n bound_vertex = np.concatenate(bound_vertex, axis=0)\n bound_vertex = np.transpose(bound_vertex)\n return bound_vertex\n\n def bound_dist(self, point, **kwargs):\n dists = np.concatenate((point-self.bound[0,:], self.bound[1,:]-point), axis=0)\n return dists\n\n \nclass ConcaveBoundary(Boundary):\n \"\"\"\n Boundary object with support for several additional features.\n Uses Delaunay Triagnulation and to reduce the higher dimensional space into simplices.\n Each simplex may then be removed from the hull using exclude_simplices to better describe the underlying hypervolume.\n \"\"\"\n \n def __init__(self, feasible_points, boundary_points, *args, **kwargs):\n super(ConcaveBoundary, self).__init__(*args, **kwargs)\n feasible_points = np.copy(feasible_points)\n boundary_points = np.copy(boundary_points)\n bound = np.concatenate((feasible_points, boundary_points), axis=0)\n min_pct = np.min(np.max(bound, axis=0)-np.min(bound, axis=0))*.005\n # Qbb Qc Qz Qx \n bound = Delaunay(bound, qhull_options=\"QJ%f\" % min_pct)\n print(\"Finished Delaunay\")\n # bound = Delaunay(bound)\n self.bound = bound\n self.simplex_is_inner = []\n self.init_inner_list()\n self.exclude_points = None\n self.boundary_points = boundary_points\n self.feasible_points = feasible_points\n\n def extents(self):\n extents = np.zeros((2,self.feasible_points.shape[1]))\n extents[0] = np.min(self.bound.points, axis=0)\n extents[1] = np.max(self.bound.points, axis=0)\n return extents\n\n def points(self):\n return self.bound.points\n\n def init_inner_list(self):\n self.simplex_is_inner = np.ones(self.bound.simplices.shape[0] + 1, dtype=bool)\n self.simplex_is_inner[-1] = False\n \n def simplex_centroids(self):\n nodes = self.bound.points\n elements = self.bound.simplices\n centroids = np.average(nodes[elements], axis=1)\n return centroids\n \n def exclude_simplices(self, simplex_list):\n self.simplex_is_inner[simplex_list] = False\n # for s in simplex_list:\n # self.simplex_is_inner[s] = True\n \n def make_exclude_points(self, ex_points):\n if self.exclude_points is None:\n self.exclude_points = ex_points\n self.init_inner_list()\n else:\n self.exclude_points = np.concatenate((self.exclude_points, ex_points), axis=0)\n locs = self.bound.find_simplex(ex_points)\n self.simplex_is_inner[locs] = False\n\n def is_inner(self, points, valid_simplices=None):\n simplices = 
self.bound.find_simplex(points)\n inners = self.simplex_is_inner[simplices]\n return inners\n \n def projected_point_distance(self, output_range, nominal_output=None):\n if nominal_output is None:\n nominal_output = np.mean(output_range, axis=0)\n n = output_range.shape[1]\n dists = np.zeros((2,n))\n all_points = self.boundary_points\n simplex_points = self.bound.simplices[np.logical_not(self.simplex_is_inner[:-1])].flatten()\n simplex_points = np.unique(simplex_points)\n hole_bound_points = self.bound.points[simplex_points]\n all_points = np.concatenate((all_points, hole_bound_points), axis=0)\n for d in range(n):\n valid_band_points = np.ones(all_points.shape[0], dtype=bool)\n for d2 in range(n-1):\n d2 = (d + d2 + 1) % n\n valid_band_points = np.logical_and(valid_band_points, all_points[:,d2] >= output_range[0,d2])\n valid_band_points = np.logical_and(valid_band_points, all_points[:,d2] <= output_range[1,d2])\n band_points = all_points[valid_band_points]\n band_dim = band_points[:,d]\n temp_upper = band_dim[band_dim > nominal_output[d]]\n if not len(temp_upper):\n dists[1, d] = -1\n else:\n dists[1, d] = np.min(temp_upper) - nominal_output[d]\n temp_lower = band_dim[band_dim < nominal_output[d]]\n if not len(temp_lower):\n dists[0, d] = -1\n else:\n dists[0, d] = nominal_output[d] - np.max(temp_lower)\n \n if self.ignore_bounds is not None:\n dists[np.where(self.ignore_bounds)] = self.ignore_bounds[np.where(self.ignore_bounds)]\n \n return dists\n \n def simplex_neighbors(self, point_1, point_2):\n simp_1 = self.bound.find_simplex(point_1)\n simp_2 = self.bound.find_simplex(point_2)\n if simp_1 == simp_2:\n return True\n n_list = self.bound.neighbors\n if simp_1 != -1:\n n_1 = n_list[simp_1]\n else:\n n_1 = []\n if simp_2 != -1:\n n_2 = n_list[simp_2]\n else:\n n_2 = []\n are_neighbors = simp_1 in n_2 or simp_2 in n_1\n return are_neighbors\n \n def bound_dist(self, point, dim=None, dire=None):\n n = point.shape[1]\n extra = 0.01\n dim_min = np.min(self.bound.points, axis=0)\n dim_max = np.max(self.bound.points, axis=0)\n dim_range = dim_max - dim_min\n dim_max += dim_range*extra\n dim_min -= dim_range*extra\n dists = np.zeros((2,n))\n if not self.is_inner(point)[0]:\n dists[0,0] = -1\n return dists\n ignore_search = np.zeros((2,n), dtype=bool)\n if self.ignore_bounds is not None:\n ignore_search[np.where(self.ignore_bounds)] = True\n if dim is not None:\n space = [dim]\n else:\n space = range(n)\n if dire is not None:\n directions = [dire]\n else:\n directions = range(2)\n for d in space:\n d_new = [dim_min[d], dim_max[d]]\n for i in directions:\n if not ignore_search[i, d]:\n x_2 = np.copy(point)\n x_2[0,d] = d_new[i]\n temp_points = np.concatenate((point, x_2), axis=0)\n x_bound = self.find_boundary(temp_points)\n dists[i,d] = np.linalg.norm(point - x_bound)\n else:\n dists[i,d] = sys.maxint\n if self.ignore_bounds is not None:\n dists[np.where(self.ignore_bounds)] = self.ignore_bounds[np.where(self.ignore_bounds)]\n return np.abs(dists)\n \n def get_connected_volumes(self):\n neigh_list = np.copy(self.bound.neighbors)\n out_simplices = np.where(np.logical_not(self.simplex_is_inner[:-1]))[0]\n for s in out_simplices:\n neigh_list[s,:] = -1\n neigh_list[neigh_list==s] = -1\n print(len(neigh_list))\n trees = {}\n simplex_tree = [i for i in range(len(neigh_list))]\n for i in range(len(neigh_list)):\n trees[i] = {}\n trees[i][i] = True\n for i in range(len(neigh_list)):\n for j in range(len(neigh_list[i])):\n n = neigh_list[i,j]\n if n != -1 and n not in trees[simplex_tree[i]]:\n 
neigh_tree = trees[simplex_tree[n]].keys()\n for k in neigh_tree:\n temp_tree = trees[simplex_tree[k]]\n temp_tree.pop(k, None)\n if len(temp_tree) == 0:\n trees.pop(simplex_tree[k], None)\n simplex_tree[k] = simplex_tree[i]\n trees[simplex_tree[i]][k] = True\n\n for s in out_simplices:\n trees.pop(int(s), None)\n return trees\n \n def split_disconnected_volumes(self):\n \"\"\"\n\n :return:\n \"\"\"\n trees = self.get_connected_volumes()\n if len(trees) == 1:\n return [self]\n points = self.bound.points\n new_bounds = []\n print(\"*************************************************\")\n print(\"%d disconnected volumes detected\" % len(trees))\n for key in trees:\n simplices = trees[key].keys()\n temp_points = points[np.unique(self.bound.simplices[simplices])]\n print(temp_points.shape)\n print(\"Boundary contains %d points from the initial hull\" % len(temp_points))\n new_bound = ConcaveBoundary(temp_points, np.zeros((0,temp_points.shape[1])))\n centroids = new_bound.simplex_centroids()\n valid_centroids = self.is_inner(centroids, trees[key])\n invalid_simplices = np.where(np.logical_not(valid_centroids))[0]\n new_bound.exclude_simplices(invalid_simplices)\n new_bounds.append(new_bound)\n print(\"*************************************************\")\n return new_bounds\n\n def get_external_faces(self):\n ex_faces = np.zeros((0,self.bound.points.shape[1]))\n # can speed this up by vectorizing, but lazy for now\n neighbors = self.bound.neighbors\n simplices = self.bound.simplices\n for i in range(self.bound.simplices.shape[0]):\n inner_simplex = self.simplex_is_inner[i]\n temp_simplex = simplices[i]\n for j in range(neighbors.shape[1]):\n n = neighbors[i,j]\n already_explored = n < i and n > -1\n not_included = not self.simplex_is_inner[n]\n if inner_simplex and not already_explored and not_included:\n ex_faces = np.concatenate((ex_faces, [np.concatenate((temp_simplex[:j], temp_simplex[(j+1):]))]), axis=0)\n return ex_faces\n\nclass AnonymousFunction:\n def get_y(self, x):\n \"\"\"\n\n Parameters\n ----------\n x : array_like\n point (or list of points) to evaluate based on nominal input configurations\n\n Returns\n -------\n y : array_like\n evaluates objective function to perform mapping for the associated dimension in output space\n process must be repeated over multiple AnonymousFunction objects to accomplish full mapping\n\n \"\"\"\n raise NotImplementedError()\n return 0\n\n def get_dy(self, x, dx):\n \"\"\"\n\n Parameters\n ----------\n x : array_like\n Nominal input configuration to evaluate\n dx : array_like\n Symmetric variation along each axis based on uncertainty\n\n Returns\n -------\n dy : float\n Maximum potential variation based on the dot product of the absolute value of the gradient and the absolute value of the dx vector\n\n \"\"\"\n raise NotImplementedError()\n return 0\n \n \nclass NumericFunction(AnonymousFunction):\n\n \"\"\"\n This class is an implementation of AnonymousFunction which uses numdifftools to numerically differentiate objective functions.\n \"\"\"\n def __init__(self, f):\n \"\"\"\n\n Parameters\n ----------\n f : function\n Single objective function which accepts a list of values as an input\n \"\"\"\n self.function = f\n self.d_function = nd.Gradient(f)\n \n def get_y(self, x):\n \"\"\"\n Parameters\n ----------\n x : array_like\n point (or list of points) to evaluate based on nominal input configurations\n While the generic AnonymousFunction accepts a list of points indexed by the first axis, we only evaluate the first point in that list currently\n\n 
Returns\n -------\n y : array_like\n output of the objective function (mapping) for the assigned output dimension\n\n \"\"\"\n return self.function(x[0])\n \n def get_dy(self, x, dx):\n \"\"\"\n Parameters\n ----------\n x : array_like\n Nominal input configuration to evaluate\n Note that we must maintain consistency with a (1,n) point being passed in but gradient evaluation will fail\n if numdifftools sees a 2d point being passed in so we pass in the first row\n dx : array_like\n Symmetric variation along each axis based on uncertainty\n\n Returns\n -------\n dy : float\n Maximum potential variation based on the dot product of the absolute value of the gradient and the absolute value of the dx vector\n \"\"\"\n partials = np.abs(self.d_function(x[0]))\n dx = np.abs(dx)\n return np.dot(partials.flatten(), dx.flatten())\n \n \nclass SymbolicFunction(AnonymousFunction):\n \"\"\"\n This class is an implementation of AnonymousFunction which uses sympy to evaluate and differentiate symbolic objective functions.\n \"\"\"\n def __init__(self, f, symbols):\n \"\"\"\n\n Parameters\n ----------\n f : sympy expression\n Construct using symbol objects, e.g. f = -0.853260 + 0.0248455 * THICKNESS + 0.000808578 * THICKNESS * STRENGTH + 0.000391126 * THICKNESS * DISSIPATEDENERGY\n symbols : list of sympy symbols\n These can be constructed using the sympy.symbols function, e.g. vars = sympy.symbols('STRENGTH DISSIPATEDENERGY THICKNESS')\n \"\"\"\n dims = len(symbols)\n self.function = sympy.lambdify(symbols, f, modules='numpy')\n self.derivatives = [0]*dims\n for i in range(dims):\n d_f = sympy.diff(f,symbols[i])\n self.derivatives[i] = sympy.lambdify(symbols, d_f, modules='numpy')\n \n def get_y(self, x):\n \"\"\"\n\n Parameters\n ----------\n x : array_like\n point (or list of points) to evaluate based on nominal input configurations\n\n Returns\n -------\n y : array_like\n evaluates objective function to perform mapping for the associated dimension in output space\n process must be repeated over multiple AnonymousFunction objects to accomplish full mapping\n\n \"\"\"\n x = np.atleast_2d(x)\n temp_inp = np.split(x, x.shape[1], axis=1)\n return self.function(*temp_inp)\n \n def get_dy(self, x, dx):\n \"\"\"\n\n Parameters\n ----------\n x : array_like\n Nominal input configuration to evaluate\n dx : array_like\n Symmetric variation along each axis based on uncertainty\n\n Returns\n -------\n dy : float\n Maximum potential variation based on the dot product of the absolute value of the gradient and the absolute value of the dx vector\n\n \"\"\"\n d_f = 0\n dx = dx.flatten().tolist()\n for d in range(len(dx)):\n d_f += np.abs(self.derivatives[d](*x[0]) * dx[d])\n return d_f\n \n \ndef vor(f, x, dx, bound):\n \"\"\"\n Valid Output Region\n Possible replacement error margin for the Hyperdimensional error margin index. Evaluates the outer extents to see if all of the output region (expressed as a hyperrectangle) lies within the feasible space, and is thus a robust space.\n Could currently fail due to gaps in the feasible space which are contained within the extents of the output region. 
Can be addressed efficiently by means of KD-tree or similar.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n x : ndarray\n Nominal input to evaluate\n dx : ndarray\n Variation along each input dimension\n bound : Boundary\n Feasible region representation\n\n Returns\n -------\n error_margin : float\n Uses same convention as HDemi for compatibility of logic. -1 if not in feasible region, 1.5 otherwise\n\n \"\"\"\n n = len(f[0])\n output_space = np.zeros((1,n))\n o_lim = np.zeros((n,2))\n\n for d in range(n):\n poss_max = []\n poss_min = []\n for i in range(len(f)):\n anon_f = f[i][d]\n f0 = anon_f.get_y(x)\n d_f = abs(anon_f.get_dy(x, dx))\n poss_max.append(f0 + d_f)\n poss_min.append(f0 - d_f)\n output_space[0,d] = f0\n o_lim[d,:] = np.asarray([np.min(poss_min),np.max(poss_max)])\n \n # construct all outermost extents + the center of output range\n eval_points = np.meshgrid(*tuple(o_lim))\n eval_points = [np.atleast_2d(coords.flatten()) for coords in eval_points]\n eval_points = np.concatenate(eval_points, axis=0)\n eval_points = np.transpose(eval_points)\n eval_points = np.concatenate((eval_points, output_space), axis=0)\n \n valid_outputs = bound.is_inner(eval_points)\n \n if np.all(valid_outputs):\n return 1.5\n return -1\n \n \ndef miv(f, x, dx, bound):\n \"\"\"\n Maximum Independent Variation\n Possible replacement error margin for the Hyperdimensional error margin index. Evaluates the mid-points of the outer extents to see if all of the output region (expressed as a hyperrectangle) lies within the feasible space, and is thus a robust space.\n Could currently fail due to gaps in the feasible space which are contained within the extents of the output region. Can be addressed efficiently by means of KD-tree or similar.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n x : ndarray\n Nominal input to evaluate\n dx : ndarray\n Variation along each input dimension\n bound : Boundary\n Feasible region representation\n\n Returns\n -------\n error_margin : float\n Uses same convention as HDemi for compatibility of logic. -1 if not in feasible region, 1.5 otherwise\n\n \"\"\"\n n = len(f[0])\n o_lim = np.zeros((n,2))\n eval_points = np.zeros((2*n+1,n))\n for d in range(n):\n poss_max = []\n poss_min = []\n for i in range(len(f)):\n anon_f = f[i][d]\n f0 = anon_f.get_y(x)\n d_f = abs(anon_f.get_dy(x, dx))\n poss_max.append(f0 + d_f)\n poss_min.append(f0 - d_f)\n eval_points[:,d] = f0\n eval_points[d*2,d] = np.min(poss_min)\n eval_points[d*2+1,d] = np.max(poss_max) \n valid_outputs = bound.is_inner(eval_points)\n if np.all(valid_outputs):\n return 1.5\n return -1\n\n\ndef hdemi(f, x, dx, bound, projection=False):\n \"\"\"\n Compute the Hyperdimensional error margin index. 
See \"An Inductive Design Exploration Method for Robust Multiscale Materials Design\" Choi, McDowell, Allen, Rosen, Mistree.\n Requires the distance computations from Boundary objects which may, or may not, be subject to significant errors if holes exist, etc.\n Distance computations are also relatively expensive, so recommend exploring mlv or vor alternatives.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n x : ndarray\n Nominal input to evaluate\n dx : ndarray\n Variation along each input dimension\n bound : Boundary\n Feasible region representation\n\n Returns\n -------\n error_margin : float\n Return -1 if value is within the feasible region, otherwise compute the full hyperdimensional error margin index which is the minimum ratio of the output range distance to the distance to the boundary along that dimension.\n HD_{EMI} = \\begin{cases} min_{i} \\left[ \\frac{\\left\\Vert \\bar{y}-b_i \\right\\Vert}{\\Delta y_{i}} \\right], & \\mbox{for } \\bar{y} \\in \\Omega \\\\ -1, & \\mbox{for } \\bar{y} \\notin \\Omega \\end{cases}\n\n \"\"\"\n n = len(f[0])\n output_space = np.zeros((1,n))\n o_dist = np.zeros((2,n))\n\n for d in range(n):\n poss_max = []\n poss_min = []\n for i in range(len(f)):\n anon_f = f[i][d]\n f0 = anon_f.get_y(x)\n d_f = abs(anon_f.get_dy(x, dx))\n poss_max.append(f0 + d_f)\n poss_min.append(f0 - d_f)\n output_space[0,d] = f0\n o_dist[:,d] = np.asarray([np.min(poss_min),np.max(poss_max)])\n if projection and \"projected_point_distance\" in dir(bound):\n dists = bound.projected_point_distance(o_dist, nominal_output=output_space[0])\n else:\n dists = bound.bound_dist(output_space)\n o_dist[0,:] -= output_space[0]\n o_dist[1,:] -= output_space[0]\n \n div_0_mask = o_dist == 0\n o_dist[div_0_mask] = dists[div_0_mask]/10000.0\n if np.all(dists>=0):\n return np.min(np.abs(dists/o_dist))\n return -1\n \n \ndef find_boundary(f, x_1, x_2, x_err, bound, f_0=[], em=hdemi):\n \"\"\"\n Find an approximation of the boundary representing the feasible region. 
Uses bisection search between a known interior point and exterior point.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n x_1 : ndarray\n Robust nominal input to search between\n x_2 : ndarray\n Non-robust nominal input to search between\n x_err : ndarray\n Relative uncertainty in each dimension\n bound : Boundary\n Feasible region in the output space\n f_0 : list, optional\n Previously computed evaluations of f array for both x_1 and x_2 to reduce total computation slightly\n em : function\n Error margin function which may be evaluated em(f, x, dx, bound) and return expected values < 1 for non-robust solutions and values >= 1 for robust solutions.\n\n Returns\n -------\n x_m : ndarray\n Approximation of the boundary point based on 10 bisection iterations or error margin values in range (1-tolerance, 1+tolerance).\n\n \"\"\"\n k_max = 7\n if f_0:\n h_1 = f_0[0]\n h_2 = f_0[1]\n else:\n h_1 = em(f, x_1, x_err*x_1, bound)\n h_2 = em(f, x_2, x_err*x_2, bound)\n # error check outer ranges just in case...\n if h_1 < ht_p and h_1 > ht_m:\n return x_1, None\n elif h_2 < ht_p and h_2 > ht_m:\n return x_2, None\n x_m = (x_1 + x_2)/2.0\n for k in range(k_max):\n dx = x_m*x_err\n h_m = em(f, x_m, dx, bound)\n if h_m < ht_p and h_m > ht_m:\n break\n elif (h_1 > ht_p and h_m > ht_p) or (h_1 < ht_m and h_m < ht_m):\n x_1 = x_m\n h_1 = h_m\n else:\n x_2 = x_m\n h_2 = h_m\n x_m = (x_1 + x_2)/2.0\n if h_1 < ht_m:\n exclude_point = x_1\n else:\n exclude_point = x_2\n return x_m, exclude_point\n\n\ndef idem(f, xs, x_err, objective_bound, ignore_concavity=False, ignore_boundary=False, em=vor, ignore_region=None):\n \"\"\"\n Inductive Design Exploration Method. See \"An Inductive Design Exploration Method for Robust Multiscale Materials Design\" Choi, McDowell, Allen, Rosen, Mistree.\n Perform all critical steps to result in a feasible region boundary for the current design level based on constraints from the previous level, mapping functions, and uncertainties.\n Initially screen a discretized input space based on xs which will satisfy the robust design threshold.\n Several additional checks may be completed such as finding the boundary between the discrete feasible points and the infeasible region along each dimension,\n potential concavities in the feasible space, and ignoring potentially feasible combinations due to additional constraints.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n xs : list of lists\n Discrete values to sample in each dimension. Unless ingore_region is specified, the full factorial combination space of these values will be explored.\n x_err : list\n Relative uncertainty for each input dimension\n objective_bound : Boundary\n Describes the region which satisfies design criteria for the previous level (current output region)\n ignore_concavity : bool, optional\n Controls the logic flow for evaluating concave regions. Compute the feasibility of the centroid of each simplex in the Delaunay Triangulation.\n If centroid is feasible, simplex remains in the feasible space, else it is removes and is_inner will evaluate to False within this simplex.\n Defaults to False (evaluate concavity).\n ignore_boundary : bool, optional\n Controls the logic flow for evaluating boundary points (HDemi ~ 1). 
If False, will execute explore_boundary and probe for boundary locations between feasible and infeasible configurations.\n Defaults to False (evaluate boundaries).\n em : function\n Error margin function which may be evaluated em(f, x, dx, bound) and return expected values < 1 for non-robust solutions and values >= 1 for robust solutions.\n ignore_region : Boundary, optional\n Initial configurations within this region will be ignored for feasibility computation.\n Once simplices are constructed for ConcaveBoundary of the current feasible space, centroids are examined to ensure they do not lie in this ignore_region.\n\n Returns\n -------\n feas_values : ndarray\n (n_feas_points) shape array containing the error margin values for the evaluated feasible points\n bound : ConcaveBoundary\n ConcaveBoundary made up of the feasible and boundary points (if found). Concavities may or may not be removed based on selected\n \"\"\"\n\n n = len(xs)\n feasible = find_feasible(f, xs, x_err, objective_bound, em=em, ignore_region=ignore_region)\n feas = feasible.points\n feas_values = feasible.robustness\n\n if ignore_boundary:\n print('Ignoring boundary computations')\n bnd = np.zeros((0,n))\n else:\n bound_vals = explore_boundary(f, xs, x_err, objective_bound, feasible.explored_robustness, em=em, ignore_region=ignore_region)\n bnd = bound_vals.points\n\n if len(feas) > 0 and n > 1:\n start_time = time.time()\n boundary = ConcaveBoundary(feas, bnd)\n if ignore_concavity:\n print('Not evaluating concavity')\n else:\n fix_concavity(f, x_err, objective_bound, boundary, em=em, ignore_region=ignore_region)\n print('Finished constructing feasible hull, elapsed time=%02.02f seconds' % (time.time()-start_time))\n return feas_values, boundary\n elif len(feas) > 0:\n bound = np.zeros((2,n))\n temp = np.concatenate((feas, bnd), axis=0)\n bound[0,:] = np.min(temp, axis=0)\n bound[1,:] = np.max(temp, axis=0)\n bound = PrismaticBoundary(bound)\n bound.feasible_points = feas\n bound.boundary_points = bnd\n return feas_values, bound\n else:\n return feas_values, None\n \n\ndef find_feasible(f, xs, x_err, objective_bound, em=vor, ignore_region=None):\n \"\"\"\n Perform the initial step of IDEM which requires the evaluation of the discretized region using an error metric.\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n xs : list of lists\n Discrete values to sample in each dimension. 
Unless ignore_region is specified, the full factorial combination space of these values will be explored.\n x_err : list\n Relative uncertainty for each input dimension\n objective_bound : Boundary\n Describes the region which satisfies design criteria for the previous level (current output region)\n em : function\n Error margin function which may be evaluated em(f, x, dx, bound) and return expected values < 1 for non-robust solutions and values >= 1 for robust solutions.\n ignore_region : Boundary, optional\n Initial configurations within this region will be ignored for feasibility computation.\n\n Returns\n -------\n points : ndarray\n Array of points in n-dimensional space (n_points, n_dims) for those configurations found to be robust\n robustness : ndarray\n Array of robustness values for all robust solutions based on the error margin computed.\n explored_robustness : ndarray\n Total array of all explored points and their computed error margin values.\n \"\"\"\n start_time = time.time()\n feas_dims = tuple(map(len, xs))\n n = len(xs)\n robust = np.zeros(np.prod(feas_dims))\n x_err = np.array(x_err)\n x_err = np.reshape(x_err, (1,n))\n # evaluate feasible region\n for i in range(len(robust)):\n index = np.unravel_index(i, feas_dims)\n \n x = np.zeros((1,n))\n for j in range(n):\n x[0,j] = (xs[j][index[j]])\n if ignore_region is None or not ignore_region.is_inner(x):\n dxs = x_err * x\n temp = em(f, x, dxs, objective_bound)\n robust[i] = temp\n else:\n robust[i] = -1\n if len(robust) > 10 and i%(len(robust)//10)==0:\n print('Feasible: %02.0f%% complete, elapsed time=%02.02f seconds' % (i/float(len(robust))*100, time.time()-start_time))\n print('Finished finding feasible points, elapsed time=%02.02f seconds' % (time.time()-start_time)) \n feas_mask = robust > ht_p\n print('%10d Feasible points found' % feas_mask.sum())\n indices = np.arange(len(robust))\n feas_index = indices[feas_mask]\n # feas = np.unravel_index(feas, feas_dims)\n total_indices = tuple(xs)\n locs = np.meshgrid(*total_indices,indexing='ij')\n new_locs = np.zeros((locs[0].size, len(locs)))\n for i in range(len(locs)):\n new_locs[:,i] = locs[i].flatten()\n feas_robust = robust[feas_mask]\n feas = new_locs[feas_index,:]\n\n Feasible = collections.namedtuple('Feasible', 'points robustness explored_robustness')\n f = Feasible(feas, feas_robust, robust)\n return f\n\n\ndef explore_boundary(f, xs, x_err, objective_bound, x_robust, em=vor, ignore_region=None):\n \"\"\"\n Examine robust configurations with non-robust neighbors in all dimensions.\n If any such neighbor pair is found, the boundaries are explored using find_boundary to solve for the point where the error margin approximates the true boundary within some tolerance.\n This function can require significant additional computational effort depending on the initial gridding density and the dimensionality.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n xs : list of lists\n Discrete values to sample in each dimension. 
Unless ignore_region is specified, the full factorial combination space of these values will be explored.\n x_err : list\n Relative uncertainty for each input dimension\n objective_bound : Boundary\n Describes the region which satisfies design criteria for the previous level (current output region)\n em : function\n Error margin function which may be evaluated em(f, x, dx, bound) and return expected values < 1 for non-robust solutions and values >= 1 for robust solutions.\n ignore_region : Boundary, optional\n Do not explore the boundary between infeasible points inside this region and robust configurations.\n\n Returns\n -------\n points : ndarray\n (n_bound_points, n_dim) Array of the explored boundary points, all within tolerance of the true boundary location based on error metric.\n\n \"\"\"\n feas_dims = tuple(map(len, xs))\n start_time = time.time()\n n = len(xs)\n bnd = []\n for k in range(len(x_robust)):\n ind_1 = np.unravel_index(k, feas_dims)\n x_1 = np.reshape([xs[i][ind_1[i]] for i in range(n)], (1,n))\n f_1 = x_robust[k]\n if f_1 <= ht_p and f_1 > ht_m:\n bnd.append(x_1)\n elif f_1 > ht_p:\n for i in range(n):\n dim_diffs = np.zeros((n,2))\n dim_diffs[i,:] = np.asarray([[-1,1]])\n for j in range(2):\n ind_2 = tuple(np.asarray(ind_1)+dim_diffs[:,j])\n ind_2 = [int(v) for v in ind_2]\n if np.min(ind_2) > -1 and ind_2[i] < feas_dims[i]:\n flat_ind = np.ravel_multi_index(ind_2, feas_dims)\n f_2 = x_robust[flat_ind]\n x_2 = np.asarray([xs[ii][ind_2[ii]] for ii in range(n)])\n if f_2 < ht_m and (ignore_region is None or not ignore_region.is_inner(x_2)):\n found_bound, outside_point = find_boundary(f, x_1, x_2, x_err, objective_bound, f_0=[f_1, f_2], em=em)\n found_bound = np.reshape(found_bound, (1,n))\n bnd.append(found_bound)\n if len(x_robust) > 10 and k%(len(x_robust)//10)==0:\n print('Boundary: %02.0f%% complete, time=%02.02f' % (k/float(len(x_robust))*100, time.time()-start_time))\n print('Finished finding boundary points, elapsed time=%02.02f seconds' % (time.time()-start_time))\n print('%6.00f Boundary points found' % len(bnd))\n bnd = np.concatenate(bnd, axis=0)\n\n Bound = collections.namedtuple(\"Bound\", \"points\") # idea to later add the objective function values here\n b = Bound(bnd)\n return b\n\n\ndef fix_concavity(f, x_err, objective_bound, boundary, em=vor, ignore_region=None, plot_concave=False):\n \"\"\"\n Examine all simplex centroids in the Delaunay Triangulation to determine if they lie within feasible regions.\n Obviously still limited in terms of finding infeasible, concave regions due to fineness of meshing related to initial discretization.\n Larger performance hit in higher dimensional space due to the larger number of simplices constructed.\n\n Parameters\n ----------\n f : array_like\n Mapping functions (number of bounding functions with nominal value last, number of output variables)\n x_err : list\n Relative uncertainty for each input dimension\n objective_bound : Boundary\n Describes the region which satisfies design criteria for the previous level (current output region)\n boundary : ConcaveBoundary\n Triangulation to remove simplices from using the aforementioned criteria\n em : function\n Error margin function which may be evaluated em(f, x, dx, bound) and return expected values < 1 for non-robust solutions and values >= 1 for robust solutions.\n ignore_region : Boundary, optional\n Perform additional filtering if this region is supplied to ensure that is_inner for the constructed boundary does not overlap with the ignored region.\n plot_concave : bool, 
optional\n Plot the resultant space using a gridded view to ensure proper construction of space. Default False\n\n Returns\n -------\n\n \"\"\"\n start_time = time.time()\n centroids = boundary.simplex_centroids()\n print('%d simplex centroids to evaluate for concavity feasibility' % len(centroids))\n remove_simplex = []\n for i, c in enumerate(centroids):\n dxs = x_err * c\n if em(f, [c], dxs, objective_bound) < ht_m or (ignore_region is not None and ignore_region.is_inner(c)):\n remove_simplex.append(i)\n if len(centroids) > 10 and i%(len(centroids)/10)==0:\n print('Concavity Check: %02.0f%% complete, time=%02.02f' % (i/float(len(centroids))*100, time.time()-start_time))\n print('Will remove %d simplices due to concavity' % len(remove_simplex))\n boundary.exclude_simplices(remove_simplex)\n if plot_concave and enable_plotting:\n points = centroids[remove_simplex]\n plot(points)\n plt.show()\n\n \ndef get_sub_dim_range(feas, axis_ranges, range_index, values=None, bnd=None):\n \"\"\"\n Get points within ranges along each dimension (hyperrectangular region) for use in plotting functions.\n Note the hyperrectangle is only defined in the first n dimensions, where n is the length of axis_ranges.\n Dimensions not specified are assumed to be valid on :math:'\\\\left[-\\\\infty, \\\\infty\\\\right]'\n\n Parameters\n ----------\n feas : ndarray\n (n_points, n_dim) array of feasible points to plot\n axis_ranges : array_like\n list of lists describing the ranges possible along each axis. Ranges are expressed by adjacent indices.\n i.e. feas[1,0] is valid if the value is in range [axis_ranges[0][0], axis_ranges[0][1])\n range_index : list\n list of integer indices into the axis_ranges array for the valid combination of axis_values to use.\n values : list, optional\n robustness (or other) values associated with the feasible points.\n bnd : array_like, optional\n\n\n Returns\n -------\n feas : ndarray\n indexed values of the feas array input meeting criteria\n values : ndarray\n indexed values of the values array input matching the feasible points which satisfied criteria\n None if no values were supplied\n bnd : ndarray\n indexed values of the bnd array input meeting criteria\n\n \"\"\"\n feas_mask = np.ones(feas.shape[0])\n exp_dims = len(axis_ranges)\n for j in range(exp_dims):\n feas_mask = np.logical_and(feas_mask, feas[:,j] >= axis_ranges[j][range_index[j]])\n feas_mask = np.logical_and(feas_mask, feas[:,j] < axis_ranges[j][range_index[j]+1])\n feas = feas[feas_mask,exp_dims:]\n if values is not None:\n values = values[feas_mask]\n else:\n values = None\n if bnd is not None:\n bnd_mask = np.ones(bnd.shape[0])\n for j in range(exp_dims):\n bnd_mask = np.logical_and(bnd_mask, bnd[:,j] >= axis_ranges[j][range_index[j]])\n bnd_mask = np.logical_and(bnd_mask, bnd[:,j] < axis_ranges[j][range_index[j]+1])\n bnd = bnd[bnd_mask, exp_dims:]\n else:\n bnd = None\n return feas, values, bnd\n\n \ndef get_sub_dim_discrete(feas, axis_values, value_index, values=None, bnd=None):\n \"\"\"\n Get points with discrete values (not ranges) to plot.\n Select points whose location in the first n dimensions is exactly that of the criteria described by axis_values and value_index.\n\n Parameters\n ----------\n feas : ndarray\n (n_points, n_dim) array of feasible points to plot\n axis_values : array_like\n list of lists describing the discrete values possible along each axis.\n The length of the first dimension of axis_values is the number of dimensions to select for exact values (flatten into separate plots) while 
plotting.\n values are compared exactly, use get_sub_dim_range for values which may fall inside a range.\n value_index : list\n list of integer indices into the axis_values array for the valid combination of axis_values to use.\n All combinations of feas[:,i] == axis_values[i][value_index[i]] must be true for points to be selected.\n Must be of same length or longer than axis_values.\n values : list, optional\n robustness (or other) values associated with the feasible points.\n bnd : array_like, optional\n\n\n Returns\n -------\n feas : ndarray\n indexed values of the feas array input meeting criteria\n values : ndarray\n indexed values of the values array input matching the feasible points which satisfied criteria\n None if no values were supplied\n bnd : ndarray\n indexed values of the bnd array input meeting criteria\n\n \"\"\"\n feas_mask = np.ones(feas.shape[0])\n exp_dims = len(axis_values)\n for j in range(exp_dims):\n feas_mask = np.logical_and(feas_mask, feas[:,j] == axis_values[j][value_index[j]])\n feas = feas[feas_mask,exp_dims:]\n if values is not None:\n values = np.asarray(values)\n values = values[feas_mask]\n else:\n values = None\n if bnd is not None:\n bnd_mask = np.ones(bnd.shape[0])\n for j in range(exp_dims):\n bnd_mask = np.logical_and(bnd_mask, bnd[:,j] == axis_values[j][value_index[j]])\n bnd = np.asarray(bnd)\n bnd = bnd[bnd_mask, exp_dims:]\n else:\n bnd = None\n return feas, values, bnd\n \n \ndef plot_expand_dims_subplot(feas, values=None, bnd=None, names=[], axis_ranges=[], discrete_dims=False):\n \"\"\"\n Best method so far to plot higher dimensional spaces. Expanded dimensions are those who are not plotted on the individual subplot, but rather expanded over a sequence of subplots.\n That is, a 2D grid of subplots may be used to plot the first two dimensions of a series of points, with each subplot being used to plot the remainder of dimensions.\n Currently support expanding 3 dimensions, that is create multiple grid images that may be flipped through in sequence to provide 3-dimensional plot expansion.\n Always saves plots since they typically are of such resolution as to not display nicely on the user's screen.\n\n Parameters\n ----------\n feas : ndarray\n (n_points, n_dim) array of feasible points to plot\n values : list, optional\n List of values associated with the feasible points. Colorbar will be created to show variation of these values.\n bnd : ndarray, optional\n (n_bound_points, n_dim) array of boundary points. Will always be plotted in black to differentiate from feasible points.\n names : list, optional\n List of various plotting names that must be supplied. In order; plot title, 1-axis, 2-axis (if exists), 3-axis (if exists), 4-axis (if exists), 5-axis (if exists), ... n-axis (if exists), and values (if exists).\n axis_ranges : list of lists, optional\n First list iterates over axes to be expanded in plotting. Internal lists provide values to be used for plotting these axis.\n e.g. [[0,1], [2,4,6]] will expand the first two and create a 2x3 grid of subplots. If discrete_dims is False, feas and bnd points between 0 and 1 on the first axis and between 2 and 4 on the second axis on subplot (2,3,1)\n Default to [], which shortcircuits to normal plotting.\n discrete_dims : bool, optional\n If True, each value in axis_ranges is treated as a discrete selection from feas and bnd to plot, not as ranges. Not recommened to use if boundary points are supplied since these are rarely found along discrete values. 
Defaults to False.\n Returns\n -------\n\n \"\"\"\n exp_dims = len(axis_ranges)\n if exp_dims == 0:\n plot(feas,values,bnd,names)\n plt.show()\n if exp_dims > 3:\n raise ValueError(\"Currently only support expansion of three dimensions or less\")\n if discrete_dims:\n dim_num_subplots = np.asarray(map(len, axis_ranges))\n else:\n dim_num_subplots = np.asarray([len(axis_ranges[i])-1 for i in range(exp_dims)])\n y_subplots = dim_num_subplots[-2] if len(dim_num_subplots) > 1 else 1\n x_subplots = dim_num_subplots[-1]\n x_size = base_fig_size[0]*x_subplots\n y_size = base_fig_size[1]*y_subplots\n prev_fig_num = 0\n num_plots = np.prod(dim_num_subplots)\n \n if discrete_dims:\n col_names = [names[exp_dims] + \" $ = %.3e$\" %\n (axis_ranges[-1][j]) for j in range(dim_num_subplots[-1])]\n else:\n col_names = [names[exp_dims] + \" $ \\in [%.3e,%.3e)$\" %\n (axis_ranges[-1][j], axis_ranges[-1][j+1]) for j in range(dim_num_subplots[-1])]\n if exp_dims > 2:\n if discrete_dims:\n fig_names = [names[1] + \" $ = %.3e$\" % \n (axis_ranges[0][j]) for j in range(dim_num_subplots[0])]\n else:\n fig_names = [names[1] + \" $ \\in [%.3e,%.3e)$\" % \n (axis_ranges[0][j], axis_ranges[0][j+1]) for j in range(dim_num_subplots[0]-1)]\n else:\n fig_names = ['']\n if exp_dims > 1:\n if discrete_dims:\n row_names = [names[exp_dims-1] + \" $ = %.3e$\" %\n (axis_ranges[-2][j]) for j in range(dim_num_subplots[-2])]\n else:\n row_names = [names[exp_dims-1] + \" $\\in [%.3e,%.3e)$\" %\n (axis_ranges[-2][j], axis_ranges[-2][j+1]) for j in range(dim_num_subplots[-2])]\n else:\n row_names = ['']\n \n for i in range(num_plots):\n range_index = np.unravel_index(i, dim_num_subplots)\n fig_num = 0 if len(axis_ranges) < 3 else range_index[0]\n sub_num = i - fig_num*np.prod(dim_num_subplots[1:]) + 1\n subplot = (y_subplots, x_subplots, sub_num)\n if discrete_dims:\n new_feas, new_values, new_bnd = get_sub_dim_discrete(feas, axis_ranges, range_index, values=values, bnd=bnd)\n else:\n new_feas, new_values, new_bnd = get_sub_dim_range(feas, axis_ranges, range_index, values=values, bnd=bnd)\n if sub_num == 1:\n fig = plt.figure(fig_num, figsize=(x_size, y_size), dpi=fig_dpi)\n new_names = [names[0]]\n new_names.extend(names[(1+exp_dims):])\n ax = plot(new_feas, new_values, new_bnd, new_names, fig, subplot=subplot)\n sub_num -= 1\n if sub_num % x_subplots == 0:\n ax.annotate(row_names[sub_num/x_subplots], xy=(0,.5), xytext=(-6*pad_dist, 0),\n xycoords='axes fraction', textcoords='offset points',\n ha='center', va='center', rotation=90, fontsize=1.5*f_size)\n if sub_num / x_subplots == 0:\n ax.annotate(col_names[sub_num], xy=(.5,1), xytext=(0,pad_dist*2),\n xycoords='axes fraction', textcoords='offset points',\n ha='center', va='center', fontsize=1.5*f_size)\n \n if prev_fig_num != fig_num or i == (num_plots-1):\n prev_fig = plt.figure(prev_fig_num)\n # add figure title\n if exp_dims == 3:\n prev_fig.text(.5, .93, fig_names[prev_fig_num],\n transform=prev_fig.transFigure, fontsize=2*f_size, ha='center', va='bottom')\n prev_fig.tight_layout()\n prev_fig.subplots_adjust(left=.1, top=.85)\n prev_fig.savefig(\"expanded_dims_fig_%d\" % prev_fig_num, dpi=fig.dpi)\n plt.close(prev_fig)\n prev_fig_num = fig_num\n\ndef plot_combinations(feas, values=None, bnd=None, names=[], max_dim=3, save_fig=False):\n \"\"\"\n Plot various combinations of input variables. 
Take points in higher dimensions space, restrict them to max_dim number of variables in several lower dimension plots.\n Additional dimensions will be flattened into the lower dimensional plot.\n e.g. 5 dimensional points plotted in a maximum 3D plot will produce 10 unique plots displaying the combinations of 3 variables on a 3D plot to display the 5D points provided.\n Lower dimensions plots are made using the plot function.\n\n Parameters\n ----------\n feas : ndarray\n (n_points, n_dim) array of feasible points to plot\n values : list, optional\n List of values associated with the feasible points. Colorbar will be created to show variation of these values.\n bnd : ndarray, optional\n (n_bound_points, n_dim) array of boundary points. Will always be plotted in black to differentiate from feasible points.\n names : list, optional\n List of various plotting names that must be supplied. In order; plot title, 1-axis, 2-axis (if exists), 3-axis (if exists), 4-axis (if exists), 5-axis (if exists), and values (if exists).\n All values which are not necessary collapse the order, e.g. a labelled 2-axis plot with values would be ordered [1-axis label, 2-axis label, value label (will be assigned to Colorbar).\n max_dim : int, optional\n Controls the number of dimensions that will be shown on each plot\n default is a 3D plot\n save_fig : bool, optional\n Control whether to save the figures or plot to the first n figures where n is the number of plot combinations\n Saved figures will be named according to standard conventions for this module\n default False\n Returns\n -------\n\n \"\"\"\n \"\"\" Plot all possible combinations of max_dim number of dimensions.\n The remaining dimensions are projected onto the lower dimensional subspace \"\"\"\n \n n = feas.shape[1]\n if len(bnd) > 0 and bnd.shape[1] != n:\n raise ValueError(\"boundary and feasible region must exist in same n-dimensional space\")\n \n if n <= max_dim:\n plot(feas, values, bnd, names, save_fig=save_fig)\n else:\n dims = range(n)\n comb = itertools.combinations(dims, max_dim)\n for i, c in enumerate(comb):\n print(c)\n print(i)\n feas_sub = feas[:,c]\n bnd_sub = bnd\n if len(bnd) > 0:\n bnd_sub = bnd[:,c]\n if len(names) > n:\n names_sub = [0]*(max_dim+1)\n names_sub[0] = names[0]\n for j in range(max_dim):\n names_sub[j+1] = names[c[j]+1]\n else:\n names_sub = []\n fig = plt.figure(i, figsize=base_fig_size, dpi=fig_dpi)\n plot(feas_sub, values, bnd_sub, names_sub, fig=fig, save_fig=save_fig)\n\n\ndef size_scale(values, s_min, s_max):\n \"\"\"\n\n Parameters\n ----------\n values : ndarray\n values to be displayed using the size of the scatter points\n s_min : float\n minimum value this set of values should be compared to\n s_max : float\n maximum value this set of values should be compared to\n\n Returns\n -------\n sizes : ndarray\n arbitrary scaling of values which should be appropriate for linearly scaled data and be visually distinct\n\n \"\"\"\n return 30 + 200*(values-s_min)/(s_max-s_min)\n\n\ndef plot(feas, values=None, bnd=None, names=None, fig=None, save_fig=False, subplot=(1,1,1)):\n \"\"\"\n Base plotting function for IDEM related visulaizations. Incorporates plotting of feasible points and boundary points.\n Feasible points may be colored based on objective values or robustness measures, for example.\n\n Plotting will automatically adjust from 2D to 3D plots. For points in n-dimensional space where n < 3, all plots are 2D. 
Colors are assigned based on values, if provided.\n Plotting in 3D will automatically adjust to display up to 5 dimensions, where the 4th is shown as size, and the 5th as color. This is not advised for gridded spaces with n > 3 since points will overlap in 3D coordinates.\n\n Parameters\n ----------\n feas : ndarray\n (n_points, n_dim) array of feasible points to plot. n_dim <= 5\n values : list, optional\n List of values associated with the feasible points. Colorbar will be created to show variation of these values.\n bnd : ndarray, optional\n (n_bound_points, n_dim) array of boundary points. Will always be plotted in black to differentiate from feasible points.\n names : list, optional\n List of various plotting names that must be supplied. In order; plot title, 1-axis, 2-axis (if exists), 3-axis (if exists), 4-axis (if exists), 5-axis (if exists), and values (if exists).\n All values which are not necessary collapse the order, e.g. a labelled 2-axis plot with values would be ordered [1-axis label, 2-axis label, value label (will be assigned to Colorbar).\n fig : matplotlib.figure or int, optional\n Preexisting figure instance or figure number to plot on. If not supplied, a new figure will be created.\n save_fig : bool, optional\n Save the figure when done. Default to False.\n subplot : location, optional\n See matplotlib documentation for valid subplot location indices. Default to plotting over the entire figure, subplot (1,1,1)\n Returns\n -------\n ax : matplotlib.axes\n Axis that was used to draw the current plot for further manipulation\n \"\"\"\n \"\"\" names in order (title, axis 1, ..., axis n, value) \"\"\"\n norm_size = 30\n\n fig_num = 1\n if type(fig) == int:\n fig_num = fig\n fig = None\n\n if fig is None:\n fig = plt.figure(fig_num, figsize=base_fig_size, dpi=fig_dpi)\n fig.clf()\n\n if not enable_plotting:\n print(\"plotting has been disabled, please ensure matplotlib is installed correctly\")\n return\n if len(feas.shape) < 2:\n raise ValueError(\"Feasible points must be in array_like with second dimension = spatial dimension\")\n if feas.shape[0] == 0:\n print(\"No data to plot\")\n ax = fig.add_subplot(*subplot, axisbg='white')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.patch.set_visible(False)\n ax.axis('off')\n return ax\n\n values = np.array([]) if values is None else np.asarray(values)\n bnd = np.zeros((0,feas.shape[1])) if bnd is None else np.asarray(bnd)\n names = [] if names is None else names\n\n n = feas.shape[1] \n # cm = plt.cm.get_cmap('spring')\n # cm = custom_cmap\n cm = plt.cm.get_cmap('viridis')\n cbar = None\n \n\n if bnd.shape[0] > 0 and bnd.shape[1] != n:\n raise ValueError(\"boundary and feasible region must exist in same n-dimensional space\")\n\n if n < 2:\n feas = np.reshape(feas, (len(feas), 1))\n feas = np.concatenate((feas, np.ones(feas.shape)), axis=1)\n bnd = np.reshape(bnd, (len(bnd), 1))\n bnd = np.concatenate((bnd, np.ones(bnd.shape)), axis=1)\n if n < 3:\n ax = fig.add_subplot(*subplot, axisbg='white')\n if len(values) == feas.shape[0]:\n colors = values\n if len(names) > 3:\n clabel = names[3]\n else:\n clabel = '$HD_{EMI}$'\n else:\n colors = 'b'\n im = ax.scatter(feas[:,0], feas[:,1], c=colors, cmap=cm, edgecolor='none')\n if colors is not 'b':\n cbar = fig.colorbar(im, ax=ax)\n cbar.set_label(clabel)\n im.set_clim(np.min(values), np.max(values))\n if len(bnd) > 0:\n ax.scatter(bnd[:,0], bnd[:,1], marker='^', c='k')\n if len(names) > 0:\n ax.set_title(names[0], fontsize=f_size*1.25)\n if len(names) > 
1:\n ax.set_xlabel(names[1], fontsize=f_size, labelpad=pad_dist)\n if len(names) > 2:\n ax.set_ylabel(names[2], fontsize=f_size, labelpad=pad_dist)\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(f_size)\n item.set_color(\"k\")\n \n elif n < 6:\n ax = fig.add_subplot(*subplot, projection='3d', axisbg='white')\n plt.hold('on')\n if len(values) > 0:\n c_min = np.min(values)\n c_max = np.max(values)\n else:\n c_min = 0\n c_max = 1\n if n < 5 and len(values) == feas.shape[0]:\n colors = values\n if len(names) > (n+1):\n clabel = names[n+1]\n else:\n clabel = '$HD_{EMI}$'\n elif n == 5:\n # get min and max values for color dimension\n c_min = np.min(feas[:,4])\n c_max = np.max(feas[:,4])\n if len(bnd) > 0:\n c_min = min(c_min, np.min(bnd[:,4]))\n c_max = max(c_min, np.max(bnd[:,4]))\n colors = feas[:,4]\n clabel = names[5]\n else:\n colors = 'b'\n if n > 3:\n s_min = np.min(feas[:,3])\n s_max = np.max(feas[:,3])\n if len(bnd) > 0:\n s_min = min(s_min, np.min(bnd[:,3]))\n s_max = max(s_min, np.max(bnd[:,3]))\n sizes = size_scale(feas[:,3], s_min, s_max)\n else:\n sizes = norm_size\n im = ax.scatter(feas[:,0], feas[:,1], feas[:,2], s=sizes, c=colors, vmin=c_min, vmax=c_max, edgecolor='none', cmap=cm, alpha=1)\n if colors is not 'b':\n cbar = fig.colorbar(im, ax=ax)\n cbar.set_label(clabel)\n im.set_clim(c_min, c_max)\n if len(bnd) > 0:\n if n == 5:\n colors = bnd[:,4]\n else:\n colors = 'k'\n if n > 3:\n sizes = size_scale(bnd[:,3], s_min, s_max)\n else:\n sizes = norm_size\n ax.scatter(bnd[:,0], bnd[:,1], bnd[:,2], marker='^', s=sizes, c=colors, vmin=c_min, vmax=c_max, edgecolor='none', cmap=cm, alpha=1)\n if len(names) > 0:\n ax.set_title(names[0], fontsize=f_size*1.25)\n if len(names) > 1:\n ax.set_xlabel(names[1], fontsize=f_size, labelpad=pad_dist)\n if len(names) > 2:\n ax.set_ylabel(names[2], fontsize=f_size, labelpad=pad_dist)\n if len(names) > 3:\n ax.set_zlabel(names[3], fontsize=f_size, labelpad=pad_dist)\n plt.hold('off')\n else:\n raise ValueError(\"Plots of greater than 5 dimensions are not supported\")\n \n to_change_labels = ax.get_xticklabels() + ax.get_yticklabels()\n if n > 2:\n to_change_labels += ax.get_zticklabels()\n if cbar is not None:\n to_change_labels += cbar.ax.get_yticklabels()\n for temp_text in cbar.ax.findobj(match=Text, include_self=False):\n to_change_labels.append(temp_text)\n for item in (to_change_labels):\n item.set_fontsize(f_size)\n item.set_color(\"k\")\n ax.tick_params(labelsize=f_size*0.75)\n fig.tight_layout()\n if save_fig:\n plt.savefig('feasible_plot_fig_%d' % fig.number, dpi=fig.dpi)\n return ax\n\n\ndef kriging_upper(gp, x):\n y, mse = gp.predict(x, eval_MSE=True)\n vals = y + 2*np.sqrt(mse)\n return vals[0]\n\n\ndef kriging_lower(gp, x):\n y, mse = gp.predict(x, eval_MSE=True)\n vals = y - 2*np.sqrt(mse)\n return vals[0]\n\n\ndef make_fourier(points, ranges, order=5):\n constants = np.ones((points.shape[0], 1))\n temp = np.copy(points)\n for i in range(temp.shape[1]):\n temp[:, i] /= ranges[i]\n basis = np.concatenate((constants, points, np.cos(np.pi * temp), np.sin(np.pi * temp)), axis=1)\n # basis = np.concatenate((constants, np.cos(np.pi*temp), np.sin(np.pi*temp)), axis=1)\n for i in range(order - 1):\n basis = np.concatenate((basis, np.cos((i + 2) * np.pi * temp), np.sin((i + 2) * np.pi * temp)), axis=1)\n return basis\n\n\ndef eval_fourier(points, ranges, coeff):\n return np.dot(make_fourier(points, ranges), coeff)\n\n\ndef mesh_2_list(x, y):\n temp = 
np.column_stack([x.flatten(), y.flatten()])\n print(temp.shape)\n return temp" ]
[ [ "numpy.split", "numpy.sqrt", "numpy.asarray", "matplotlib.pyplot.hold", "numpy.concatenate", "numpy.all", "numpy.max", "numpy.mean", "numpy.any", "numpy.ravel_multi_index", "numpy.where", "numpy.unique", "numpy.reshape", "matplotlib.colors.LinearSegmentedColormap", "numpy.arange", "numpy.sin", "numpy.copy", "matplotlib.pyplot.close", "numpy.zeros", "numpy.unravel_index", "matplotlib.pyplot.figure", "numpy.logical_not", "matplotlib.pyplot.cm.get_cmap", "numpy.min", "matplotlib.pyplot.savefig", "scipy.interpolate.splev", "numpy.logical_or", "numpy.atleast_2d", "numpy.transpose", "numpy.meshgrid", "numpy.logical_and", "numpy.array", "matplotlib.pyplot.show", "numpy.abs", "scipy.spatial.Delaunay", "numpy.cos", "numpy.linalg.norm", "numpy.ones", "numpy.prod", "numpy.average" ] ]
leeliang/machine-learning-notes
[ "57c6081e2dec4df843e638cc5c44b5bdab99f6f1" ]
[ "other/KNN/plot_kdtree.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nCreated on 2017-08\n@author: LI Liang\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt \n\ndef binary_tree_layout(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5, \n pos = None, parent = None):\n '''If there is a cycle that is reachable from root, then this will see infinite recursion.\n G: the graph\n root: the root node of current branch\n width: horizontal space allocated for this branch - avoids overlap with other branches\n vert_gap: gap between levels of hierarchy\n vert_loc: vertical location of root\n xcenter: horizontal location of root\n pos: a dict saying where all nodes go if they have been assigned\n parent: parent of this branch.\n each node has an attribute \"left: or \"right\"'''\n if pos == None:\n pos = {root:(xcenter,vert_loc)}\n else:\n pos[root] = (xcenter, vert_loc)\n neighbors = G.neighbors(root)\n if parent != None:\n neighbors.remove(parent)\n if len(neighbors)!=0:\n dx = width/2.\n leftx = xcenter - dx/2\n rightx = xcenter + dx/2\n for neighbor in neighbors:\n if G.node[neighbor]['child_status'] == 'left':\n pos = binary_tree_layout(G,neighbor, width = dx, vert_gap = vert_gap, \n vert_loc = vert_loc-vert_gap, xcenter=leftx, pos=pos, \n parent = root)\n elif G.node[neighbor]['child_status'] == 'right':\n pos = binary_tree_layout(G,neighbor, width = dx, vert_gap = vert_gap, \n vert_loc = vert_loc-vert_gap, xcenter=rightx, pos=pos, \n parent = root)\n return pos\n#\ndef plotPoint(p):\n plt.ylim([0,10])\n plt.xlim([0,10])\n for i in range(len(p)):\n x = p[i][0]\n y = p[i][1]\n plt.plot(x,y, 'o', fillstyle='none', ms=10, color='b')\n text = chr(65+i) \n plt.annotate(text, xy=p[i], xytext=(5,5), textcoords='offset points',color='b')\n plt.xticks([])\n plt.yticks([])\n\ndef plotTree(G):\n for node in G.nodes():\n if node in [\"B\",\"D\",\"F\"]:\n G.node[node]['child_status'] = 'left' #assign even to be left\n else:\n G.node[node]['child_status'] = 'right' #and odd to be right\n pos = binary_tree_layout(G,\"A\")\n color_list = [\"lightcoral\",\"lightgrey\",\"lightgrey\",\"lightcoral\",\"lightcoral\",\"lightcoral\"]\n nx.draw(G, pos=pos, with_labels=True, node_size=1600, node_color=color_list[:len(G)])\n return pos\n# Data from wikipedia\nc = [(7,2), (5,4), (9,6), (2,3), (4,7), (8,1)]\n#\nplt.figure(figsize=[10,10])\nplt.subplot(321)\nplotPoint(c)\nplt.axvline(x = c[0][0],lw=2,color='r')\n#\nplt.subplot(322)\nG= nx.Graph()\nG.add_node(\"A\")\npos = plotTree(G)\nplt.text( pos[\"A\"][0]*1.15, pos[\"A\"][1],'$X^{(0)}$',color='r')\n#\nplt.subplot(323)\nplotPoint(c)\nplt.axvline(x = c[0][0],lw=2,color='r')\nplt.axhline(y=c[1][1], xmin=0, xmax=c[0][0]/10., color='k',lw=1.5)\nplt.axhline(y=c[2][1], xmin=c[0][0]/10., xmax=1, color='k',lw=1.5)\n#\nplt.subplot(324)\nG.add_edges_from([(\"A\",\"B\"),(\"A\",\"C\")])\npos = plotTree(G)\nplt.text( pos[\"C\"][0]*1.15, pos[\"A\"][1],'$X^{(0)}$',color='r')\nplt.text( pos[\"C\"][0]*1.15, pos[\"C\"][1],'$X^{(1)}$')\n#\nplt.subplot(325)\nplotPoint(c)\nplt.axvline(x = c[0][0],lw=2,color='r')\nplt.axhline(y=c[1][1], xmin=0, xmax=c[0][0]/10., color='k',lw=1.5)\nplt.axhline(y=c[2][1], xmin=c[0][0]/10., xmax=1, color='k',lw=1.5)\nplt.axvline(ymin=0, ymax=c[1][1]/10., x=c[3][0], color='r', ls='--')\nplt.axvline(ymin=c[1][1]/10., ymax=1, x=c[4][0], color='r', ls='--')\nplt.axvline(ymin=0, ymax=c[2][1]/10., x=c[5][0], color='r', ls='--')\n#\nplt.subplot(326)\nG.add_edges_from([(\"B\",\"D\"), (\"B\",\"E\"), (\"C\",\"F\")])\npos = plotTree(G)\nplt.text( 
pos[\"C\"][0]*1.15, pos[\"A\"][1],'$X^{(0)}$',color='r')\nplt.text( pos[\"C\"][0]*1.15, pos[\"C\"][1],'$X^{(1)}$')\nplt.text( pos[\"C\"][0]*1.15, pos[\"F\"][1],'$X^{(0)}$',color='r')\n\nplt.savefig('kdtree.png')" ]
[ [ "matplotlib.pyplot.yticks", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "matplotlib.pyplot.ylim", "matplotlib.pyplot.annotate", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.text", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
marcomussi/RecommenderSystemPolimi
[ "ce45b1eee2231abe1a844697648e94b98dadabea" ]
[ "DataReaderWithoutValid_new.py" ]
[ "\r\nimport numpy as np\r\nimport scipy.sparse as sps\r\nimport time, sys\r\nimport pandas as pd\r\nimport csv\r\n\r\nfrom math import log\r\nimport math\r\n\r\n\r\n\r\nclass dataReader:\r\n\r\n def __init__(self):\r\n super(dataReader, self).__init__()\r\n\r\n # Lettura degli input\r\n tracks = pd.read_csv(\"data/tracks.csv\")\r\n \r\n train = pd.read_csv(\"data/trainLT25.csv\")\r\n #train = pd.read_csv(\"data/trainGT25.csv\")\r\n \r\n targetPlaylist = pd.read_csv(\"data/targetLT25.csv\")\r\n #targetPlaylist = pd.read_csv(\"data/targetGT25.csv\")\r\n\r\n train_seq = pd.read_csv(\"data/trainLT25Seq.csv\")\r\n #train_seq = pd.read_csv(\"data/trainGT25Seq.csv\")\r\n \r\n # Creo una lista di tutte le target playlist\r\n targetPlaylistCol = targetPlaylist.playlist_id.tolist()\r\n\r\n # Creo una lista di tuple (x,y) così composte:\r\n # x = indice playlist\r\n # y = indice track contenuta nella playlist y\r\n playlistColTuples_tot = list(train.apply(tuple, axis=1))\r\n # Creo un set di tuple (x,y) così composte:\r\n # x = indice playlist\r\n # y = indice track contenuta nella playlist y\r\n playlistColTuples_seq = set(train_seq.apply(tuple, axis=1))\r\n \r\n # Estrai le tuple NON sequenziali\r\n playlistColTuples = list(filter(lambda x: x not in playlistColTuples_seq, playlistColTuples_tot))\r\n # Estrai le tuple del target che non sono sequenziali\r\n playlistCol_target_notseq = list(filter(lambda x: x[0] in targetPlaylistCol, playlistColTuples))\r\n # Estrai la lista di tutte le tracce\r\n trackCol = tracks.track_id.tolist()\r\n # Estrai la lista di tutte le playlist NON sequenziali\r\n playlistCol = [x[0] for x in playlistColTuples]\r\n # Estrai la lista di tutti le playlist NON sequenziali nel target\r\n playlistCol_target = [x[0] for x in playlistCol_target_notseq]\r\n # Estrai la lista di tutte le tracce contenute in playlist NON sequenziali\r\n tracklistCol = [x[1] for x in playlistColTuples]\r\n # Estrai la lista di tutte le tracce contenute in playlist NON sequenziali nel target\r\n tracklistCol_target = [x[1] for x in playlistCol_target_notseq]\r\n # Estrai la colonne degli album, degli artisti e delle durate\r\n albumIdCol = tracks.album_id.tolist() # column ALBUM_ID from tracks.csv\r\n artistIdCol = tracks.artist_id.tolist() # column ARTIST_ID from tracks.csv\r\n #durSecCol = tracks.duration_sec.tolist() # column DURATION_SEC from tracks.csv\r\n numTrack = len(trackCol)\r\n #numPlayList = len(playlistCol)\r\n # Combina le colonne con gli id degli album e degli artisti\r\n #albumIdArtistIdCol = albumIdCol + artistIdCol\r\n # Ritorna il numero di playlists\r\n number_of_play = max(train.playlist_id.tolist())\r\n # Ritorna un'array di uno lungo quanto il numero di playlists NON sequenziali \r\n numPlaylist_notseq = np.ones(len(playlistColTuples), dtype=int)\r\n # Crea la URM di playlist+tracce NON sequenziali\r\n mat_notseq = sps.coo_matrix((numPlaylist_notseq, (playlistCol, tracklistCol)),\r\n shape=(number_of_play + 1, len(trackCol)))\r\n # Converte in CSR\r\n mat_notseq = mat_notseq.tocsr()\r\n\r\n\r\n # Ritorna una lista di tutte le playlist\r\n PlaylistColumn = train.playlist_id.tolist()\r\n # Ritorna una lista delle tracce di tutte le playlist\r\n trackColumn = train.track_id.tolist()\r\n # Ritorna un'array di uno lungo quanto il numero di playlists \r\n numPlaylist = np.ones(len(PlaylistColumn), dtype=int)\r\n # Crea la URM di playlist+tracce COMPLETA\r\n self.mat_complete = sps.coo_matrix((numPlaylist, (PlaylistColumn, trackColumn)),\r\n shape=(number_of_play + 1, 
len(trackCol)))\r\n        # Convert to CSR\r\n        self.mat_complete = self.mat_complete.tocsr()\r\n\r\n        # Build an array of ones as long as the number of NON-sequential target playlists\r\n        numPlaylist_notseq_target = np.ones(len(playlistCol_target_notseq), dtype=int)\r\n        # Build the URM of the NON-sequential playlists contained in the target\r\n        mat_notseq_target = sps.coo_matrix((numPlaylist_notseq_target, (playlistCol_target, tracklistCol_target)),\r\n                                           shape=(number_of_play + 1, len(trackCol)))\r\n\r\n        # Extract the sequential playlists\r\n        playlistCol_seq = train_seq.playlist_id.tolist()\r\n        # Extract the number of sequential playlists\r\n        numPlaylist_seq = len(playlistCol_seq)\r\n        # Extract the sequential tracks\r\n        tracklistCol_seq = train_seq.track_id.tolist()\r\n        # Build an array of ones as long as the number of sequential target playlists\r\n        numPlaylist_seq = np.ones(numPlaylist_seq, dtype=int)\r\n        # Build the URM of the sequential playlists\r\n        mat_seq = sps.coo_matrix((numPlaylist_seq, (playlistCol_seq, tracklistCol_seq)),\r\n                                 shape=(number_of_play + 1, len(trackCol)))\r\n        # Convert to CSR\r\n        mat_seq = mat_seq.tocsr()\r\n\r\n        # Build a list from 1 up to the number of sequential playlists\r\n        incremental = [i + 1 for i in range(len(playlistCol_seq))]\r\n        # Sort the list in DESCENDING order\r\n        incremental = list(reversed(incremental))\r\n        # Build a special matrix holding the decreasing values created above\r\n        mat_seq_rank = sps.coo_matrix((incremental, (playlistCol_seq, tracklistCol_seq)),\r\n                                      shape=(number_of_play + 1, len(trackCol)))\r\n        # Convert to CSR\r\n        mat_seq_rank = mat_seq_rank.tocsr()\r\n        # Build a set of the sequential playlists\r\n        nonempty_seq = set(playlistCol_seq)\r\n\r\n        # For each sequential playlist, give the tracks that were inserted first a higher weight\r\n        for i in nonempty_seq:\r\n            mask_min = (mat_seq[i] * (mat_seq_rank[i, mat_seq_rank[i].nonzero()[1]].min() - 1)) # the mask with the minimum of each row\r\n            mat_seq_rank[i] = mat_seq_rank[i] - mask_min # subtract each row, this way the first in playlist will have the highest number\r\n\r\n        # Build the track-album matrix\r\n        matTrack_Album = sps.coo_matrix(\r\n            ((np.ones(numTrack, dtype=int)), (trackCol, albumIdCol))) # sparse matrix ROW: track_id COLUMN: album_id\r\n        matTrack_Album = matTrack_Album.tocsr()\r\n\r\n        # Build the track-artist matrix\r\n        matTrack_Artist = sps.coo_matrix(\r\n            ((np.ones(numTrack, dtype=int)), (trackCol, artistIdCol))) # sparse matrix ROW: track_id COLUMN: artist_id\r\n        matTrack_Artist = matTrack_Artist.tocsr()\r\n\r\n\r\n        URM_train_seq, URM_train, URM_test_seq, URM_test = self.train_test_holdout(mat_notseq_target, mat_seq, mat_seq_rank, nonempty_seq, train_perc=0.8)\r\n        # mat holds the URM of the non-sequential playlists that are NOT contained in the target\r\n        # NB: mat_notseq has not been split in any way\r\n        mat = mat_notseq - mat_notseq_target\r\n\r\n        self.ICM_Art = matTrack_Artist\r\n        self.ICM_Alb = matTrack_Album\r\n\r\n        # The train matrix contains:\r\n        # -> URM_train (based on the target playlists), split\r\n        # -> URM_train_seq (based on the sequential playlists), split\r\n        # -> mat (based on all the non-sequential playlists not contained in the target), not split\r\n        self.mat_Train = URM_train + URM_train_seq + mat\r\n        # Contains the test set + the sequential test set\r\n        self.mat_Test = URM_test + URM_test_seq\r\n        # Empty; in this case the validation set is not needed\r\n        self.mat_Valid = sps.csr_matrix(mat.shape, dtype=int)\r\n\r\n\r\n    def get_URM_complete(self):\r\n        return self.mat_complete\r\n\r\n    def get_URM_train(self):\r\n        return self.mat_Train\r\n\r\n    def get_URM_validation(self):\r\n        return self.mat_Valid\r\n\r\n    def get_ICM_Art(self):\r\n        return self.ICM_Art\r\n\r\n    def get_ICM_Alb(self):\r\n        return self.ICM_Alb\r\n\r\n    def get_URM_test(self):\r\n        return self.mat_Test\r\n\r\n    def train_test_holdout(self, URM_all, URM_all_seq, URM_all_seq_rank, nonempty_seq, train_perc=0.8):\r\n        # Total number of interactions (= number of non-zeros in URM_all)\r\n        numInteractions = URM_all.nnz\r\n        # Convert URM_all to COO\r\n        URM_all = URM_all.tocoo()\r\n        # Randomly choose True or False with probability train_perc for True and 1-train_perc for False\r\n        train_mask = np.random.choice([True, False], numInteractions, p=[train_perc, 1 - train_perc])\r\n        # Put the train matrix into URM_train\r\n        URM_train = sps.coo_matrix((URM_all.data[train_mask], (URM_all.row[train_mask], URM_all.col[train_mask])),\r\n                                   shape=URM_all.shape)\r\n        # Convert to CSR\r\n        URM_train = URM_train.tocsr()\r\n        # Invert the train_mask\r\n        test_mask = np.logical_not(train_mask)\r\n        # Put into URM_test everything that is not in URM_train\r\n        URM_test = sps.coo_matrix((URM_all.data[test_mask], (URM_all.row[test_mask], URM_all.col[test_mask])),\r\n                                  shape=URM_all.shape)\r\n        # Convert to CSR\r\n        URM_test = URM_test.tocsr()\r\n        # Initialize URM_train_seq with the same shape as URM_all\r\n        URM_train_seq = sps.coo_matrix(URM_all.shape, dtype=int)\r\n        URM_train_seq = URM_train_seq.tocsr()\r\n        # For each sequential playlist, take the maximum weight of the row and multiply it by 1-train_perc\r\n        # E.g.: 25 * 0.2 = 5\r\n        # Then put into URM_train_seq all the tracks whose weight is greater than perc (5 in this example),\r\n        # so this row ends up with 20 songs in the train set and 5 in the test set\r\n        for i in nonempty_seq:\r\n            perc = int(math.ceil(URM_all_seq_rank[i].max() * (1 - train_perc)))\r\n            URM_train_seq[i] = URM_all_seq_rank[i] > perc\r\n        # Build URM_test_seq as the difference\r\n        URM_test_seq = URM_all_seq - URM_train_seq\r\n        # NEW: ensure that only playlists with more than 25 interactions end up in the test set\r\n        #for i in range(np.size(URM_test_seq ,axis=0)):\r\n            #if URM_test_seq[i].nnz>25*(1-train_perc): # (FOR LT25)\r\n            # if URM_test_seq[i].nnz<=25*(1-train_perc): # (FOR GT25)\r\n            #    URM_test_seq[i] = URM_all_seq[i]*0\r\n        #print(mask)\r\n        #print(URM_test)\r\n        #print(URM_test_seq)\r\n        # Return the 4 matrices\r\n        return URM_train_seq, URM_train, URM_test_seq, URM_test\r\n\r\n    def train_valid_holdout(self, URM_all, URM_all_seq, URM_all_seq_rank, nonempty_seq, train_perc=0.75, old_perc=0.8):\r\n        numInteractions = URM_all.nnz\r\n        URM_all = URM_all.tocoo()\r\n\r\n        train_mask = np.random.choice([True, False], numInteractions, p=[train_perc, 1 - train_perc])\r\n\r\n        URM_train = sps.coo_matrix((URM_all.data[train_mask], (URM_all.row[train_mask], URM_all.col[train_mask])),\r\n                                   shape=URM_all.shape)\r\n        URM_train = URM_train.tocsr()\r\n\r\n        test_mask = np.logical_not(train_mask)\r\n\r\n        URM_test = sps.coo_matrix((URM_all.data[test_mask], (URM_all.row[test_mask], URM_all.col[test_mask])),\r\n                                  shape=URM_all.shape)\r\n        URM_test = URM_test.tocsr()\r\n\r\n        URM_train_seq = sps.coo_matrix(URM_all.shape, dtype=int)\r\n        URM_train_seq = URM_train_seq.tocsr()\r\n\r\n        for i in nonempty_seq:\r\n            perc = int(math.ceil(URM_all_seq_rank[i].max() * (1 - old_perc)))\r\n            newperc = int(math.ceil((URM_all_seq_rank[i].max() - perc) * (1 - train_perc)))\r\n            URM_train_seq[i] = URM_all_seq_rank[i].multiply(URM_all_seq[i]) - (URM_all_seq[i] * perc) > newperc\r\n\r\n        URM_test_seq = URM_all_seq - URM_train_seq\r\n\r\n        return URM_train_seq, URM_train, URM_test_seq, URM_test\r\n\r\n" ]
[ [ "numpy.logical_not", "scipy.sparse.coo_matrix", "pandas.read_csv", "numpy.random.choice", "scipy.sparse.csr_matrix", "numpy.ones" ] ]
bbeale/PyPortfolioOpt
[ "caace2c18e6d7b7a310df3bbf2e0d4d66175f15b" ]
[ "tests/test_efficient_cvar.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pypfopt import (\n risk_models,\n expected_returns,\n EfficientCVaR,\n objective_functions,\n)\nfrom tests.utilities_for_tests import setup_efficient_cvar, get_data\nfrom pypfopt.exceptions import OptimizationError\n\n\ndef test_cvar_example():\n beta = 0.95\n df = get_data()\n mu = expected_returns.mean_historical_return(df)\n historical_rets = expected_returns.returns_from_prices(df).dropna()\n\n cv = EfficientCVaR(mu, historical_rets, beta=beta)\n w = cv.min_cvar()\n\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= -1e-5 for i in w.values()])\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.17745746040573562, 0.017049502122532853),\n rtol=1e-4,\n atol=1e-4,\n )\n\n cvar = cv.portfolio_performance()[1]\n portfolio_rets = historical_rets @ cv.weights\n\n var_hist = portfolio_rets.quantile(1 - beta)\n cvar_hist = -portfolio_rets[portfolio_rets < var_hist].mean()\n np.testing.assert_almost_equal(cvar_hist, cvar, decimal=3)\n\n\ndef test_es_return_sample():\n df = get_data()\n mu = expected_returns.mean_historical_return(df)\n S = risk_models.sample_cov(df)\n\n # Generate a 1y sample of daily data\n np.random.seed(0)\n mu_daily = (1 + mu) ** (1 / 252) - 1\n S_daily = S / 252\n sample_rets = pd.DataFrame(\n np.random.multivariate_normal(mu_daily, S_daily, 300), columns=mu.index\n )\n\n cv = EfficientCVaR(mu, sample_rets)\n w = cv.efficient_return(0.2)\n\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= -1e-5 for i in w.values()])\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.20, 0.01789275427676941),\n rtol=1e-4,\n atol=1e-4,\n )\n # Cover verbose param case\n np.testing.assert_equal(\n cv.portfolio_performance(verbose=True), cv.portfolio_performance()\n )\n\n\ndef test_cvar_example_weekly():\n beta = 0.95\n df = get_data()\n df = df.resample(\"W\").first()\n mu = expected_returns.mean_historical_return(df, frequency=52)\n historical_rets = expected_returns.returns_from_prices(df).dropna()\n cv = EfficientCVaR(mu, historical_rets, beta=beta)\n cv.efficient_return(0.2)\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.2, 0.03447723250708958),\n rtol=1e-4,\n atol=1e-4,\n )\n\n cvar = cv.portfolio_performance()[1]\n portfolio_rets = historical_rets @ cv.weights\n\n var_hist = portfolio_rets.quantile(1 - beta)\n cvar_hist = -portfolio_rets[portfolio_rets < var_hist].mean()\n np.testing.assert_almost_equal(cvar_hist, cvar, decimal=3)\n\n\ndef test_cvar_example_monthly():\n beta = 0.95\n df = get_data()\n df = df.resample(\"M\").first()\n mu = expected_returns.mean_historical_return(df, frequency=12)\n historical_rets = expected_returns.returns_from_prices(df).dropna()\n cv = EfficientCVaR(mu, historical_rets, beta=beta)\n cv.efficient_return(0.2)\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.2, 0.02343809217822161),\n rtol=1e-4,\n atol=1e-4,\n )\n\n cvar = cv.portfolio_performance()[1]\n portfolio_rets = historical_rets @ cv.weights\n\n var_hist = portfolio_rets.quantile(1 - beta)\n cvar_hist = -portfolio_rets[portfolio_rets < var_hist].mean()\n np.testing.assert_almost_equal(cvar_hist, cvar, decimal=3)\n\n\ndef test_cvar_beta():\n # cvar should decrease (i.e higher loss) as beta increases\n cv = setup_efficient_cvar()\n cv._beta = 0.5\n cv.min_cvar()\n cvar = 
cv.portfolio_performance()[1]\n\n for beta in np.arange(0.55, 1, 0.05):\n cv = setup_efficient_cvar()\n cv._beta = beta\n cv.min_cvar()\n cvar_test = cv.portfolio_performance()[1]\n assert cvar_test >= cvar\n cvar = cvar_test\n\n\ndef test_cvar_example_short():\n df = get_data()\n mu = expected_returns.mean_historical_return(df)\n historical_rets = expected_returns.returns_from_prices(df).dropna()\n cv = EfficientCVaR(\n mu,\n historical_rets,\n weight_bounds=(-1, 1),\n )\n w = cv.efficient_return(0.2, market_neutral=True)\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 0)\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.2, 0.013406209257292611),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_min_cvar_extra_constraints():\n cv = setup_efficient_cvar()\n w = cv.min_cvar()\n assert w[\"GOOG\"] < 0.02 and w[\"AAPL\"] > 0.02\n\n cv = setup_efficient_cvar()\n cv.add_constraint(lambda x: x[0] >= 0.03)\n cv.add_constraint(lambda x: x[1] <= 0.03)\n w = cv.min_cvar()\n assert w[\"GOOG\"] >= 0.025 and w[\"AAPL\"] <= 0.035\n\n\ndef test_min_cvar_different_solver():\n cv = setup_efficient_cvar(solver=\"ECOS\")\n w = cv.min_cvar()\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= 0 for i in w.values()])\n test_performance = (0.08447037713814826, 0.017049502122532853)\n np.testing.assert_allclose(\n cv.portfolio_performance(), test_performance, rtol=1e-2, atol=1e-2\n )\n\n\ndef test_min_cvar_tx_costs():\n # Baseline\n cv = setup_efficient_cvar()\n cv.min_cvar()\n w1 = cv.weights\n\n # Pretend we were initally equal weight\n cv = setup_efficient_cvar()\n prev_w = np.array([1 / cv.n_assets] * cv.n_assets)\n cv.add_objective(objective_functions.transaction_cost, w_prev=prev_w)\n cv.min_cvar()\n w2 = cv.weights\n\n # TX cost should pull closer to prev portfolio\n assert np.abs(prev_w - w2).sum() < np.abs(prev_w - w1).sum()\n\n\ndef test_min_cvar_L2_reg():\n cv = setup_efficient_cvar(solver=\"ECOS\")\n cv.add_objective(objective_functions.L2_reg, gamma=0.1)\n weights = cv.min_cvar()\n assert isinstance(weights, dict)\n assert set(weights.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= 0 for i in weights.values()])\n\n cv2 = setup_efficient_cvar()\n cv2.min_cvar()\n\n # L2_reg should pull close to equal weight\n equal_weight = np.full((cv.n_assets,), 1 / cv.n_assets)\n assert (\n np.abs(equal_weight - cv.weights).sum()\n < np.abs(equal_weight - cv2.weights).sum()\n )\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.08981817616931259, 0.020427209685618623),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_min_cvar_sector_constraints():\n sector_mapper = {\n \"GOOG\": \"tech\",\n \"AAPL\": \"tech\",\n \"FB\": \"tech\",\n \"AMZN\": \"tech\",\n \"BABA\": \"tech\",\n \"GE\": \"utility\",\n \"AMD\": \"tech\",\n \"WMT\": \"retail\",\n \"BAC\": \"fig\",\n \"GM\": \"auto\",\n \"T\": \"auto\",\n \"UAA\": \"airline\",\n \"SHLD\": \"retail\",\n \"XOM\": \"energy\",\n \"RRC\": \"energy\",\n \"BBY\": \"retail\",\n \"MA\": \"fig\",\n \"PFE\": \"pharma\",\n \"JPM\": \"fig\",\n \"SBUX\": \"retail\",\n }\n\n sector_upper = {\n \"tech\": 0.2,\n \"utility\": 0.1,\n \"retail\": 0.2,\n \"fig\": 0.4,\n \"airline\": 0.05,\n \"energy\": 0.2,\n }\n sector_lower = {\"utility\": 0.01, \"fig\": 0.02, \"airline\": 0.01}\n\n cv = setup_efficient_cvar()\n cv.add_sector_constraints(sector_mapper, 
sector_lower, sector_upper)\n weights = cv.min_cvar()\n\n for sector in list(set().union(sector_upper, sector_lower)):\n sector_sum = 0\n for t, v in weights.items():\n if sector_mapper[t] == sector:\n sector_sum += v\n assert sector_sum <= sector_upper.get(sector, 1) + 1e-5\n assert sector_sum >= sector_lower.get(sector, 0) - 1e-5\n\n\ndef test_efficient_risk():\n cv = setup_efficient_cvar()\n w = cv.efficient_risk(0.02)\n\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= -1e-5 for i in w.values()])\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.2267893986249195, 0.02),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_efficient_risk_low_risk():\n cv = setup_efficient_cvar()\n cv.min_cvar()\n min_value = cv.portfolio_performance()[1]\n\n # Should fail below\n with pytest.raises(OptimizationError):\n cv = setup_efficient_cvar()\n cv.efficient_risk(min_value - 0.01)\n\n cv = setup_efficient_cvar()\n cv.efficient_risk(min_value + 0.01)\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.363470415007482, min_value + 0.01),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_efficient_risk_market_neutral():\n cv = EfficientCVaR(*setup_efficient_cvar(data_only=True), weight_bounds=(-1, 1))\n w = cv.efficient_risk(0.025, market_neutral=True)\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 0)\n assert (cv.weights < 1).all() and (cv.weights > -1).all()\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.5895653670063358, 0.025),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_efficient_risk_L2_reg():\n cv = setup_efficient_cvar()\n cv.add_objective(objective_functions.L2_reg, gamma=1)\n weights = cv.efficient_risk(0.03)\n\n assert isinstance(weights, dict)\n assert set(weights.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n np.testing.assert_array_less(np.zeros(len(weights)), cv.weights + 1e-4)\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.2889961577134966, 0.029393474756427136),\n rtol=1e-4,\n atol=1e-4,\n )\n\n ef2 = setup_efficient_cvar()\n cv.add_objective(objective_functions.L2_reg, gamma=1)\n ef2.efficient_risk(0.19)\n\n # L2_reg should pull close to equal weight\n equal_weight = np.full((cv.n_assets,), 1 / cv.n_assets)\n assert (\n np.abs(equal_weight - cv.weights).sum()\n < np.abs(equal_weight - ef2.weights).sum()\n )\n\n\ndef test_efficient_return():\n cv = setup_efficient_cvar()\n w = cv.efficient_return(0.25)\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= -1e-5 for i in w.values()])\n\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.25, 0.021036631225933487),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_efficient_return_short():\n cv = EfficientCVaR(*setup_efficient_cvar(data_only=True), weight_bounds=(-3.0, 3.0))\n w = cv.efficient_return(0.26)\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.26, 0.01804624747353764),\n rtol=1e-4,\n atol=1e-4,\n )\n cvar = cv.portfolio_performance()[1]\n\n ef_long_only = EfficientCVaR(\n *setup_efficient_cvar(data_only=True), weight_bounds=(0.0, 1.0)\n )\n ef_long_only.efficient_return(0.26)\n long_only_cvar = ef_long_only.portfolio_performance()[1]\n\n 
assert long_only_cvar > cvar\n\n\ndef test_efficient_return_L2_reg():\n cv = setup_efficient_cvar()\n cv.add_objective(objective_functions.L2_reg, gamma=1)\n w = cv.efficient_return(0.25)\n assert isinstance(w, dict)\n assert set(w.keys()) == set(cv.tickers)\n np.testing.assert_almost_equal(cv.weights.sum(), 1)\n assert all([i >= -1e-5 for i in w.values()])\n np.testing.assert_allclose(\n cv.portfolio_performance(),\n (0.25, 0.02660410793952383),\n rtol=1e-4,\n atol=1e-4,\n )\n\n\ndef test_cvar_errors():\n df = get_data()\n mu = expected_returns.mean_historical_return(df)\n historical_rets = expected_returns.returns_from_prices(df)\n\n with pytest.warns(UserWarning):\n EfficientCVaR(mu, historical_rets)\n\n historical_rets = historical_rets.dropna(axis=0, how=\"any\")\n assert EfficientCVaR(mu, historical_rets)\n\n cv = setup_efficient_cvar()\n\n with pytest.raises(NotImplementedError):\n cv.min_volatility()\n\n with pytest.raises(NotImplementedError):\n cv.max_sharpe()\n\n with pytest.raises(NotImplementedError):\n cv.max_quadratic_utility()\n\n with pytest.raises(ValueError):\n # Beta must be between 0 and 1\n cv = EfficientCVaR(mu, historical_rets, 1)\n\n with pytest.warns(UserWarning):\n cv = EfficientCVaR(mu, historical_rets, 0.1)\n\n with pytest.raises(OptimizationError):\n # Must be <= max expected return\n cv = EfficientCVaR(mu, historical_rets)\n cv.efficient_return(target_return=np.abs(mu).max() + 0.01)\n\n with pytest.raises(TypeError):\n # list not supported.\n EfficientCVaR(mu, historical_rets.to_numpy().tolist())\n\n historical_rets = historical_rets.iloc[:, :-1]\n with pytest.raises(ValueError):\n EfficientCVaR(mu, historical_rets)\n" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.arange", "numpy.random.multivariate_normal", "numpy.full", "numpy.testing.assert_almost_equal", "numpy.array" ] ]
ndricke/kmc_ML
[ "630fd51005428c2ea3108306cd4b8a94381e6a46" ]
[ "kmc_NN.py" ]
[ "\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom pylab import rcParams\n\nimport sklearn.preprocessing as skprep\nimport sklearn.model_selection as skms\nimport sklearn.metrics as skm\nfrom sklearn.neural_network import MLPRegressor\n\nimport kmc_ml_util\n\nfont = {'size':18}\nmpl.rc('font',**font)\nrcParams['figure.figsize'] = 10,10\n\n\n#al = 10.**-6 # L2 regularization for neural network\nal = 0.\nB3_string = sys.argv[1] # the 3-site correlation term to estimate\n\ndf = pd.read_csv(\"~/work/tafel/KMC/k_run.csv\")\nassert len(B3_string) == 3 #this model is only made to predict 3-site terms\n\n# construct terms for B3_Estimate function that Andrew has been using\ndf['O'] = 1. - df.A - df.B\nB3_middle = B3_string[1]\nB3_left2 = kmc_ml_util.B2_convert(B3_string[0], B3_string[1])\nB3_right2 = kmc_ml_util.B2_convert(B3_string[1], B3_string[2])\nB3 = kmc_ml_util.B3_convert(B3_string)\n\n#ep = 10.**-10\n#df_x[\"A\"] = 1./(df_x[\"A\"]+ep)\n#df_x[\"B\"] = 1./(df_x[\"B\"]+ep)\n#df_x[\"O\"] = 1./(df_x[\"O\"]+ep)\n\ndf[\"OA+AO\"] = df[\"OA+AO\"]/2.\ndf[\"OB+BO\"] = df[\"OB+BO\"]/2.\ndf[\"AB+BA\"] = df[\"AB+BA\"]/2.\n\ndf[\"B3_Estimate\"] = df[B3_left2]*df[B3_right2]/(df[B3_middle])\ndf[\"B3sub_Estimate\"] = df[B3_left2]*df[B3_string[2]] + df[B3_right2]*df[B3_string[0]] - df[B3_string[0]]*df[B3_string[1]]*df[B3_string[2]]\n\ndf_x = df[[\"OA+AO\",\"OB+BO\",\"AA\",\"AB+BA\",\"BB\", \"B3_Estimate\"]].values\ndf_B3 = df[B3]\n\ny = np.array(df_B3)\n\nsc_x = skprep.StandardScaler() # standardize input data\nX_std = sc_x.fit_transform(df_x)\n#sc_y = skprep.StandardScaler()\n#y = sc_y.fit_transform(y.reshape(-1,1)).flatten() # scaling target values shouldn't generally be necessary\n\nX_train, X_test, y_train, y_test = skms.train_test_split(X_std, y, test_size=0.2, random_state=1)\n\nprint(\"Feature shape: \", X_train.shape)\nprint(\"y: \", y)\n\nregr = MLPRegressor(hidden_layer_sizes=(84,42,20), random_state=2, alpha=al, max_iter=200)\n\nregr.fit(X_train, y_train)\nprint(regr.n_iter_) # number of training epochs before fit terminated\ny_fit = regr.predict(X_train)\ny_pred = regr.predict(X_test)\nprint(\"Mean squared error train: %.8f\" % skm.mean_squared_error(y_train, y_fit))\nprint(\"Mean squared error test: %.8f\" % skm.mean_squared_error(y_test, y_pred))\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.4f' % skm.r2_score(y_test, y_pred))\n\nplt.scatter(y_fit, y_train, color='blue', label=\"Training Data\")\nplt.scatter(y_pred, y_test, color='orange', label=\"Testing Data\")\n\nplt.plot([-1,1],[-1,1], color=\"black\")\n#plt.ylabel(\"%s from KMC\" % B3)\nplt.ylabel(\"Kinetic Monte Carlo\")\nplt.xlabel(\"Fit\")\nplt.xlim([-0.004,np.max(y_pred)+0.005])\nplt.ylim([-0.004, np.max(y)+0.005])\nplt.legend()\n\nplt.show()\n#plt.savefig(\"OAB_B3subest_LL_krun.png\", transparent=True, bbox_inches='tight', pad_inches=0.05)\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.scatter", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "numpy.array", "matplotlib.rc", "sklearn.neural_network.MLPRegressor" ] ]
wenjunyoung/PAN_PLUS
[ "c893ff4775c8ff137a21c15d34fb93b9394dbfe5" ]
[ "dataset/pan/pan_synth.py" ]
[ "import numpy as np\nfrom PIL import Image\nfrom torch.utils import data\nimport cv2\nimport random\nimport torchvision.transforms as transforms\nimport torch\nimport pyclipper\nimport Polygon as plg\nimport math\nimport string\nimport scipy.io as scio\n\nsynth_root_dir = './data/SynthText/'\nsynth_train_data_dir = synth_root_dir\nsynth_train_gt_path = synth_root_dir + 'gt.mat'\n\n\ndef get_img(img_path, read_type='pil'):\n try:\n if read_type == 'cv2':\n img = cv2.imread(img_path)\n img = img[:, :, [2, 1, 0]]\n elif read_type == 'pil':\n img = np.array(Image.open(img_path))\n except Exception as e:\n print(img_path)\n raise\n return img\n\n\ndef get_ann(img, gts, texts, index):\n bboxes = np.array(gts[index])\n bboxes = np.reshape(bboxes, (bboxes.shape[0], bboxes.shape[1], -1))\n bboxes = bboxes.transpose(2, 1, 0)\n bboxes = np.reshape(bboxes, (bboxes.shape[0], -1)) / ([img.shape[1], img.shape[0]] * 4)\n\n words = []\n for text in texts[index]:\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n words.extend([w for w in text.split(' ') if len(w) > 0])\n\n return bboxes, words\n\n\ndef random_horizontal_flip(imgs):\n if random.random() < 0.5:\n for i in range(len(imgs)):\n imgs[i] = np.flip(imgs[i], axis=1).copy()\n return imgs\n\n\ndef random_rotate(imgs):\n max_angle = 10\n angle = random.random() * 2 * max_angle - max_angle\n for i in range(len(imgs)):\n img = imgs[i]\n w, h = img.shape[:2]\n rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)\n img_rotation = cv2.warpAffine(img, rotation_matrix, (h, w), flags=cv2.INTER_NEAREST)\n imgs[i] = img_rotation\n return imgs\n\n\ndef scale_aligned(img, h_scale, w_scale):\n h, w = img.shape[0:2]\n h = int(h * h_scale + 0.5)\n w = int(w * w_scale + 0.5)\n if h % 32 != 0:\n h = h + (32 - h % 32)\n if w % 32 != 0:\n w = w + (32 - w % 32)\n img = cv2.resize(img, dsize=(w, h))\n return img\n\n\ndef random_scale(img, short_size=736):\n h, w = img.shape[0:2]\n\n scale = np.random.choice(np.array([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]))\n scale = (scale * short_size) / min(h, w)\n\n aspect = np.random.choice(np.array([0.9, 0.95, 1.0, 1.05, 1.1]))\n h_scale = scale * math.sqrt(aspect)\n w_scale = scale / math.sqrt(aspect)\n\n img = scale_aligned(img, h_scale, w_scale)\n return img\n\n\ndef random_crop_padding(imgs, target_size):\n h, w = imgs[0].shape[0:2]\n t_w, t_h = target_size\n p_w, p_h = target_size\n if w == t_w and h == t_h:\n return imgs\n\n t_h = t_h if t_h < h else h\n t_w = t_w if t_w < w else w\n\n if random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:\n # make sure to crop the text region\n tl = np.min(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)\n tl[tl < 0] = 0\n br = np.max(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)\n br[br < 0] = 0\n br[0] = min(br[0], h - t_h)\n br[1] = min(br[1], w - t_w)\n\n i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0\n j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0\n else:\n i = random.randint(0, h - t_h) if h - t_h > 0 else 0\n j = random.randint(0, w - t_w) if w - t_w > 0 else 0\n\n n_imgs = []\n for idx in range(len(imgs)):\n if len(imgs[idx].shape) == 3:\n s3_length = int(imgs[idx].shape[-1])\n img = imgs[idx][i:i + t_h, j:j + t_w, :]\n img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT,\n value=tuple(0 for i in range(s3_length)))\n else:\n img = imgs[idx][i:i + t_h, j:j + t_w]\n img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT, value=(0,))\n n_imgs.append(img_p)\n return n_imgs\n\n\ndef 
update_word_mask(instance, instance_before_crop, word_mask):\n labels = np.unique(instance)\n\n for label in labels:\n if label == 0:\n continue\n ind = instance == label\n if np.sum(ind) == 0:\n word_mask[label] = 0\n continue\n ind_before_crop = instance_before_crop == label\n # print(np.sum(ind), np.sum(ind_before_crop))\n if float(np.sum(ind)) / np.sum(ind_before_crop) > 0.9:\n continue\n word_mask[label] = 0\n\n return word_mask\n\n\ndef dist(a, b):\n return np.linalg.norm((a - b), ord=2, axis=0)\n\n\ndef perimeter(bbox):\n peri = 0.0\n for i in range(bbox.shape[0]):\n peri += dist(bbox[i], bbox[(i + 1) % bbox.shape[0]])\n return peri\n\n\ndef shrink(bboxes, rate, max_shr=20):\n rate = rate * rate\n shrinked_bboxes = []\n for bbox in bboxes:\n area = plg.Polygon(bbox).area()\n peri = perimeter(bbox)\n\n try:\n pco = pyclipper.PyclipperOffset()\n pco.AddPath(bbox, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)\n offset = min(int(area * (1 - rate) / (peri + 0.001) + 0.5), max_shr)\n\n shrinked_bbox = pco.Execute(-offset)\n if len(shrinked_bbox) == 0:\n shrinked_bboxes.append(bbox)\n continue\n\n shrinked_bbox = np.array(shrinked_bbox[0])\n if shrinked_bbox.shape[0] <= 2:\n shrinked_bboxes.append(bbox)\n continue\n\n shrinked_bboxes.append(shrinked_bbox)\n except Exception as e:\n print('area:', area, 'peri:', peri)\n shrinked_bboxes.append(bbox)\n\n return shrinked_bboxes\n\n\ndef get_vocabulary(voc_type, EOS='EOS', PADDING='PAD', UNKNOWN='UNK'):\n if voc_type == 'LOWERCASE':\n voc = list(string.digits + string.ascii_lowercase)\n elif voc_type == 'ALLCASES':\n voc = list(string.digits + string.ascii_letters)\n elif voc_type == 'ALLCASES_SYMBOLS':\n voc = list(string.printable[:-5])\n else:\n raise KeyError('voc_type must be one of \"LOWERCASE\", \"ALLCASES\", \"ALLCASES_SYMBOLS\"')\n\n # update the voc with specifical chars\n voc.append(EOS)\n voc.append(PADDING)\n voc.append(UNKNOWN)\n\n char2id = dict(zip(voc, range(len(voc))))\n id2char = dict(zip(range(len(voc)), voc))\n\n return voc, char2id, id2char\n\n\nclass PAN_Synth(data.Dataset):\n def __init__(self,\n is_transform=False,\n img_size=None,\n short_size=736,\n kernel_scale=0.5,\n with_rec=False,\n read_type='pil'):\n self.is_transform = is_transform\n\n self.img_size = img_size if (img_size is None or isinstance(img_size, tuple)) else (img_size, img_size)\n self.kernel_scale = kernel_scale\n self.short_size = short_size\n self.with_rec = with_rec\n self.read_type = read_type\n\n data = scio.loadmat(synth_train_gt_path)\n\n self.img_paths = data['imnames'][0]\n self.gts = data['wordBB'][0]\n self.texts = data['txt'][0]\n\n self.voc, self.char2id, self.id2char = get_vocabulary('LOWERCASE')\n self.max_word_num = 200\n self.max_word_len = 32\n\n def __len__(self):\n return len(self.img_paths)\n\n def __getitem__(self, index):\n img_path = synth_train_data_dir + self.img_paths[index][0]\n img = get_img(img_path, read_type=self.read_type)\n bboxes, words = get_ann(img, self.gts, self.texts, index)\n\n if bboxes.shape[0] > self.max_word_num:\n bboxes = bboxes[:self.max_word_num]\n words = words[:self.max_word_num]\n\n gt_words = np.full((self.max_word_num, self.max_word_len), self.char2id['PAD'], dtype=np.int32)\n word_mask = np.zeros((self.max_word_num,), dtype=np.int32)\n for i, word in enumerate(words):\n if word == '###':\n continue\n word = word.lower()\n gt_word = np.full((self.max_word_len,), self.char2id['PAD'], dtype=np.int)\n for j, char in enumerate(word):\n if j > self.max_word_len - 1:\n break\n if char in 
self.char2id:\n gt_word[j] = self.char2id[char]\n else:\n gt_word[j] = self.char2id['UNK']\n if len(word) > self.max_word_len - 1:\n gt_word[-1] = self.char2id['EOS']\n else:\n gt_word[len(word)] = self.char2id['EOS']\n gt_words[i + 1] = gt_word\n word_mask[i + 1] = 1\n\n if self.is_transform:\n img = random_scale(img, self.short_size)\n\n gt_instance = np.zeros(img.shape[0:2], dtype='uint8')\n training_mask = np.ones(img.shape[0:2], dtype='uint8')\n if bboxes.shape[0] > 0:\n bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),\n (bboxes.shape[0], -1, 2)).astype('int32')\n for i in range(bboxes.shape[0]):\n cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)\n if words[i] == '###':\n cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)\n\n gt_kernels = []\n for rate in [self.kernel_scale]:\n gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')\n kernel_bboxes = shrink(bboxes, rate)\n for i in range(bboxes.shape[0]):\n cv2.drawContours(gt_kernel, [kernel_bboxes[i]], -1, 1, -1)\n gt_kernels.append(gt_kernel)\n\n if self.is_transform:\n imgs = [img, gt_instance, training_mask]\n imgs.extend(gt_kernels)\n\n if not self.with_rec:\n imgs = random_horizontal_flip(imgs)\n imgs = random_rotate(imgs)\n gt_instance_before_crop = imgs[1].copy()\n imgs = random_crop_padding(imgs, self.img_size)\n img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]\n word_mask = update_word_mask(gt_instance, gt_instance_before_crop, word_mask)\n\n gt_text = gt_instance.copy()\n gt_text[gt_text > 0] = 1\n gt_kernels = np.array(gt_kernels)\n\n max_instance = np.max(gt_instance)\n gt_bboxes = np.zeros((self.max_word_num, 4), dtype=np.int32)\n for i in range(1, max_instance + 1):\n ind = gt_instance == i\n if np.sum(ind) == 0:\n continue\n points = np.array(np.where(ind)).transpose((1, 0))\n tl = np.min(points, axis=0)\n br = np.max(points, axis=0) + 1\n gt_bboxes[i] = (tl[0], tl[1], br[0], br[1])\n\n img = Image.fromarray(img)\n img = img.convert('RGB')\n\n if self.is_transform:\n img = transforms.ColorJitter(brightness=32.0 / 255, saturation=0.5)(img)\n\n img = transforms.ToTensor()(img)\n img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)\n\n gt_text = torch.from_numpy(gt_text).long()\n gt_kernels = torch.from_numpy(gt_kernels).long()\n training_mask = torch.from_numpy(training_mask).long()\n gt_instance = torch.from_numpy(gt_instance).long()\n gt_bboxes = torch.from_numpy(gt_bboxes).long()\n gt_words = torch.from_numpy(gt_words).long()\n word_mask = torch.from_numpy(word_mask).long()\n\n data = dict(\n imgs=img,\n gt_texts=gt_text,\n gt_kernels=gt_kernels,\n training_masks=training_mask,\n gt_instances=gt_instance,\n gt_bboxes=gt_bboxes,\n )\n if self.with_rec:\n data.update(dict(\n gt_words=gt_words,\n word_masks=word_mask\n ))\n\n return data\n" ]
[ [ "numpy.unique", "numpy.reshape", "numpy.min", "scipy.io.loadmat", "numpy.linalg.norm", "torch.from_numpy", "numpy.full", "numpy.ones", "numpy.max", "numpy.where", "numpy.flip", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
kynk94/torch-firewood
[ "8ecd03c166bcadaae22a6cb2c1457a82f2c644eb", "8ecd03c166bcadaae22a6cb2c1457a82f2c644eb" ]
[ "firewood/trainer/callbacks/image.py", "tests/helpers/datasets.py" ]
[ "import math\r\nimport os\r\nfrom typing import Any, Iterator, Optional, Tuple, cast\r\n\r\nimport torch\r\nimport torchvision.transforms.functional_tensor as TFT\r\nimport torchvision.utils as TU\r\nfrom pytorch_lightning import Callback, LightningModule, Trainer\r\nfrom torch import Tensor\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom firewood import utils\r\nfrom firewood.common.types import DEVICE\r\nfrom firewood.utils.image import batch_flat_to_square, save_tensor_to_image\r\nfrom firewood.utils.torch_op import args_to, kwargs_to\r\n\r\n\r\ndef _pass_through(*args: Any, **kwargs: Any) -> None:\r\n return None\r\n\r\n\r\nclass _ImageCallback(Callback):\r\n def __init__(\r\n self,\r\n step: Optional[int] = None,\r\n epoch: int = 1,\r\n num_samples: int = 4,\r\n nrow: Optional[int] = None,\r\n padding: int = 2,\r\n normalize: bool = False,\r\n norm_range: Optional[Tuple[int, int]] = None,\r\n on_epoch_end: bool = True,\r\n add_fixed_samples: bool = False,\r\n scale_each: bool = False,\r\n pad_value: int = 0,\r\n save_image: bool = False,\r\n grid_max_resolution: Optional[int] = None,\r\n **kwargs: Any,\r\n ) -> None:\r\n self.step = step\r\n self.epoch = epoch\r\n self.num_samples = num_samples\r\n self.nrow = nrow or max(4, math.ceil(math.sqrt(num_samples)))\r\n self.padding = padding\r\n self.normalize = normalize\r\n self.norm_range = norm_range\r\n self.add_fixed_samples = add_fixed_samples\r\n self.scale_each = scale_each\r\n self.pad_value = pad_value\r\n self.save_image = save_image\r\n self.grid_max_resolution = grid_max_resolution\r\n self.kwargs = kwargs\r\n self.device: Optional[DEVICE] = None\r\n\r\n if self.step is None:\r\n if not on_epoch_end:\r\n raise ValueError(\r\n \"step must be specified if on_epoch_end is False\"\r\n )\r\n setattr(self, \"on_train_batch_end\", _pass_through)\r\n setattr(self, \"on_test_batch_end\", _pass_through)\r\n setattr(self, \"on_validation_batch_end\", _pass_through)\r\n setattr(self, \"on_predict_batch_end\", _pass_through)\r\n if not on_epoch_end:\r\n setattr(self, \"on_train_epoch_end\", _pass_through)\r\n setattr(self, \"on_test_epoch_end\", _pass_through)\r\n setattr(self, \"on_validation_epoch_end\", _pass_through)\r\n setattr(self, \"on_predict_epoch_end\", _pass_through)\r\n\r\n self._train_data_iter: Optional[Iterator] = None\r\n self._test_data_iter: Optional[Iterator] = None\r\n self._val_data_iter: Optional[Iterator] = None\r\n self._fixed_train_batch: Optional[Tuple[Any, ...]] = None\r\n self._fixed_test_batch: Optional[Tuple[Any, ...]] = None\r\n self._fixed_val_batch: Optional[Tuple[Any, ...]] = None\r\n\r\n @torch.no_grad()\r\n def _sample(\r\n self,\r\n pl_module: LightningModule,\r\n input: Tensor,\r\n *args: Any,\r\n **kwargs: Any,\r\n ) -> Tensor:\r\n if self.device is None:\r\n self.device = pl_module.device\r\n args = args_to(*args, dtype=input.dtype, device=self.device)\r\n kwargs.update(self.kwargs)\r\n kwargs = kwargs_to(**kwargs, dtype=input.dtype, device=self.device)\r\n\r\n pl_module.eval()\r\n generated_images: Tensor = pl_module(\r\n input.to(device=pl_module.device, non_blocking=True),\r\n *args,\r\n **kwargs,\r\n )\r\n pl_module.train()\r\n\r\n if generated_images.ndim == 2:\r\n img_dim = getattr(pl_module, \"img_dim\", None)\r\n if img_dim is None:\r\n generated_images = batch_flat_to_square(generated_images)\r\n else:\r\n generated_images = generated_images.view(\r\n self.num_samples, *img_dim\r\n )\r\n return generated_images\r\n\r\n def _make_grid(self, input: Tensor) -> Tensor:\r\n grid = 
TU.make_grid(\r\n            tensor=input,\r\n            nrow=self.nrow,\r\n            padding=self.padding,\r\n            normalize=self.normalize,\r\n            value_range=self.norm_range,\r\n            scale_each=self.scale_each,\r\n            pad_value=self.pad_value,\r\n        )\r\n        if self.grid_max_resolution is None:\r\n            return grid\r\n\r\n        height, width = grid.shape[-2:]\r\n        larger = max(height, width)\r\n        if larger < self.grid_max_resolution:\r\n            return grid\r\n\r\n        ratio = self.grid_max_resolution / larger\r\n        new_resolution = (int(height * ratio), int(width * ratio))\r\n        return TFT.resize(grid, new_resolution, antialias=True)\r\n\r\n    def log_image(\r\n        self,\r\n        trainer: Trainer,\r\n        pl_module: LightningModule,\r\n        input: Tensor,\r\n        title: Optional[str] = None,\r\n        global_step: Optional[int] = None,\r\n    ) -> None:\r\n        title = title or \"images\"\r\n        str_title = f\"{pl_module.__class__.__name__}_{title}\"\r\n        global_step = global_step or trainer.global_step\r\n\r\n        writer = getattr(trainer.logger, \"experiment\")\r\n        writer.add_image(str_title, input, global_step=global_step)\r\n\r\n        if self.save_image and trainer.global_rank == 0:\r\n            log_dir = getattr(trainer.logger, \"log_dir\")\r\n            image_dir = utils.makedirs(log_dir, \"images\")\r\n            basename = utils.validate_filename(f\"{str_title}_{global_step}.png\")\r\n            save_tensor_to_image(input, os.path.join(image_dir, basename))\r\n\r\n    def _set_data_iter(self, trainer: Trainer, stage: str = \"train\") -> None:\r\n        datamodule = getattr(trainer, \"datamodule\", None)\r\n        stage = stage.lower()\r\n        if datamodule is not None:\r\n            if stage.startswith(\"train\"):\r\n                dataloader: DataLoader = datamodule.train_dataloader()\r\n                self._train_data_iter = iter(dataloader)\r\n            elif stage.startswith(\"test\"):\r\n                dataloader = datamodule.test_dataloader()\r\n                self._test_data_iter = iter(dataloader)\r\n            elif stage.startswith(\"val\"):\r\n                dataloader = datamodule.val_dataloader()\r\n                self._val_data_iter = iter(dataloader)\r\n            else:\r\n                raise ValueError(f\"Unknown stage: {stage}\")\r\n            return\r\n        if stage.startswith(\"train\"):\r\n            data_source = trainer._data_connector._train_dataloader_source\r\n            dataloader = cast(DataLoader, data_source.instance)\r\n            if dataloader is not None:\r\n                self._train_data_iter = iter(dataloader)\r\n                return\r\n        elif stage.startswith(\"test\"):\r\n            data_source = trainer._data_connector._test_dataloader_source\r\n            dataloader = cast(DataLoader, data_source.instance)\r\n            if dataloader is not None:\r\n                self._test_data_iter = iter(dataloader)\r\n                return\r\n        elif stage.startswith(\"val\"):\r\n            data_source = trainer._data_connector._val_dataloader_source\r\n            dataloader = cast(DataLoader, data_source.instance)\r\n            if dataloader is not None:\r\n                self._val_data_iter = iter(dataloader)\r\n                return\r\n        else:\r\n            raise ValueError(f\"Unknown stage: {stage}\")\r\n        raise ValueError(\r\n            f\"No {stage} dataloader found for {utils.get_name(trainer)}.\"\r\n        )\r\n\r\n    def get_train_batch(self, trainer: Trainer) -> Any:\r\n        if self._train_data_iter is None:\r\n            self._set_data_iter(trainer, \"train\")\r\n        try:\r\n            return next(cast(Iterator, self._train_data_iter))\r\n        except StopIteration:\r\n            self._set_data_iter(trainer, \"train\")\r\n            return next(cast(Iterator, self._train_data_iter))\r\n\r\n    def get_test_batch(self, trainer: Trainer) -> Any:\r\n        if self._test_data_iter is None:\r\n            self._set_data_iter(trainer, \"test\")\r\n        try:\r\n            return next(cast(Iterator, self._test_data_iter))\r\n        except StopIteration:\r\n            self._set_data_iter(trainer, \"test\")\r\n            return next(cast(Iterator, self._test_data_iter))\r\n\r\n    def get_val_batch(self, trainer: Trainer) -> Any:\r\n        if self._val_data_iter is None:\r\n            self._set_data_iter(trainer, \"validation\")\r\n        try:\r\n            return next(cast(Iterator, self._val_data_iter))\r\n        except StopIteration:\r\n            self._set_data_iter(trainer, \"validation\")\r\n            return next(cast(Iterator, self._val_data_iter))\r\n\r\n    def get_fixed_train_batch(self, trainer: Trainer) -> Any:\r\n        if not self.add_fixed_samples:\r\n            return\r\n        if self.fixed_train_batch is None:\r\n            self.fixed_train_batch = self.get_train_batch(trainer)\r\n        return self.fixed_train_batch\r\n\r\n    def get_fixed_test_batch(self, trainer: Trainer) -> Any:\r\n        if not self.add_fixed_samples:\r\n            return\r\n        if self.fixed_test_batch is None:\r\n            self.fixed_test_batch = self.get_test_batch(trainer)\r\n        return self.fixed_test_batch\r\n\r\n    def get_fixed_val_batch(self, trainer: Trainer) -> Any:\r\n        if not self.add_fixed_samples:\r\n            return\r\n        if self.fixed_val_batch is None:\r\n            self.fixed_val_batch = self.get_val_batch(trainer)\r\n        return self.fixed_val_batch\r\n\r\n    @property\r\n    def fixed_train_batch(self) -> Optional[Tuple[Any, ...]]:\r\n        if self._fixed_train_batch is None:\r\n            return None\r\n        return _return_batch(self._fixed_train_batch, self.device)\r\n\r\n    @fixed_train_batch.setter\r\n    def fixed_train_batch(self, value: Any) -> None:\r\n        if isinstance(value, Tensor):\r\n            value = value.to(device=\"cpu\")\r\n        else:\r\n            value = args_to(*value, device=self.device)\r\n        self._fixed_train_batch = value\r\n\r\n    @property\r\n    def fixed_test_batch(self) -> Optional[Tuple[Any, ...]]:\r\n        if self._fixed_test_batch is None:\r\n            return None\r\n        return _return_batch(self._fixed_test_batch, self.device)\r\n\r\n    @fixed_test_batch.setter\r\n    def fixed_test_batch(self, value: Any) -> None:\r\n        if isinstance(value, Tensor):\r\n            value = value.to(device=\"cpu\")\r\n        else:\r\n            value = args_to(*value, device=self.device)\r\n        self._fixed_test_batch = value\r\n\r\n    @property\r\n    def fixed_val_batch(self) -> Optional[Tuple[Any, ...]]:\r\n        if self._fixed_val_batch is None:\r\n            return None\r\n        return _return_batch(self._fixed_val_batch, self.device)\r\n\r\n    @fixed_val_batch.setter\r\n    def fixed_val_batch(self, value: Any) -> None:\r\n        if isinstance(value, Tensor):\r\n            value = value.to(device=\"cpu\")\r\n        else:\r\n            value = args_to(*value, device=self.device)\r\n        self._fixed_val_batch = value\r\n\r\n\r\ndef _return_batch(batch: Any, device: Optional[DEVICE] = None) -> Any:\r\n    if device is None:\r\n        return batch\r\n    if isinstance(batch, Tensor):\r\n        return batch.to(device=device, non_blocking=True)\r\n    return args_to(*batch, device=device)\r\n", "import torch\r\nfrom torch import Tensor\r\nfrom torch.utils.data import DataLoader, Dataset\r\n\r\n\r\nclass RandomDataset(Dataset):\r\n    def __init__(self, size: int, length: int) -> None:\r\n        self.length = length\r\n        self.data = torch.randn(length, size)\r\n\r\n    def __getitem__(self, index) -> Tensor:\r\n        return self.data[index]\r\n\r\n    def __len__(self):\r\n        return self.length\r\n\r\n\r\nclass RandomImageDataset(Dataset):\r\n    def __init__(self, size: int, length: int, channel: int = 3) -> None:\r\n        self.length = length\r\n        self.data = torch.randn(length, channel, size, size)\r\n\r\n    def __getitem__(self, index) -> Tensor:\r\n        return self.data[index]\r\n\r\n    def __len__(self):\r\n        return self.length\r\n\r\n\r\ndef get_dataloader(\r\n    dataset: Dataset,\r\n    batch_size: int = 1,\r\n    shuffle: bool = True,\r\n    num_workers: int = 0,\r\n) -> DataLoader:\r\n    return DataLoader(\r\n        dataset, batch_size=batch_size, 
shuffle=shuffle, num_workers=num_workers\r\n )\r\n\r\n\r\ndef get_random_dataset(\r\n size: int,\r\n length: int,\r\n batch_size: int = 1,\r\n shuffle: bool = True,\r\n num_workers: int = 0,\r\n) -> DataLoader:\r\n return get_dataloader(\r\n RandomDataset(size, length), batch_size, shuffle, num_workers\r\n )\r\n\r\n\r\ndef get_random_image_dataset(\r\n size: int,\r\n length: int,\r\n channel: int = 3,\r\n batch_size: int = 1,\r\n shuffle: bool = True,\r\n num_workers: int = 0,\r\n) -> DataLoader:\r\n return get_dataloader(\r\n RandomImageDataset(size, length, channel),\r\n batch_size,\r\n shuffle,\r\n num_workers,\r\n )\r\n" ]
[ [ "torch.no_grad" ], [ "torch.randn", "torch.utils.data.DataLoader" ] ]
Kamalhsn/deraining
[ "9d91abe8178ae654e2b75d987070b684ee478507" ]
[ "train_model.py" ]
[ "\"\"\"\nModel training script\n\"\"\"\nimport os\nimport os.path as ops\nimport argparse\nimport time\n\nimport tensorflow as tf\ntf.compat.v1.disable_v2_behavior()\nimport numpy as np\nimport glog as log\nimport sys\n\nimport data_feed_pipline\nimport global_config\n#import derain_drop_net2\nimport derain_drop_net\n\nCFG = global_config.cfg\nVGG_MEAN = [103.939, 116.779, 123.68]\n\n\ndef init_args():\n \"\"\"\n\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_dir', type=str, help='The dataset dir', default = r\"E:\\Kamal\\debugging\\attentionGAN\\tools_tf2\\dataset\")\n parser.add_argument('--weights_path', type=str,\n help='The pretrained weights path', default=None)#r\"E:\\Kamal\\debugging\\attentionGAN\\tools\\tools_tf2\\tf2_model\\weights\\derain_gan_2022-02-03-20-36-31.ckpt-19100\")\n\n return parser.parse_args()\n\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(grads, 0)\n grad = tf.math.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. 
we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n\n return average_grads\n\n\ndef compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n \"\"\"\n Calculate gradients for single GPU\n :param images: images for training\n :param labels: labels corresponding to images\n :param net: classification model\n :param optimizer: network optimizer\n :param is_net_first_initialized: if the network is initialized\n :return:\n \"\"\"\n net_loss = net.compute_loss(input_tensor=images,\n labels=labels,\n name='attentive_derain',\n reuse=is_net_first_initialized)\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads\n\n\ndef train_model(dataset_dir, weights_path=None):\n \"\"\"\n\n :param dataset_dir:\n :param weights_path:\n :return:\n \"\"\"\n # Build a data set\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n tf_device='/gpu'\n #with tf.device('/gpu:0'):\n train_dataset = data_feed_pipline.DerainDataFeeder(dataset_dir=dataset_dir, flags='train')\n val_dataset = data_feed_pipline.DerainDataFeeder(dataset_dir=dataset_dir, flags='val')\n \n \n train_input_tensor, train_label_tensor, train_mask_tensor = train_dataset.inputs(CFG.TRAIN.BATCH_SIZE, 1) \n val_input_tensor, val_label_tensor, val_mask_tensor = val_dataset.inputs(CFG.TRAIN.BATCH_SIZE, 1)\n\n # define network\n derain_net = derain_drop_net.DeRainNet(phase=tf.constant('train', dtype=tf.string))\n\n # calculate train loss and validation loss\n train_gan_loss, train_discriminative_loss, train_net_output = derain_net.compute_loss(\n input_tensor=train_input_tensor,\n gt_label_tensor=train_label_tensor,\n mask_label_tensor=train_mask_tensor,\n name='derain_net',\n reuse=False\n )\n\n val_gan_loss, val_discriminative_loss, val_net_output = derain_net.compute_loss(\n input_tensor=val_input_tensor,\n gt_label_tensor=val_label_tensor,\n mask_label_tensor=val_mask_tensor,\n name='derain_net',\n reuse=True\n )\n\n # calculate train ssim, psnr and validation ssim, psnr\n train_label_tensor_scale = tf.image.convert_image_dtype(\n image=(train_label_tensor + 1.0) / 2.0,\n dtype=tf.uint8\n )\n train_net_output_tensor_scale = tf.image.convert_image_dtype(\n image=(train_net_output + 1.0) / 2.0,\n dtype=tf.uint8\n )\n val_label_tensor_scale = tf.image.convert_image_dtype(\n image=(val_label_tensor + 1.0) / 2.0,\n dtype=tf.uint8\n )\n val_net_output_tensor_scale = tf.image.convert_image_dtype(\n image=(val_net_output + 1.0) / 2.0,\n dtype=tf.uint8\n )\n\n train_label_tensor_scale = tf.image.rgb_to_grayscale(\n images=tf.reverse(train_label_tensor_scale, axis=[-1])\n )\n train_net_output_tensor_scale = tf.image.rgb_to_grayscale(\n images=tf.reverse(train_net_output_tensor_scale, axis=[-1])\n )\n val_label_tensor_scale = tf.image.rgb_to_grayscale(\n images=tf.reverse(val_label_tensor_scale, axis=[-1])\n )\n val_net_output_tensor_scale = tf.image.rgb_to_grayscale(\n images=tf.reverse(val_net_output_tensor_scale, axis=[-1])\n )\n\n train_ssim = tf.math.reduce_mean(tf.image.ssim(\n train_label_tensor_scale, train_net_output_tensor_scale, max_val=255),\n name='avg_train_ssim'\n )\n train_psnr = tf.math.reduce_mean(tf.image.psnr(\n train_label_tensor_scale, train_net_output_tensor_scale, max_val=255),\n name='avg_train_psnr'\n )\n val_ssim = tf.math.reduce_mean(tf.image.ssim(\n val_label_tensor_scale, val_net_output_tensor_scale, max_val=255),\n 
name='avg_val_ssim'\n )\n val_psnr = tf.math.reduce_mean(tf.image.psnr(\n val_label_tensor_scale, val_net_output_tensor_scale, max_val=255),\n name='avg_val_psnr'\n )\n\n # collect trainable vars to update\n train_vars = tf.compat.v1.trainable_variables()\n\n d_vars = [tmp for tmp in train_vars if 'discriminator' in tmp.name]\n g_vars = [tmp for tmp in train_vars if 'attentive_' in tmp.name and 'vgg_feats' not in tmp.name]\n vgg_vars = [tmp for tmp in train_vars if \"vgg_feats\" in tmp.name]\n\n # set optimizer\n global_step = tf.Variable(0, trainable=False)\n learning_rate = tf.compat.v1.train.exponential_decay(\n CFG.TRAIN.LEARNING_RATE, global_step, 100000, 0.1, staircase=True\n )\n\n update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n d_optim = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(\n train_discriminative_loss, var_list=d_vars)\n g_optim = tf.compat.v1.train.MomentumOptimizer(\n learning_rate=learning_rate,\n momentum=tf.constant(0.9, tf.float32)).minimize(train_gan_loss, var_list=g_vars)\n\n # Set tf saver\n saver = tf.compat.v1.train.Saver(max_to_keep=None)\n model_save_dir = \"tf2_model/weights\"\n #model_save_dir = r'E:\\Kamal\\deraining\\attentionGAN\\tools\\tools_tf2\\model\\weights'\n if not ops.exists(model_save_dir):\n os.makedirs(model_save_dir)\n #train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))\n #model_name = 'derain_gan_{:s}.ckpt'.format(str(train_start_time))\n #model_save_path = ops.join(model_save_dir, model_name)\n\n # Set tf summary\n tboard_save_path = 'tf2_tboard/derain_gan'\n if not ops.exists(tboard_save_path):\n os.makedirs(tboard_save_path)\n\n #train_g_loss_scalar = tf.compat.v1.summary.scalar(name='train_gan_loss', tensor=train_gan_loss)\n train_g_loss_scalar = tf.compat.v1.summary.scalar(name='train_gan_loss', tensor=train_gan_loss)\n train_d_loss_scalar = tf.compat.v1.summary.scalar(name='train_discriminative_loss', tensor=train_discriminative_loss)\n train_ssim_scalar = tf.compat.v1.summary.scalar(name='train_image_ssim', tensor=train_ssim)\n train_psnr_scalar = tf.compat.v1.summary.scalar(name='train_image_psnr', tensor=train_psnr)\n val_g_loss_scalar = tf.compat.v1.summary.scalar(name='val_gan_loss', tensor=val_gan_loss)\n val_d_loss_scalar = tf.compat.v1.summary.scalar(name='val_discriminative_loss', tensor=val_discriminative_loss)\n val_ssim_scalar = tf.compat.v1.summary.scalar(name='val_image_ssim', tensor=val_ssim)\n val_psnr_scalar = tf.compat.v1.summary.scalar(name='val_image_psnr', tensor=val_psnr)\n\n lr_scalar = tf.compat.v1.summary.scalar(name='learning_rate', tensor=learning_rate)\n\n train_summary_op = tf.compat.v1.summary.merge(\n [train_g_loss_scalar, train_d_loss_scalar, train_ssim_scalar, train_psnr_scalar, lr_scalar]\n )\n val_summary_op = tf.compat.v1.summary.merge(\n [val_g_loss_scalar, val_d_loss_scalar, val_ssim_scalar, val_psnr_scalar]\n )\n\n # Set sess configuration\n sess_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION\n sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH\n sess_config.gpu_options.allocator_type = 'BFC'\n\n sess = tf.compat.v1.Session(config=sess_config)\n\n summary_writer = tf.compat.v1.summary.FileWriter(tboard_save_path)\n summary_writer.add_graph(sess.graph)\n\n # Set the training parameters\n train_epochs = CFG.TRAIN.EPOCHS\n\n log.info('Global configuration is as 
follows:')\n log.info(CFG)\n with tf.device('/gpu:0'):\n with sess.as_default():\n if weights_path is None:\n log.info('Training from scratch')\n init = tf.compat.v1.global_variables_initializer()\n sess.run(init)\n else:\n log.info('Restore model from last model checkpoint {:s}'.format(weights_path))\n saver.restore(sess=sess, save_path=weights_path)\n\n # Load pre-training parameters\n pretrained_weights = np.load('./vgg16.npy', encoding='latin1', allow_pickle=True).item()\n\n for vv in vgg_vars:\n weights_key = vv.name.split('/')[-3]\n try:\n weights = pretrained_weights[weights_key][0]\n _op = tf.compat.v1.assign(vv, weights)\n sess.run(_op)\n except Exception as _:\n continue\n\n # train loop\n for epoch in range(train_epochs):\n # training part\n t_start = time.time()\n \n d_op, g_op, train_d_loss, train_g_loss, train_avg_ssim, \\\n train_avg_psnr, train_summary, val_summary = sess.run(\n [d_optim, g_optim, train_discriminative_loss, train_gan_loss, train_ssim,\n train_psnr, train_summary_op, val_summary_op]\n )\n\n summary_writer.add_summary(train_summary, global_step=epoch)\n summary_writer.add_summary(val_summary, global_step=epoch)\n\n cost_time = time.time() - t_start\n\n log.info('Epoch_Train: {:d} D_loss: {:.5f} G_loss: '\n '{:.5f} SSIM: {:.5f} PSNR: {:.5f} Cost_time: {:.5f}s'.format(\n epoch, train_d_loss, train_g_loss, train_avg_ssim, train_avg_psnr, cost_time)\n )\n\n # Evaluate model\n if epoch % 500 == 0:\n val_d_loss, val_g_loss, val_avg_ssim, val_avg_psnr = sess.run(\n [val_discriminative_loss, val_gan_loss, val_ssim, val_psnr]\n )\n log.info('Epoch_Val: {:d} D_loss: {:.5f} G_loss: '\n '{:.5f} SSIM: {:.5f} PSNR: {:.5f} Cost_time: {:.5f}s'.format(\n epoch, val_d_loss, val_g_loss, val_avg_ssim, val_avg_psnr, cost_time)\n )\n\n # Save Model\n if epoch % 500 == 0:\n train_start_time = str(time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())))+ '_epoch_{:d}'.format(epoch)\n model_name = 'derain_gan_{:s}.ckpt'.format(str(train_start_time))\n model_save_path = ops.join(model_save_dir, model_name)\n saver.save(sess=sess, save_path=model_save_path, global_step=epoch)\n\n return\n\n\ndef train_multi_gpu(dataset_dir, weights_path=None):\n \"\"\"\n\n :param dataset_dir:\n :param weights_path:\n :return:\n \"\"\"\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n # init args\n args = init_args()\n\n # train model\n train_model(args.dataset_dir, weights_path=args.weights_path)\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.image.ssim", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.Variable", "tensorflow.compat.v1.summary.merge", "tensorflow.compat.v1.trainable_variables", "numpy.load", "tensorflow.reverse", "tensorflow.compat.v1.assign", "tensorflow.compat.v1.train.exponential_decay", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.summary.scalar", "tensorflow.image.psnr", "tensorflow.compat.v1.ConfigProto", "tensorflow.constant", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.summary.FileWriter", "tensorflow.expand_dims", "tensorflow.math.reduce_mean", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.image.convert_image_dtype" ] ]
a-taniguchi/SpCoTMHP
[ "785c6fd5d33c37a6779ab2fde36bb2df274764bd" ]
[ "src/planning/albert_b_spconavi_viterbi_path_calculate.py" ]
[ "#!/usr/bin/env python\n#coding:utf-8\nimport os\nimport time\nimport numpy as np\nfrom scipy.stats import multivariate_normal,multinomial\nimport collections\n#from itertools import izip\nimport spconavi_read_data\nimport spconavi_save_data\nfrom __init__ import *\nfrom submodules import *\n\ntools = spconavi_read_data.Tools()\nread_data = spconavi_read_data.ReadingData()\nsave_data = spconavi_save_data.SavingData()\n\n#ITO = 0 # 伊藤くん改変を適用する(1)\n\n\"\"\"\n#v# Ito #v#\ndef PostProbMap_nparray_jit_ITO( CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K): #,IndexMap):\n x,y = np.meshgrid(np.linspace(-10.0,9.1,map_width),np.linspace(-10.0,9.1,map_length))\n pos = np.dstack((x,y))\t\n #PostProbMap = np.array([ [ PostProb_ij([width, length],Mu,Sig,map_length,map_width, CostMapProb,it) for width in xrange(map_width) ] for length in xrange(map_length) ])\n for i in range(K):\n if i==0:\n PostProbMap=Phi_l[i][4]*multivariate_normal(Mu[i],Sig[i]).pdf(pos)\n else:\n PostProbMap+=Phi_l[i][4]*multivariate_normal(Mu[i],Sig[i]).pdf(pos)\n return CostMapProb * PostProbMap\n#^# Ito #^#\n\"\"\"\n\nclass PathPlanner:\n\n #gridmap and costmap から確率の形のCostMapProbを得ておく\n def CostMapProb_jit(self, gridmap, costmap):\n CostMapProb = (100.0 - costmap) / 100.0 #Change the costmap to the probabilistic costmap\n #gridの数値が0(非占有)のところだけ数値を持つようにマスクする\n GridMapProb = 1*(gridmap == 0) #gridmap * (gridmap != 100) * (gridmap != -1) #gridmap[][]が障害物(100)または未探索(-1)であれば確率0にする\n return CostMapProb * GridMapProb\n\n def PostProb_ij(self, Index_temp,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K, CostMapProb):\n if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0): \n X_temp = tools.Array_index_To_Map_coordinates_albert(Index_temp) #map と縦横の座標系の軸が合っているか要確認\n #print(X_temp,Mu\n sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in range(K)]) for c in range(L) ] ##########np.array( ) !!! 
if converted to np.array, numba throws an error\n            PostProb = np.sum( LookupTable_ProbCt * sum_i_GaussMulti ) #sum_c_ProbCtsum_i\n        else:\n            PostProb = 0.0\n        return PostProb\n\n\n    #@jit(parallel=True) #Not parallelized? Only 1 CPU seems to be used\n    def PostProbMap_nparray_jit(self, CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K): #,IndexMap):\n        PostProbMap = np.array([ [ self.PostProb_ij([width, length],Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K, CostMapProb) for width in range(map_width) ] for length in range(map_length) ])\n        return CostMapProb * PostProbMap\n\n\n    #Global path estimation by dynamic programming (calculation of SpCoNavi)\n    def PathPlanner(self, S_Nbest, X_init, THETA, CostMapProb, outputfile, speech_num, outputname): #gridmap, costmap):\n        print(\"[RUN] PathPlanner\")\n        #Unpack THETA\n        W, W_index, Mu, Sig, Pi, Phi_l, K, L = THETA\n\n        #Convert the current position in the ROS coordinate system into a 2-dimension array index\n        X_init_index = X_init ###TEST #Map_coordinates_To_Array_index_albert(X_init)\n        print(\"Initial Xt:\",X_init_index)\n\n        #length and width of the MAP cells\n        map_length = len(CostMapProb)     #len(costmap)\n        map_width  = len(CostMapProb[0])  #len(costmap[0])\n        print(\"MAP[length][width]:\",map_length,map_width)\n\n        #Pre-calculation: do whatever can be computed in advance\n        LookupTable_ProbCt = np.array([multinomial.pmf(S_Nbest, sum(S_Nbest), W[c])*Pi[c] for c in xrange(L)])  #Probability values of the distribution p(St|W_Ct)×p(Ct|Pi) for each Ct\n        ###SaveLookupTable(LookupTable_ProbCt, outputfile)\n        ###LookupTable_ProbCt = ReadLookupTable(outputfile)  #Read the result from the Pre-calculation file (may not differ much from just computing it)\n\n\n        print(\"Please wait for PostProbMap\")\n        output = outputfile + \"N\"+str(N_best)+\"G\"+str(speech_num) + \"_PathWeightMap.csv\"\n        #if ITO == 1:\n        #    PathWeightMap = PostProbMap_nparray_jit_ITO(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #,IndexMap) # Ito\n        #\n        #    #[TEST] Save the computation result first\n        #    save_data.SaveProbMap(PathWeightMap, outputfile, speech_num)\n        #else:\n        if (os.path.isfile(output) == False) or (UPDATE_PostProbMap == 1):  #Do not create it if the file already exists\n            #PathWeightMap = PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #Could be sped up with multiple CPUs #CostMapProb * PostProbMap #For later processing, do not take the log at this point\n            start_PWM_time = time.time()\n            PathWeightMap = self.PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) #,IndexMap) \n            end_PWM_time = time.time()\n            if (SAVE_time == 1):\n                time_pp = end_PWM_time - start_PWM_time #end_recog_time\n                fp = open( outputfile + \"N\"+str(N_best)+\"G\"+str(speech_num) + \"_time_PathWeightMap.txt\", 'w')\n                fp.write(str(time_pp)+\"\\n\")\n                fp.close()\n\n            #[TEST] Save the computation result first\n            save_data.SaveProbMap(PathWeightMap, outputfile, speech_num)\n        else:\n            PathWeightMap = read_data.ReadProbMap(outputfile, speech_num)\n            #print(\"already exists:\", output)\n        print(\"[Done] PathWeightMap.\")\n\n        PathWeightMap_origin = PathWeightMap\n\n\n        #[Memory/processing reduction] Remove from the 2-dimension array all cells farther than T_horizon from the initial position cell (the array becomes [(2*T_horizon)+1][(2*T_horizon)+1])\n        Bug_removal_savior = 1  #Flag to avoid creating bugs during coordinate conversion\n        x_min = X_init_index[0] - T_horizon\n        x_max = X_init_index[0] + T_horizon\n        y_min = X_init_index[1] - T_horizon\n        y_max = X_init_index[1] + T_horizon\n        if (x_min>=0 and x_max<=map_width and y_min>=0 and y_max<=map_length) and (memory_reduction == 1):\n            PathWeightMap = PathWeightMap[x_min:x_max+1, y_min:y_max+1] # X[-T+I[0]:T+I[0],-T+I[1]:T+I[1]]\n            X_init_index = [T_horizon, T_horizon]\n            print(\"Re Initial Xt:\", X_init_index)\n            #Again, length and width of the MAP cells\n            map_length = len(PathWeightMap)\n            
map_width  = len(PathWeightMap[0])\n        elif(memory_reduction == 0):\n            print(\"NO memory reduction process.\")\n            Bug_removal_savior = 1  #Does not create a bug (1)\n        else:\n            print(\"[WARNING] The initial position (or init_pos +/- T_horizon) is outside the map.\")\n            Bug_removal_savior = 1  #Does not create a bug (1)\n            #print(X_init, X_init_index)\n\n        #Reduce the number of states to cut computation (flatten the state space into a one-dimension array ⇒ remove the zero elements)\n        #PathWeight = np.ravel(PathWeightMap)\n        PathWeight_one_NOzero = PathWeightMap[PathWeightMap!=float(0.0)]\n        state_num = len(PathWeight_one_NOzero)\n        print(\"PathWeight_one_NOzero state_num:\", state_num)\n\n        #Keep the correspondence between the map's 2-dimension array indices and the one-dimension array\n        IndexMap = np.array([[(i,j) for j in xrange(map_width)] for i in xrange(map_length)])\n        IndexMap_one_NOzero = IndexMap[PathWeightMap!=float(0.0)].tolist() #Convert to a list first #For implementation reasons, a 2-dimension list is used instead of np.array\n        print(\"IndexMap_one_NOzero\",len(IndexMap_one_NOzero))\n\n        #Initial position on the one-dimension array\n        if (X_init_index in IndexMap_one_NOzero):\n            X_init_index_one = IndexMap_one_NOzero.index(X_init_index)\n        else:\n            print(\"[ERROR] The initial position is not a movable position on the map.\")\n            #print(X_init, X_init_index)\n            X_init_index_one = 0\n            exit()\n        print(\"Initial index\", X_init_index_one)\n\n        #List of candidate destination index coordinates (relative coordinates)\n        MoveIndex_list = self.MovePosition_2D([0,0]) #.tolist()\n        #MoveIndex_list = np.round(MovePosition(X_init_index)).astype(int)\n        print(\"MoveIndex_list\")\n\n        #Run the Viterbi Algorithm\n        Path_one = self.ViterbiPath(X_init_index_one, np.log(PathWeight_one_NOzero), state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init, Bug_removal_savior) #, Transition_one_NOzero)\n\n        #Convert one-dimension array indices to 2-dimension array indices ⇒ into the ROS coordinate system\n        Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])\n        if ( Bug_removal_savior == 0):\n            Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon\n        else:\n            Path_2D_index_original = Path_2D_index\n        Path_ROS = tools.Array_index_To_Map_coordinates_albert(Path_2D_index_original) #Even better if this can be converted into the ROS path format\n\n        #Path = Path_2D_index_original #Path_ROS #Return whichever is needed as Path\n        print(\"Init:\", X_init)\n        print(\"Path:\\n\", Path_2D_index_original.tolist())\n        return Path_2D_index_original, Path_ROS, PathWeightMap_origin, Path_one #, LogLikelihood_step, LogLikelihood_sum\n\n    \n\n    #Candidate movement positions: the 8 cells neighboring the current position (2-dimension array index) plus the current cell itself\n    def MovePosition_2D(self, Xt): \n        if (NANAME == 1):\n            PostPosition_list = np.array([ [-1,-1], [-1,0], [-1,1], [0,-1], [0,0], [0,1], [1,-1], [1,0], [1,1] ])*cmd_vel + np.array(Xt)\n        else:\n            PostPosition_list = np.array([ [-1,0], [0,-1], [0,0], [0,1], [1,0] ])*cmd_vel + np.array(Xt)\n        \n        return PostPosition_list\n    \n\n    #Function for computing the Viterbi path (reference: https://qiita.com/kkdd/items/6cbd949d03bc56e33e8e)\n    def update(self, cost, trans, emiss):\n        COST = 0 #COST, INDEX = range(2)  #0,1\n        arr = [c[COST]+t for c, t in zip(cost, trans)]\n        max_arr = max(arr)\n        #print(max_arr + emiss, arr.index(max_arr))\n        return max_arr + emiss, arr.index(max_arr)\n\n\n    def update_lite(self, cost, n, emiss, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition):\n        #Transition = np.array([approx_log_zero for j in xrange(state_num)]) #Add processing that builds this according to the index number of emiss\n        for i in xrange(len(Transition)):\n            Transition[i] = approx_log_zero #float('-inf') appears to give the same result\n\n        #It suffices to consider transitions only among the single cell currently assumed and its 8 adjacent cells\n        #Index_2D = IndexMap_one_NOzero[n] #.tolist()\n        #print(MoveIndex_list, IndexMap_one_NOzero[n])\n        MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n] #Index_2D 
#Convert to the absolute coordinate system ###This newly raised an error (was Ctrl+C hit in the terminal?)\n        MoveIndex_list_n_list = MoveIndex_list_n.tolist()\n        #print(MoveIndex_list_n_list)\n\n        count_t = 0\n        for c in xrange(len(MoveIndex_list_n_list)): \n            if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):\n                m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  #c is not necessarily a movable state (cell)\n                Transition[m] = 0.0 #1 #Transition probability from state to state (index of this array is not x, y of map)\n                count_t += 1\n                #print(c, MoveIndex_list_n_list[c])\n        \n        #Print an error if something is computationally inconsistent.\n        if (count_t == 0):  #All transition probabilities are 0, which means no movement is possible.\n            print(\"[ERROR] All transition is approx_log_zero.\")\n        #elif (count_t == 1):  #Only one transition probability exists, i.e. only one movable coordinate. (This WARNING may be harmless in some cases?)\n        #    print(\"[WARNING] One transition can move only.\")\n        #elif (count_t != 5):\n        #    print(count_t, MoveIndex_list_n_list)\n        \n        #trans = Transition #np.array(Transition)\n        arr = cost + Transition #trans\n        #max_arr = np.max(arr)\n        max_arr_index = np.argmax(arr)\n        #return max_arr + emiss, np.where(arr == max_arr)[0][0] #np.argmax(arr)#arr.index(max_arr)\n        #print(0.0 in Transition, max_arr_index)\n        return arr[max_arr_index] + emiss, max_arr_index\n\n    \n\n    #Compute the Viterbi path and return the Path (trajectory)\n    def ViterbiPath(self, X_init, PathWeight, state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init_original, Bug_removal_savior): #, Transition):\n        #Path = [[0,0] for t in xrange(T_horizon)]  #Cell index [x,y] at each t\n        print(\"Start Viterbi Algorithm\")\n        \n        COST = 0\n        INDEX = 1 \n        INITIAL = (0.0, X_init)  # (cost, index) #put the initial one-dimension array index into index\n        #print(\"Initial:\",X_init)\n\n        cost = [INITIAL for i in xrange(len(PathWeight))] \n        cost[X_init] = (10.0**10, X_init)  #The initial position is given uniquely (probability log(1.0))\n        trellis = []\n\n        e = PathWeight #emission(nstates[i])\n        m = [i for i in xrange(len(PathWeight))] #Transition #transition(nstates[i-1], nstates[i]) #transition from the previous step to the current one\n        \n        Transition = np.array([approx_log_zero for _ in xrange(state_num)]) #Would otherwise be passed by reference\n\n\n        temp = 1\n        #Forward\n        print(\"Forward\")\n        for i in xrange(T_horizon):  #len(nstates)):  #Move one cell at a time up to the planning horizon +1+1\n            #Do not use i as a separate indicator inside this for loop\n            print(\"T:\",i+1)\n            if (i+1 == T_restart):\n                #outputname_restart = outputfile + \"T\"+str(T_restart)+\"N\"+str(N_best)+\"A\"+str(Approx)+\"S\"+str(init_position_num)+\"G\"+str(speech_num)\n                trellis = read_data.ReadTrellis(outputname, i+1)\n                cost = trellis[-1]\n            if (i+1 >= T_restart):\n                cost_np = np.array([cost[c][COST] for c in xrange(len(cost))])\n                #Transition = np.array([approx_log_zero for j in xrange(state_num)]) #Would otherwise be passed by reference\n\n                #cost = [update_lite(cost_np, t, e[t], state_num,IndexMap_one_NOzero,MoveIndex_list) for t in xrange(len(e))]\n                cost = [self.update_lite(cost_np, t, f, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition) for t, f in zip(m, e)] #izip would be more memory-efficient, but zip runs just as fast\n                trellis.append(cost)\n\n                if (float('inf') in cost) or (float('-inf') in cost) or (float('nan') in cost):\n                    print(\"[ERROR] cost:\", str(cost))\n                #print(\"i\", i, [(c[COST], c[INDEX]) for c in cost]) #Records where the previous node was (where we came from)\n\n                if (SAVE_T_temp == temp):\n                    #Backward temp\n                    last = [trellis[-1][j][0] for j in xrange(len(trellis[-1]))]\n                    path_one = [ np.argmax(last) ] #[last.index(max(last))] #Not needed in the end but required for the computation ⇒ could also keep the max-value index of the last node\n                    #print(\"last\",last,\"max\",path)\n\n                    for stepx in reversed(trellis):\n                        path_one = [stepx[path_one[0]][INDEX]] + path_one\n                        #print(\"x\", len(x), x)\n                    \n                    \"\"\"\n                    ### ERROR workaround ###Seems pointless\n                    re = 1\n                    while (path_one[0] != X_init): #and (path_one[0] == path_one[-1]): # this is the index number after flattening to one dimension #and 
(path_one[0][1] == X_init[1]):\n                        print(\"[InitPOS ERROR]\",re, path_one, X_init)\n\n                        #Backward temp\n                        last = [trellis[-1*re][j][0] for j in xrange(len(trellis[-1*re]))]\n                        path_one = [ np.argmax(last) ] \n\n                        for x in reversed(trellis[0:-1*re]):\n                            path_one = [ x[path_one[0]][INDEX] ] + path_one\n                            #print(\"x\", len(x), x)\n                        re = re + 1\n\n                        if (len(path_one) <= 2):\n                            path_one = [X_init] + path_one\n                    ##################\n                    \"\"\"\n                    \n                    \n\n\n                    path_one = path_one[1:len(path_one)] #Exclude the initial position and the final transition added for processing\n                    \n                    save_data.SavePathTemp(X_init_original, path_one, i+1, outputname, IndexMap_one_NOzero, Bug_removal_savior)\n                    \n                    ##log likelihood \n                    #Recompute the log likelihood value from PathWeight (log) and path_one\n                    LogLikelihood_step = np.zeros(i+1)\n                    LogLikelihood_sum = np.zeros(i+1)\n                    \n                    for t in range(i+1):\n                        LogLikelihood_step[t] = PathWeight[ path_one[t] ]\n                        if (t == 0):\n                            LogLikelihood_sum[t] = LogLikelihood_step[t]\n                        elif (t >= 1):\n                            LogLikelihood_sum[t] = LogLikelihood_sum[t-1] + LogLikelihood_step[t]\n\n                    save_data.SaveLogLikelihood(LogLikelihood_step,0,i+1, outputname)\n                    save_data.SaveLogLikelihood(LogLikelihood_sum,1,i+1, outputname)\n\n                    #The moving distance of the path\n                    Distance = self.PathDistance(path_one)\n                    \n                    #Save the moving distance of the path\n                    save_data.SavePathDistance_temp(Distance, i+1, outputname)\n\n                    if (SAVE_Trellis == 1):\n                        save_data.SaveTrellis(trellis, outputname, i+1)\n                    temp = 0\n                temp += 1\n\n        #The final transition probability should just be uniform\n        e_last = [0.0]\n        m_last = [[0.0 for i in range(len(PathWeight))]]\n        cost = [self.update(cost, t, f) for t, f in zip(m_last, e_last)]\n        trellis.append(cost)\n\n        #Backward\n        print(\"Backward\")\n        #last = [trellis[-1][i][0] for i in xrange(len(trellis[-1]))]\n        path = [0] #[last.index(max(last))] #Not needed in the end but required for the computation ⇒ could also keep the max-value index of the last node\n        #print(\"last\",last,\"max\",path)\n\n        for x in reversed(trellis):\n            path = [x[path[0]][INDEX]] + path\n            #print(\"x\", len(x), x)\n        path = path[1:len(path)-1] #Exclude the initial position and the final transition added for processing\n        print('Maximum prob path:', path)\n        return path\n\n\n    #Compute the moving distance of the path\n    def PathDistance(self, Path):\n        Distance = len(collections.Counter(Path))\n        print(\"Path Distance is \", Distance)\n        return Distance\n\n" ]
[ [ "numpy.log", "numpy.argmax", "numpy.array", "numpy.zeros", "numpy.sum", "scipy.stats.multivariate_normal.pdf" ] ]
collincr/ini_team_13
[ "dca3d88fc31515ec0127cfd03963ce0b5ed735d8" ]
[ "task_scripts/task4.py" ]
[ "import geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n \n x_bounds=(-124, -120)\n y_bounds=(37, 39)\n \n gdf = gpd.read_file(\"../data/geojson/calif_nev_ncei_grav.geojson\")\n \n gdf_subset = gdf[(gdf[\"latitude\"] > y_bounds[0]) &\n (gdf[\"latitude\"] < y_bounds[1]) &\n (gdf[\"longitude\"] > x_bounds[0]) &\n (gdf[\"longitude\"] < x_bounds[1])]\n \n gdf_subset_sorted = gdf_subset.sort_values(by=\"isostatic_anom\",\n ascending=False)\n\n fig, ax = plt.subplots(figsize=(16, 6))\n \n ax.bar(x=np.arange(gdf_subset.shape[0]), width=0.9,\n height=gdf_subset_sorted[\"isostatic_anom\"].values)\n \n fig.savefig(\"task4.png\")" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots" ] ]
dpsnewailab/adou
[ "bea7412202cb17893347e4ff63aab0fb8399bd3b" ]
[ "adou/image/network/EfficientNet.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom adou.image.network.feature import (\n relu_fn,\n round_filters,\n round_repeats,\n drop_connect,\n get_same_padding_conv2d,\n get_model_params,\n efficientnet_params,\n load_pretrained_weights,\n)\n\n\nclass MBConvBlock(nn.Module):\n \"\"\"\n Mobile Inverted Residual Bottleneck Block\n\n Args:\n block_args (namedtuple): BlockArgs, see above\n global_params (namedtuple): GlobalParam, see above\n\n Attributes:\n has_se (bool): Whether the block contains a Squeeze and Excitation layer.\n \"\"\"\n\n def __init__(self, block_args, global_params):\n super().__init__()\n self._block_args = block_args\n self._bn_mom = 1 - global_params.batch_norm_momentum\n self._bn_eps = global_params.batch_norm_epsilon\n self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)\n self.id_skip = block_args.id_skip # skip connection and drop connect\n\n # Get static or dynamic convolution depending on image size\n Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)\n\n # Expansion phase\n inp = self._block_args.input_filters # number of input channels\n oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels\n if self._block_args.expand_ratio != 1:\n self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n # Depthwise convolution phase\n k = self._block_args.kernel_size\n s = self._block_args.stride\n self._depthwise_conv = Conv2d(\n in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise\n kernel_size=k, stride=s, bias=False)\n self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n # Squeeze and Excitation layer, if desired\n if self.has_se:\n num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))\n self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n\n # Output phase\n final_oup = self._block_args.output_filters\n self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n def forward(self, inputs, drop_connect_rate=None):\n \"\"\"\n :param inputs: input tensor\n :param drop_connect_rate: drop connect rate (float, between 0 and 1)\n :return: output of block\n \"\"\"\n\n # Expansion and Depthwise Convolution\n x = inputs\n if self._block_args.expand_ratio != 1:\n x = relu_fn(self._bn0(self._expand_conv(inputs)))\n x = relu_fn(self._bn1(self._depthwise_conv(x)))\n\n # Squeeze and Excitation\n if self.has_se:\n x_squeezed = F.adaptive_avg_pool2d(x, 1)\n x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))\n x = torch.sigmoid(x_squeezed) * x\n\n x = self._bn2(self._project_conv(x))\n\n # Skip connection and drop connect\n input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters\n if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:\n if drop_connect_rate:\n x = drop_connect(x, p=drop_connect_rate, training=self.training)\n x = x + inputs # skip connection\n return x\n\n\nclass EfficientNet(nn.Module):\n \"\"\"\n An EfficientNet model. 
Most easily loaded with the .from_name or .from_pretrained methods\n Args:\n blocks_args (list): A list of BlockArgs to construct blocks\n global_params (namedtuple): A set of GlobalParams shared between blocks\n Example:\n model = EfficientNet.from_pretrained('efficientnet-b0')\n \"\"\"\n\n def __init__(self, blocks_args=None, global_params=None):\n super().__init__()\n assert isinstance(blocks_args, list), 'blocks_args should be a list'\n assert len(blocks_args) > 0, 'block args must be greater than 0'\n self._global_params = global_params\n self._blocks_args = blocks_args\n\n # Get static or dynamic convolution depending on image size\n Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)\n\n # Batch norm parameters\n bn_mom = 1 - self._global_params.batch_norm_momentum\n bn_eps = self._global_params.batch_norm_epsilon\n\n # Stem\n in_channels = 3 # rgb\n out_channels = round_filters(32, self._global_params) # number of output channels\n self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)\n self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n # Build blocks\n self._blocks = nn.ModuleList([])\n for block_args in self._blocks_args:\n\n # Update block input and output filters based on depth multiplier.\n block_args = block_args._replace(\n input_filters=round_filters(block_args.input_filters, self._global_params),\n output_filters=round_filters(block_args.output_filters, self._global_params),\n num_repeat=round_repeats(block_args.num_repeat, self._global_params)\n )\n\n # The first block needs to take care of stride and filter size increase.\n self._blocks.append(MBConvBlock(block_args, self._global_params))\n if block_args.num_repeat > 1:\n block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)\n for _ in range(block_args.num_repeat - 1):\n self._blocks.append(MBConvBlock(block_args, self._global_params))\n\n # Head\n in_channels = block_args.output_filters # output of final block\n out_channels = round_filters(1280, self._global_params)\n self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\n self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n # Final linear layer\n self._dropout = self._global_params.dropout_rate\n self._fc = nn.Linear(out_channels, self._global_params.num_classes)\n\n def extract_features(self, inputs):\n \"\"\" Returns output of the final convolution layer \"\"\"\n\n # Stem\n x = relu_fn(self._bn0(self._conv_stem(inputs)))\n\n # Blocks\n for idx, block in enumerate(self._blocks):\n drop_connect_rate = self._global_params.drop_connect_rate\n if drop_connect_rate:\n drop_connect_rate *= float(idx) / len(self._blocks)\n x = block(x, drop_connect_rate=drop_connect_rate)\n\n # Head\n x = relu_fn(self._bn1(self._conv_head(x)))\n\n return x\n\n def forward(self, inputs):\n \"\"\" Calls extract_features to extract features, applies final linear layer, and returns logits. 
\"\"\"\n\n # Convolution layers\n x = self.extract_features(inputs)\n\n # # Pooling and final linear layer\n # x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)\n # if self._dropout:\n # x = F.dropout(x, p=self._dropout, training=self.training)\n # x = self._fc(x)\n return x\n\n @classmethod\n def from_name(cls, model_name, override_params=None):\n cls._check_model_name_is_valid(model_name)\n blocks_args, global_params = get_model_params(model_name, override_params)\n return EfficientNet(blocks_args, global_params)\n\n @classmethod\n def from_pretrained(cls, model_name, num_classes=1000):\n model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes})\n load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))\n return model\n\n @classmethod\n def get_image_size(cls, model_name):\n cls._check_model_name_is_valid(model_name)\n _, _, res, _ = efficientnet_params(model_name)\n return res\n\n @classmethod\n def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):\n \"\"\" Validates model name. None that pretrained weights are only available for\n the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. \"\"\"\n num_models = 4 if also_need_pretrained_weights else 8\n valid_models = ['efficientnet_b' + str(i) for i in range(num_models)]\n if model_name.replace('-', '_') not in valid_models:\n raise ValueError('model_name should be one of: ' + ', '.join(valid_models))\n" ]
[ [ "torch.sigmoid", "torch.nn.ModuleList", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.BatchNorm2d" ] ]
niketanpatel/mds-provider-services
[ "230c14dac1cbc3e22bf07751ea2d796b78c0a0d2" ]
[ "analytics/query.py" ]
[ "from mds.db.load import data_engine\nimport os\nimport pandas\n\n\ndef parse_db_env():\n \"\"\"\n Gets the required database configuration out of the Environment.\n\n Returns dict:\n - user\n - password\n - db\n - host\n - port\n \"\"\"\n try:\n user, password = os.environ[\"MDS_USER\"], os.environ[\"MDS_PASSWORD\"]\n except:\n print(\"The MDS_USER or MDS_PASSWORD environment variables are not set. Exiting.\")\n exit(1)\n\n try:\n db = os.environ[\"MDS_DB\"]\n except:\n print(\"The MDS_DB environment variable is not set. Exiting.\")\n exit(1)\n\n try:\n host = os.environ[\"POSTGRES_HOSTNAME\"]\n except:\n print(\"The POSTGRES_HOSTNAME environment variable is not set. Exiting.\")\n exit(1)\n\n try:\n port = os.environ[\"POSTGRES_HOST_PORT\"]\n except:\n port = 5432\n print(\"No POSTGRES_HOST_PORT environment variable set, defaulting to:\", port)\n\n return { \"user\": user, \"password\": password, \"db\": db, \"host\": host, \"port\": port }\n\nENGINE = data_engine(**parse_db_env())\n\n\nclass TimeQuery:\n \"\"\"\n Represents a query over a time period.\n \"\"\"\n\n def __init__(self, start, end, **kwargs):\n \"\"\"\n Initialize a new `TimeQuery` with the given parameters.\n\n Required positional arguments:\n\n :start: A python datetime, ISO8601 datetime string, or Unix timestamp for the beginning of the interval.\n\n :end: A python datetime, ISO8601 datetime string, or Unix timestamp for the end of the interval.\n\n Supported optional keyword arguments:\n\n :engine: A `sqlalchemy.engine.Engine` representing a connection to the database.\n\n :table: Name of the table or view containing the source records. This is required either at initialization or query time.\n\n :provider_name: The name of a provider, as found in the providers registry.\n\n :vehicle_types: vehicle_type or list of vehicle_type to further restrict the query.\n\n :order_by: Column name(s) for the ORDER BY clause.\n\n :local: False (default) to query the Unix time data columns; True to query the local time columns.\n\n :debug: False (default) to supress debug messages; True to print debug messages.\n \"\"\"\n if not start or not end:\n raise ValueError(\"Start and End are required.\")\n\n self.start = start\n self.end = end\n self.engine = kwargs.get(\"engine\")\n self.table = kwargs.get(\"table\")\n self.provider_name = kwargs.get(\"provider_name\")\n self.vehicle_types = kwargs.get(\"vehicle_types\")\n self.order_by = kwargs.get(\"order_by\", \"\")\n self.local = kwargs.get(\"local\", False)\n self.debug = kwargs.get(\"debug\", False)\n\n def get(self, **kwargs):\n \"\"\"\n Execute a query against this `Query`'s table.\n\n Supported optional keyword arguments:\n\n :engine: A `sqlalchemy.engine.Engine` representing a connection to the database.\n\n :table: Name of the table or view containing the source records. 
This is required either at initialization or query time.\n\n :provider_name: The name of a provider, as found in the providers registry.\n\n :vehicle_types: vehicle_type or list of vehicle_type to further restrict the query.\n\n :predicates: Additional predicates that will be ANDed to the WHERE clause (e.g `vehicle_id = '1234'`).\n\n :order_by: Column name(s) for the ORDER BY clause.\n\n :returns: A `pandas.DataFrame` of trips from the given provider, crossing this query's time range.\n \"\"\"\n table = kwargs.get(\"table\", self.table)\n if not table:\n raise ValueError(\"This query does not specify a table.\")\n\n engine = kwargs.get(\"engine\", self.engine or ENGINE)\n\n start_time = \"start_time_local\" if self.local else \"start_time\"\n end_time = \"end_time_local\" if self.local else \"end_time\"\n\n predicates = kwargs.get(\"predicates\", [])\n predicates = [predicates] if not isinstance(predicates, list) else predicates\n\n provider_name = kwargs.get(\"provider_name\", self.provider_name)\n\n if provider_name:\n predicates.append(f\"provider_name = '{provider_name or self.provider_name}'\")\n\n vts = \"'::vehicle_types,'\"\n vehicle_types = kwargs.get(\"vehicle_types\", self.vehicle_types)\n if vehicle_types:\n if not isinstance(vehicle_types, list):\n vehicle_types = [vehicle_types]\n predicates.append(f\"vehicle_type IN ('{vts.join(vehicle_types)}'::vehicle_types)\")\n\n predicates = \" AND \".join(predicates)\n\n order_by = kwargs.get(\"order_by\", self.order_by)\n if order_by:\n if not isinstance(order_by, list):\n order_by = [order_by]\n order_by = \",\".join(order_by)\n order_by = f\"ORDER BY {order_by}\"\n\n sql = f\"\"\"\n SELECT\n *\n FROM\n {self.table}\n WHERE\n {predicates} AND\n (({start_time} <= %(start)s AND {end_time} > %(start)s) OR\n ({start_time} < %(end)s AND {end_time} >= %(end)s) OR\n ({start_time} >= %(start)s AND {end_time} <= %(end)s) OR\n ({start_time} < %(end)s AND {end_time} IS NULL))\n {order_by};\n \"\"\"\n\n if self.debug:\n print(\"Sending query:\")\n print(sql)\n\n data = pandas.read_sql(sql, engine, params={\"start\": self.start, \"end\": self.end}, index_col=None)\n\n if self.debug:\n print(f\"Got {len(data)} results\")\n\n return data\n\n\nclass Availability(TimeQuery):\n \"\"\"\n Represents a query of the availability view for a particular provider.\n \"\"\"\n\n DEFAULT_TABLE = \"availability\"\n\n def __init__(self, start, end, **kwargs):\n \"\"\"\n Initialize a new `Availability` query with the given parameters.\n\n Required positional arguments:\n\n :start: A python datetime, ISO8601 datetime string, or Unix timestamp for the beginning of the interval.\n\n :end: A python datetime, ISO8601 datetime string, or Unix timestamp for the end of the interval.\n\n Supported optional keyword arguments:\n\n :start_types: event_type or list of event_type to restrict the `start_event_type` (e.g. `available`).\n\n :end_types: event_type or list of event_type to restrict the `end_event_type` (e.g. `available`).\n\n See `TimeQuery` for additional optional keyword arguments.\n \"\"\"\n self.start_types = kwargs.get(\"start_types\")\n self.end_types = kwargs.get(\"end_types\")\n\n kwargs[\"table\"] = kwargs.get(\"table\", self.DEFAULT_TABLE)\n\n super().__init__(start, end, **kwargs)\n\n def get(self, **kwargs):\n \"\"\"\n Execute a query against the availability view.\n\n Supported optional keyword arguments:\n\n :start_types: event_type or list of event_type to restrict the start_event_type (e.g. 
`available`).\n\n :end_types: event_type or list of event_type to restrict the end_event_type (e.g. `available`).\n\n See `TimeQuery` for additional optional keyword arguments.\n\n :returns: A `pandas.DataFrame` of events from the given provider, crossing this query's time range.\n \"\"\"\n predicates = []\n\n if \"predicates\" in kwargs:\n predicates = kwargs.get(\"predicates\", [])\n predicates = [predicates] if not isinstance(predicates, list) else predicates\n\n ets = \"'::event_types,'\"\n start_types = kwargs.get(\"start_types\", self.start_types)\n end_types = kwargs.get(\"end_types\", self.end_types)\n\n if start_types:\n if not isinstance(start_types, list):\n start_types = [start_types]\n predicates.append(f\"start_event_type IN ('{ets.join(start_types)}'::event_types)\")\n\n if end_types:\n if not isinstance(end_types, list):\n end_types = [end_types]\n predicates.append(f\"end_event_type IN ('{ets.join(end_types)}'::event_types)\")\n\n kwargs[\"predicates\"] = predicates\n\n return super().get(**kwargs)\n\n\nclass Trips(TimeQuery):\n \"\"\"\n Represents a query of the trips table for a particular provider.\n \"\"\"\n\n DEFAULT_TABLE = \"trips\"\n\n def __init__(self, start, end, **kwargs):\n \"\"\"\n Initialize a new `Trips` query with the given parameters.\n\n Required positional arguments:\n\n :start: A python datetime, ISO8601 datetime string, or Unix timestamp for the beginning of the interval.\n\n :end: A python datetime, ISO8601 datetime string, or Unix timestamp for the end of the interval.\n\n Supported optional keyword arguments:\n\n See `TimeQuery` for additional optional keyword arguments.\n \"\"\"\n kwargs[\"table\"] = kwargs.get(\"table\", self.DEFAULT_TABLE)\n\n super().__init__(start, end, **kwargs)\n\n def get(self, **kwargs):\n \"\"\"\n Execute a query against trip records.\n\n Supported optional keyword arguments:\n\n See `TimeQuery` for additional optional keyword arguments.\n\n :returns: A `pandas.DataFrame` of trips from the given provider, crossing this query's time range.\n \"\"\"\n return super().get(**kwargs)\n" ]
[ [ "pandas.read_sql" ] ]
DMIU-ShELL/deeprl-shell
[ "a7845ab1c4967ba2af9486625086c3d0b176d293" ]
[ "run_shell.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\n'''\nShared Experience Lifelong Learning (ShELL) experiments\nMulti-agent continual lifelong learners\n\nEach agent is a ppo agent with supermask superposition \nlifelong learning algorithm.\nhttps://arxiv.org/abs/2006.14769\n'''\n\nimport json\nimport copy\nimport shutil\nimport matplotlib\nmatplotlib.use(\"Pdf\")\nfrom deep_rl import *\nimport os\nimport argparse\n\n# helper function\ndef global_config(config, name):\n config.env_name = name\n config.env_config_path = None\n config.lr = 0.00015\n config.cl_preservation = 'supermask'\n config.seed = 9157\n random_seed(config.seed)\n config.log_dir = None\n config.logger = None \n config.num_workers = 4\n config.optimizer_fn = lambda params, lr: torch.optim.RMSprop(params, lr=lr)\n\n config.policy_fn = SamplePolicy\n #config.state_normalizer = ImageNormalizer()\n # rescale state normaliser: suitable for grid encoding of states in minigrid\n config.state_normalizer = RescaleNormalizer(1./10.)\n config.discount = 0.99\n config.use_gae = True\n config.gae_tau = 0.99\n config.entropy_weight = 0.1 #0.75\n config.rollout_length = 128\n config.optimization_epochs = 8\n config.num_mini_batches = 64\n config.ppo_ratio_clip = 0.1\n config.iteration_log_interval = 1\n config.gradient_clip = 5\n config.max_steps = 1e3\n config.evaluation_episodes = 10\n config.cl_requires_task_label = True\n config.task_fn = None\n config.eval_task_fn = None\n config.network_fn = None \n config.eval_interval = 25\n return config\n\n'''\nshared experience lifelong learning (ShELL)\nlifelong (continual) learning algorithm for each ShELL agent: supermask superposition\nRL agent/algorithm: PPO\n'''\ndef shell_minigrid(name, args):\n shell_config_path = args.shell_config_path\n env_config_path = args.env_config_path\n\n with open(shell_config_path, 'r') as f:\n shell_config = json.load(f)\n agents = []\n num_agents = len(shell_config['agents'])\n \n # set up logging system\n exp_id = ''\n log_dir = get_default_log_dir(name + '-shell' + exp_id)\n logger = get_logger(log_dir=log_dir, file_name='train-log')\n\n # create/initialise agents\n for idx in range(num_agents):\n logger.info('*****initialising agent {0}'.format(idx))\n config = Config()\n config = global_config(config, name)\n # task may repeat, so get number of unique tasks.\n num_tasks = len(set(shell_config['agents'][idx]['task_ids'])) \n config.cl_num_tasks = num_tasks\n config.task_ids = shell_config['agents'][idx]['task_ids']\n if isinstance(shell_config['agents'][idx]['max_steps'], list):\n config.max_steps = shell_config['agents'][idx]['max_steps']\n else:\n config.max_steps = [shell_config['agents'][idx]['max_steps'], ] * num_tasks\n task_fn = lambda log_dir: MiniGridFlatObs(name, env_config_path, log_dir, config.seed, False)\n config.task_fn = lambda: ParallelizedTask(task_fn,config.num_workers,log_dir=config.log_dir)\n eval_task_fn= lambda log_dir: MiniGridFlatObs(name, env_config_path,log_dir,config.seed,True)\n config.eval_task_fn = eval_task_fn\n config.network_fn = lambda state_dim, action_dim, label_dim: CategoricalActorCriticNet_SS(\\\n state_dim, action_dim, label_dim, \n phi_body=FCBody_SS(state_dim, task_label_dim=label_dim, \\\n hidden_units=(200, 200, 200), num_tasks=num_tasks), \n 
actor_body=DummyBody_CL(200), \n critic_body=DummyBody_CL(200),\n num_tasks=num_tasks)\n\n agent = ShellAgent_SP(config)\n config.agent_name = agent.__class__.__name__ + '_{0}'.format(idx)\n agents.append(agent)\n\n shell_train(agents, logger)\n\nif __name__ == '__main__':\n mkdir('log')\n set_one_thread()\n select_device(0) # -1 is CPU, a positive integer is the index of GPU\n\n # minigrid experiments\n name = 'MiniGrid'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--shell_config_path', help='shell config', default='./shell.json')\n parser.add_argument('--env_config_path',help='environment config', \\\n default='./env_configs/minigrid_sc_3.json')\n args = parser.parse_args()\n shell_minigrid(name, args)\n" ]
[ [ "matplotlib.use" ] ]
labdoyon/declarativeTask
[ "bb66bee338009c43f123863941ea64ec38237366" ]
[ "src/ld_example.py" ]
[ "import sys\n\nimport numpy as np\nfrom expyriment import control, stimuli, io, design, misc\nfrom expyriment.misc import constants\nfrom expyriment.misc._timer import get_time\n\nfrom ld_matrix import LdMatrix\nfrom ld_utils import setCursor, newRandomPresentation, readMouse, path_leaf\nfrom config import *\n\nif not windowMode: # Check WindowMode and Resolution\n control.defaults.window_mode = windowMode\n control.defaults.window_size = misc.get_monitor_resolution()\n windowSize = control.defaults.window_size\nelse:\n control.defaults.window_mode = windowMode\n control.defaults.window_size = windowSize\n\narguments = str(''.join(sys.argv[1:])).split(',') # Get arguments - experiment name and subject\n\nexperimentName = arguments[0]\nsubjectName = arguments[1]\n\nexp = design.Experiment(experimentName) # Save experiment name\nexp.add_experiment_info(['Subject: ']) # Save Subject Code\nexp.add_experiment_info([subjectName]) # Save Subject Code\n\n# Save time, nblocks, position, correctAnswer, RT\nexp.add_data_variable_names(['Time', 'NBlock', 'Picture', 'Answers', 'RT'])\n\nm = LdMatrix(matrixSize, windowSize) # Create Matrix\n\ninstructionRectangle = stimuli.Rectangle(size=(windowSize[0], m.gap * 2 + cardSize[1]), position=(\n 0, -windowSize[1]/float(2) + (2 * m.gap + cardSize[1])/float(2)), colour=constants.C_DARKGREY)\n\npicturesExamples = np.random.permutation(picturesExamples)\n\npresentationOrder = newRandomPresentation()\npresentationOrder = presentationOrder[0:3]\n\ncontrol.initialize(exp)\ncontrol.start(exp, auto_create_subject_id=True, skip_ready_screen=True)\n\nnPict = 0\nfor nCard in presentationOrder:\n m._matrix.item(nCard).setPicture(picturesExamplesFolder + picturesExamples[nPict])\n nPict += 1\n\nmouse = io.Mouse() # Create Mouse instance\nmouse.set_logging(True) # Log mouse\nmouse.hide_cursor(True, True) # Hide cursor\n\nsetCursor(arrow)\n\nbs = stimuli.BlankScreen(bgColor) # Create blank screen\nm.plotDefault(bs, True) # Draw default grid\n\nexp.clock.wait(shortRest)\n\nexp.add_experiment_info(['Block {} - Presentation'.format(0)]) # Add listPictures\nexp.add_experiment_info(presentationOrder) # Add listPictures\n\ninstructions = stimuli.TextLine(' PRESENTATION ',\n position=(0, -windowSize[1]/float(2) + (2*m.gap + cardSize[1])/float(2)),\n text_font=None, text_size=textSize, text_bold=None, text_italic=None,\n text_underline=None, text_colour=textColor,\n background_colour=bgColor,\n max_width=None)\ninstructionRectangle.plot(bs)\ninstructions.plot(bs)\nbs.present(False, True)\n\nexp.clock.wait(shortRest) # Short Rest between presentation and cue-recall\n\ninstructionRectangle.plot(bs)\nbs.present(False, True)\n\nexp.clock.wait(shortRest)\n\nfor nCard in presentationOrder:\n mouse.hide_cursor(True, True)\n m.plotCard(nCard, True, bs, True) # Show Location for ( 2s )\n exp.clock.wait(presentationCard)\n m.plotCard(nCard, False, bs, True)\n\n ISI = design.randomize.rand_int(min_max_ISI[0], min_max_ISI[1])\n exp.clock.wait(ISI)\n\npresentationOrder = np.random.permutation(presentationOrder)\nexp.clock.wait(shortRest) # Short Rest between presentation and cue-recall\n\ninstructions = stimuli.TextLine(' TEST ',\n position=(0, -windowSize[1]/float(2) + (2*m.gap + cardSize[1])/float(2)),\n text_font=None, text_size=textSize, text_bold=None, text_italic=None,\n text_underline=None, text_colour=textColor,\n background_colour=bgColor,\n max_width=None)\ninstructionRectangle.plot(bs)\ninstructions.plot(bs)\nbs.present(False, True)\n\nexp.clock.wait(shortRest) # Short Rest 
between presentation and cue-recall\n\ninstructionRectangle.plot(bs)\nbs.present(False, True)\n\ncorrectAnswers = 0\n\nexp.add_experiment_info(['Block {} - Test'.format(0)]) # Add listPictures\nexp.add_experiment_info(presentationOrder) # Add listPictures\n\nfor nCard in presentationOrder:\n\n m._cueCard.setPicture(m._matrix.item(nCard).stimuli[0].filename) # Associate Picture to CueCard\n\n m.plotCueCard(True, bs, True) # Show Cue\n\n exp.clock.wait(presentationCard) # Wait presentationCard\n\n m.plotCueCard(False, bs, True) # Hide Cue\n\n mouse.show_cursor(True, True)\n\n start = get_time()\n rt, position = readMouse(start, mouseButton, responseTime)\n\n mouse.hide_cursor(True, True)\n\n if rt is not None:\n\n currentCard = m.checkPosition(position)\n\n if currentCard is not None:\n m._matrix.item(currentCard).color = clickColor\n m.plotCard(currentCard, False, bs, True)\n\n exp.clock.wait(clicPeriod) # Wait 500ms\n\n m._matrix.item(currentCard).color = cardColor\n m.plotCard(currentCard, False, bs, True)\n\n if currentCard == nCard:\n correctAnswers += 1\n exp.data.add([exp.clock.time, 0,\n path_leaf(m._matrix.item(nCard).stimuli[0].filename),\n path_leaf(m._matrix.item(currentCard).stimuli[0].filename),\n rt])\n\n elif currentCard is None:\n exp.data.add([exp.clock.time, 0,\n path_leaf(m._matrix.item(nCard).stimuli[0].filename),\n None,\n rt])\n else:\n exp.data.add([exp.clock.time, 0,\n path_leaf(m._matrix.item(nCard).stimuli[0].filename),\n path_leaf(m._matrix.item(currentCard).stimuli[0].filename),\n rt])\n else:\n exp.data.add([exp.clock.time, 0,\n path_leaf(m._matrix.item(nCard).stimuli[0].filename),\n None,\n rt])\n\n ISI = design.randomize.rand_int(min_max_ISI[0], min_max_ISI[1])\n exp.clock.wait(ISI)\n\n\nif correctAnswers == 3:\n instructions = stimuli.TextLine(' PERFECT ',\n position=(0, -windowSize[1]/float(2) + (2*m.gap + cardSize[1])/float(2)),\n text_font=None, text_size=textSize, text_bold=None, text_italic=None,\n text_underline=None, text_colour=textColor,\n background_colour=bgColor,\n max_width=None)\n instructions.plot(bs)\n bs.present(False, True)\n\n exp.clock.wait(responseTime)\n\n\n" ]
[ [ "numpy.random.permutation" ] ]
Aamer98/cdfsl-benchmark
[ "9464f04269372362b6989b2de701b4496c7e19d8" ]
[ "backbone.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport math\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.nn.utils.weight_norm import WeightNorm\n\ndef init_layer(L):\n # Initialization using fan-in\n if isinstance(L, nn.Conv2d):\n n = L.kernel_size[0]*L.kernel_size[1]*L.out_channels\n L.weight.data.normal_(0,math.sqrt(2.0/float(n)))\n elif isinstance(L, nn.BatchNorm2d):\n pass\n #L.weight.data.fill_(1)\n #L.bias.data.fill_(0)\n\nclass Flatten(nn.Module):\n def __init__(self):\n super(Flatten, self).__init__()\n \n def forward(self, x): \n return x.view(x.size(0), -1)\n\n# Simple ResNet Block\nclass SimpleBlock(nn.Module):\n maml = False #Default\n def __init__(self, indim, outdim, half_res):\n super(SimpleBlock, self).__init__()\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)\n self.BN1 = nn.BatchNorm2d(outdim, affine = False)\n \n self.C2 = nn.Conv2d(outdim, outdim,kernel_size=3, padding=1,bias=False)\n self.BN2 = nn.BatchNorm2d(outdim, affine = False)\n\n self.relu1 = nn.ReLU(inplace=True)\n self.relu2 = nn.ReLU(inplace=True)\n\n self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]\n\n self.half_res = half_res\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)\n self.BNshortcut = nn.BatchNorm2d(outdim, affine = False)\n\n self.parametrized_layers.append(self.shortcut)\n self.parametrized_layers.append(self.BNshortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n def forward(self, x):\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu1(out)\n\n out = self.C2(out)\n out = self.BN2(out)\n short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))\n out = out + short_out\n out = self.relu2(out)\n return out\n\n# Bottleneck block\nclass BottleneckBlock(nn.Module):\n def __init__(self, indim, outdim, half_res):\n super(BottleneckBlock, self).__init__()\n bottleneckdim = int(outdim/4)\n self.indim = indim\n self.outdim = outdim\n\n self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)\n self.BN1 = nn.BatchNorm2d(bottleneckdim, affine = False)\n self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)\n self.BN2 = nn.BatchNorm2d(bottleneckdim, affine = False)\n self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)\n self.BN3 = nn.BatchNorm2d(outdim, affine = False)\n\n self.relu = nn.ReLU()\n self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]\n self.half_res = half_res\n\n\n # if the input number of channels is not equal to the output, then need a 1x1 convolution\n if indim!=outdim:\n\n self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)\n\n self.parametrized_layers.append(self.shortcut)\n self.shortcut_type = '1x1'\n else:\n self.shortcut_type = 'identity'\n\n for layer in self.parametrized_layers:\n init_layer(layer)\n\n\n def forward(self, x):\n\n short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)\n out = self.C1(x)\n out = self.BN1(out)\n out = self.relu(out)\n out = self.C2(out)\n out = self.BN2(out)\n out = self.relu(out)\n out = self.C3(out)\n out = self.BN3(out)\n out = out + short_out\n\n out 
= self.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self,block,list_of_num_layers, list_of_out_dims, flatten = False):\n # list_of_num_layers specifies number of layers in each stage\n # list_of_out_dims specifies number of output channel for each stage\n super(ResNet,self).__init__()\n assert len(list_of_num_layers)==4, 'Can have only four stages'\n\n conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n bn1 = nn.BatchNorm2d(64, affine = False)\n\n relu = nn.ReLU()\n pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n init_layer(conv1)\n init_layer(bn1)\n\n trunk = [conv1, bn1, relu, pool1]\n\n indim = 64\n for i in range(4):\n\n for j in range(list_of_num_layers[i]):\n half_res = (i>=1) and (j==0)\n B = block(indim, list_of_out_dims[i], half_res)\n trunk.append(B)\n indim = list_of_out_dims[i]\n\n if flatten:\n avgpool = nn.AvgPool2d(7)\n trunk.append(avgpool)\n trunk.append(Flatten())\n self.final_feat_dim = indim\n else:\n self.final_feat_dim = [ indim, 7, 7]\n\n self.trunk = nn.Sequential(*trunk)\n\n def forward(self,x):\n out = self.trunk(x)\n return out\n\ndef ResNet10( flatten = True):\n return ResNet(SimpleBlock, [1,1,1,1],[64,128,256,512], flatten)\n\n\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
tqslj2/openspeech
[ "10307587f08615224df5a868fb5249c68c70b12d" ]
[ "tests/test_acoustic_models/test_quartznet10x5.py" ]
[ "import unittest\nimport torch\nimport torch.nn as nn\nimport logging\n\nfrom openspeech.criterion.ctc.ctc import CTCLossConfigs\nfrom openspeech.models import QuartzNet10x5Model, QuartzNet10x5Configs\nfrom openspeech.utils import DUMMY_INPUTS, DUMMY_INPUT_LENGTHS, DUMMY_TARGETS, DUMMY_TARGET_LENGTHS, build_dummy_configs\nfrom openspeech.tokenizers.ksponspeech.character import KsponSpeechCharacterTokenizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestQuartzNet10x5(unittest.TestCase):\n def test_forward(self):\n configs = build_dummy_configs(model_configs=QuartzNet10x5Configs(), criterion_configs=CTCLossConfigs())\n\n vocab = KsponSpeechCharacterTokenizer(configs)\n model = QuartzNet10x5Model(configs, vocab)\n\n criterion = nn.CTCLoss(blank=3, reduction='mean', zero_infinity=True)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-04)\n\n for i in range(3):\n outputs = model(DUMMY_INPUTS, DUMMY_INPUT_LENGTHS)\n\n loss = criterion(\n outputs[\"logits\"].transpose(0, 1),\n DUMMY_TARGETS[:, 1:],\n outputs[\"output_lengths\"],\n DUMMY_TARGET_LENGTHS,\n )\n loss.backward()\n optimizer.step()\n assert type(loss.item()) == float\n\n def test_training_step(self):\n configs = build_dummy_configs(model_configs=QuartzNet10x5Configs(), criterion_configs=CTCLossConfigs())\n\n vocab = KsponSpeechCharacterTokenizer(configs)\n model = QuartzNet10x5Model(configs, vocab)\n\n for i in range(3):\n outputs = model.training_step(\n batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i\n )\n assert type(outputs[\"loss\"].item()) == float\n\n def test_validation_step(self):\n configs = build_dummy_configs(model_configs=QuartzNet10x5Configs(), criterion_configs=CTCLossConfigs())\n\n vocab = KsponSpeechCharacterTokenizer(configs)\n model = QuartzNet10x5Model(configs, vocab)\n\n for i in range(3):\n outputs = model.validation_step(\n batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i\n )\n assert type(outputs[\"loss\"].item()) == float\n\n def test_test_step(self):\n configs = build_dummy_configs(model_configs=QuartzNet10x5Configs(), criterion_configs=CTCLossConfigs())\n\n vocab = KsponSpeechCharacterTokenizer(configs)\n model = QuartzNet10x5Model(configs, vocab)\n\n for i in range(3):\n outputs = model.test_step(\n batch=(DUMMY_INPUTS, DUMMY_TARGETS, DUMMY_INPUT_LENGTHS, DUMMY_TARGET_LENGTHS), batch_idx=i\n )\n assert type(outputs[\"loss\"].item()) == float\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.nn.CTCLoss" ] ]
cheginit/spotpy
[ "38feaf7dbb0ddcbf31e138519ef649f07ac0cded", "38feaf7dbb0ddcbf31e138519ef649f07ac0cded" ]
[ "tests/test_database.py", "spotpy/algorithms/dds.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nCopyright (c) 2018 by Tobias Houska\nThis file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).\n:author: Tobias Houska\n'''\nimport unittest\nimport os\nimport glob\nimport spotpy\nimport spotpy.database as db\nimport numpy as np\n\n#https://docs.python.org/3/library/unittest.html\n\nclass MockSetup:\n \"\"\"\n Mock class to use the save function of a spotpy setup\n \"\"\"\n def save(self, *args, **kwargs):\n pass\n\n\nclass TestDatabase(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.parnames = ['x1', 'x2', 'x3', 'x4', 'x5']\n self.like = 0.213\n self.randompar = [175.21733934706367, 0.41669126598819262, 0.25265012080652388, 0.049706767415682945, 0.69674090782836173]\n\n self.simulations_multi = []\n for i in range(5):\n self.simulations_multi.append(np.random.uniform(0, 1, 5).tolist())\n\n self.simulations = np.random.uniform(0, 1, 5)\n\n @classmethod\n def tearDownClass(self):\n for filename in glob.glob(\"UnitTest_tmp*\"):\n os.remove(filename)\n\n def objf(self):\n return np.random.uniform(0, 1, 1)[0]\n\n def test_csv_multiline(self):\n csv = db.get_datawriter('csv', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=True)\n\n csv.save(self.like, self.randompar, self.simulations_multi)\n csv.save(self.like, self.randompar, self.simulations_multi)\n # Save Simulations\n\n csv.finalize()\n csvdata = csv.getdata()\n self.assertEqual(str(type(csvdata)), str(type(np.array([]))))\n self.assertEqual(len(csvdata[0]), 32)\n self.assertEqual(len(csvdata), 2)\n self.assertEqual(len(csv.header), 32)\n\n\n def test_csv_multiline_false(self):\n # Save not Simulations\n csv = db.get_datawriter('csv', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=False)\n\n csv.save(self.like, self.randompar, self.simulations_multi)\n csv.save(self.like, self.randompar, self.simulations_multi)\n\n csv.finalize()\n csvdata = csv.getdata()\n self.assertEqual(str(type(csvdata)), str(type(np.array([]))))\n self.assertEqual(len(csvdata[0]), 7)\n self.assertEqual(len(csvdata), 2)\n self.assertEqual(len(csv.header), 7)\n\n def test_csv_single(self):\n csv = db.get_datawriter('csv', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=True)\n\n csv.save(self.like, self.randompar, self.simulations)\n csv.save(self.like, self.randompar, self.simulations)\n\n csv.finalize()\n csvdata = csv.getdata()\n self.assertEqual(str(type(csvdata)), str(type(np.array([]))))\n self.assertEqual(len(csvdata[0]), 12)\n self.assertEqual(len(csvdata), 2)\n self.assertEqual(len(csv.header), 12)\n\n def test_csv_append(self):\n csv = db.get_datawriter(\n 'csv', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True,\n )\n\n csv.save(self.like, self.randompar, self.simulations)\n csv.save(self.like, self.randompar, self.simulations)\n csv.finalize()\n\n csv_new = db.get_datawriter(\n 'csv', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True,\n dbappend=True\n )\n\n csv_new.save(self.like, self.randompar, self.simulations)\n csv_new.save(self.like, self.randompar, self.simulations)\n csv_new.finalize()\n\n csvdata = csv_new.getdata()\n self.assertEqual(len(csvdata), 4)\n\n def test_csv_single_false(self):\n csv = db.get_datawriter('csv', \"UnitTest_tmp\", 
self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=False)\n\n csv.save(self.like, self.randompar, self.simulations)\n csv.save(self.like, self.randompar, self.simulations)\n\n csv.finalize()\n csvdata = csv.getdata()\n self.assertEqual(str(type(csvdata)), str(type(np.array([]))))\n self.assertEqual(len(csvdata[0]), 7)\n self.assertEqual(len(csvdata), 2)\n self.assertEqual(len(csv.header), 7)\n\n def test_hdf5_multiline(self):\n hdf5 = db.get_datawriter('hdf5', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=True)\n\n hdf5.save(self.like, self.randompar, self.simulations_multi)\n hdf5.save(self.like, self.randompar, self.simulations_multi)\n # Save Simulations\n\n hdf5.finalize()\n hdf5data = hdf5.getdata()\n self.assertEqual(str(type(hdf5data)), str(type(np.array([]))))\n self.assertEqual(len(hdf5data[0]), 8)\n self.assertEqual(len(hdf5data), 2)\n self.assertEqual(len(hdf5.header), 32)\n\n\n def test_hdf5_multiline_false(self):\n # Save not Simulations\n hdf5 = db.get_datawriter('hdf5', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=False)\n\n hdf5.save(self.like, self.randompar, self.simulations_multi)\n hdf5.save(self.like, self.randompar, self.simulations_multi)\n\n hdf5.finalize()\n hdf5data = hdf5.getdata()\n self.assertEqual(str(type(hdf5data)), str(type(np.array([]))))\n self.assertEqual(len(hdf5data[0]), 7)\n self.assertEqual(len(hdf5data), 2)\n self.assertEqual(len(hdf5.header), 7)\n\n def test_hdf5_single(self):\n hdf5 = db.get_datawriter('hdf5', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=True)\n\n hdf5.save(self.like, self.randompar, self.simulations)\n hdf5.save(self.like, self.randompar, self.simulations)\n\n hdf5.finalize()\n hdf5data = hdf5.getdata()\n self.assertEqual(str(type(hdf5data)), str(type(np.array([]))))\n self.assertEqual(len(hdf5data[0]), 8)\n self.assertEqual(len(hdf5data), 2)\n self.assertEqual(len(hdf5.header), 12)\n\n def test_hdf5_append(self):\n hdf5 = db.get_datawriter(\n 'hdf5', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True,\n )\n\n hdf5.save(self.like, self.randompar, self.simulations)\n hdf5.save(self.like, self.randompar, self.simulations)\n hdf5.finalize()\n\n hdf5_new = db.get_datawriter(\n 'hdf5', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True,\n dbappend=True\n )\n\n hdf5_new.save(self.like, self.randompar, self.simulations)\n hdf5_new.save(self.like, self.randompar, self.simulations)\n hdf5_new.finalize()\n\n hdf5data = hdf5_new.getdata()\n self.assertEqual(len(hdf5data), 4)\n\n\n def test_hdf5_single_false(self):\n hdf5 = db.get_datawriter('hdf5', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=False)\n\n hdf5.save(self.like, self.randompar, self.simulations)\n hdf5.save(self.like, self.randompar, self.simulations)\n\n hdf5.finalize()\n hdf5data = hdf5.getdata()\n self.assertEqual(str(type(hdf5data)), str(type(np.array([]))))\n self.assertEqual(len(hdf5data[0]), 7)\n self.assertEqual(len(hdf5data), 2)\n self.assertEqual(len(hdf5.header), 7)\n\n\n def test_sql_multiline(self):\n sql = db.get_datawriter('sql', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, 
chains=1, save_sim=True)\n sql.save(self.like, self.randompar, self.simulations_multi)\n sql.finalize()\n sqldata = sql.getdata()\n self.assertEqual(str(type(sqldata)), str(type(np.array([]))))\n self.assertEqual(len(sqldata[0]), 32)\n self.assertEqual(len(sqldata), 1)\n self.assertEqual(len(sql.header), 32)\n\n\n def test_sql_multiline_false(self):\n sql = db.get_datawriter('sql', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=False)\n sql.save(self.like, self.randompar, self.simulations_multi)\n sql.finalize()\n sqldata = sql.getdata()\n self.assertEqual(str(type(sqldata)), str(type(np.array([]))))\n self.assertEqual(len(sqldata[0]), 7)\n self.assertEqual(len(sqldata), 1)\n self.assertEqual(len(sql.header), 7)\n\n def test_sql_single(self):\n sql = db.get_datawriter('sql', \"UnitTest_tmp\", self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True)\n sql.save(self.like, self.randompar, self.simulations)\n sql.finalize()\n sqldata = sql.getdata()\n self.assertEqual(str(type(sqldata)), str(type(np.array([]))))\n self.assertEqual(len(sqldata[0]), 12)\n self.assertEqual(len(sqldata), 1)\n self.assertEqual(len(sql.header), 12)\n\n def test_sql_single_false(self):\n sql = db.get_datawriter('sql', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=False)\n sql.save(self.like, self.randompar, self.simulations)\n sql.finalize()\n\n sqldata = sql.getdata()\n self.assertEqual(str(type(sqldata)), str(type(np.array([]))))\n self.assertEqual(len(sqldata[0]), 7)\n self.assertEqual(len(sqldata), 1)\n self.assertEqual(len(sql.header), 7)\n\n def test_ram_multiline(self):\n ram = db.get_datawriter('ram', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=True)\n ram.save(self.like, self.randompar, self.simulations_multi)\n ram.finalize()\n\n ramdata = ram.getdata()\n self.assertEqual(str(type(ramdata)), str(type(np.array([]))))\n self.assertEqual(len(ram.header), 32)\n self.assertEqual(len(ramdata[0]), 32)\n self.assertEqual(len(ramdata), 1)\n self.assertEqual(len(ramdata.dtype), len(ram.header))\n\n def test_ram_multiline_false(self):\n ram = db.get_datawriter('ram', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations_multi, chains=1, save_sim=False)\n ram.save(self.like, self.randompar, self.simulations_multi)\n\n ram.finalize()\n ramdata = ram.getdata()\n self.assertEqual(str(type(ramdata)), str(type(np.array([]))))\n self.assertEqual(len(ramdata[0]), 7)\n self.assertEqual(len(ramdata), 1)\n self.assertEqual(len(ramdata.dtype), len(ram.header))\n self.assertEqual(len(ram.header), 7)\n\n def test_ram_single(self):\n ram = db.get_datawriter('ram', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=True)\n ram.save(self.like, self.randompar, self.simulations)\n\n ram.finalize()\n ramdata = ram.getdata()\n self.assertEqual(str(type(ramdata)), str(type(np.array([]))))\n self.assertEqual(len(ramdata[0]), 12)\n self.assertEqual(len(ramdata), 1)\n self.assertEqual(len(ramdata.dtype), len(ram.header))\n self.assertEqual(len(ram.header), 12)\n\n def test_ram_single_false(self):\n ram = db.get_datawriter('ram', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=False)\n ram.save(self.like, self.randompar, self.simulations)\n\n 
ram.finalize()\n ramdata = ram.getdata()\n self.assertEqual(str(type(ramdata)), str(type(np.array([]))))\n self.assertEqual(len(ramdata[0]), 7)\n self.assertEqual(len(ramdata), 1)\n self.assertEqual(len(ramdata.dtype), len(ram.header))\n self.assertEqual(len(ram.header), 7)\n\n def test_not_existing_dbformat(self):\n with self.assertRaises(AttributeError):\n _ = db.get_datawriter('xxx', \"UnitTest_tmp\", self.parnames, self.like, self.randompar, simulations=self.simulations,\n chains=1, save_sim=True)\n\n def test_noData(self):\n nodata = db.get_datawriter(\n 'noData', \"UnitTest_tmp\",\n self.parnames, np.array(self.like), self.randompar,\n simulations=self.simulations, chains=1, save_sim=True\n )\n nodata.save(self.like, self.randompar, self.simulations)\n nodata.finalize()\n self.assertEqual(nodata.getdata(), None)\n\n def test_custom(self):\n custom = db.get_datawriter(\n 'custom', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n setup=MockSetup(),\n simulations=self.simulations, chains=1, save_sim=True\n )\n custom.save(self.like, self.randompar, self.simulations)\n custom.finalize()\n self.assertEqual(custom.getdata(),None)\n\n def test_custom_no_setup(self):\n with self.assertRaises(ValueError):\n _ = db.get_datawriter(\n 'custom', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n simulations=self.simulations, chains=1, save_sim=True\n )\n\n def test_custom_wrong_setup(self):\n with self.assertRaises(AttributeError):\n _ = db.get_datawriter(\n 'custom', \"UnitTest_tmp\",\n self.parnames, self.like, self.randompar,\n setup=[],\n simulations=self.simulations, chains=1, save_sim=True\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import numpy as np\nfrom . import _algorithm\nfrom spotpy.parameter import ParameterSet\n\n\nclass DDSGenerator:\n \"\"\"\n This class is used by the DDS algorithm to generate a new sample of parameters based on the current one.\n Current parameter are exchanged in `ParameterSet` objects.\n \"\"\"\n\n def __init__(self, np_random):\n self.np_random = np_random\n\n def neigh_value_continuous(self, s, x_min, x_max, r):\n \"\"\"\n select a RANDOM neighbouring real value of a SINGLE decision variable\n CEE 509, HW 5 by Bryan Tolson, Mar 5, 2003 AND ALSO CEE PROJECT\n variables:\n x_range is the range of the real variable (s_max-s_min)\n\n :param s: is a current SINGLE decision variable VALUE\n :param x_min: is the min of variable s\n :param x_max: is the max of variable s\n :param r: is the neighbourhood parameter (replaces V parameter~see not\n It is defined as the ratio of the std deviation of the desired\n normal random number/x_range. Eg:\n std dev desired = r * x_range\n for comparison: variance (V) = (r * x_range)^2\n :return: x_new, a new sample of values in beetween a given range\n \"\"\"\n\n x_range = x_max - x_min\n\n x_new = s + self.np_random.normal(0, 1) * r * x_range\n\n # NEED to deal with variable upper and lower bounds:\n # Originally bounds in DDS were 100# reflective\n # But some times DVs are right on the boundary and with 100# reflective\n # boundaries it is hard to detect them. Therefore, we decided to make the\n # boundaries reflective with 50# chance and absorptive with 50# chance.\n # M. Asadzadeh and B. 
Tolson Dec 2008\n\n p_abs_or_ref = self.np_random.rand()\n\n if x_new < x_min: # works for any pos or neg x_min\n if p_abs_or_ref <= 0.5: # with 50%chance reflect\n x_new = x_min + (x_min - x_new)\n else: # with 50% chance absorb\n x_new = x_min\n\n # if reflection goes past x_max then value should be x_min since without reflection\n # the approach goes way past lower bound. This keeps X close to lower bound when X current\n # is close to lower bound:\n if x_new > x_max:\n x_new = x_min\n\n elif x_new > x_max: # works for any pos or neg x_max\n if p_abs_or_ref <= 0.5: # with 50% chance reflect\n x_new = x_max - (x_new - x_max)\n else: # with 50% chance absorb\n x_new = x_max\n\n # if reflection goes past x_min then value should be x_max for same reasons as above\n if x_new < x_min:\n x_new = x_max\n\n return x_new\n\n def neigh_value_discrete(self, s, s_min, s_max, r):\n \"\"\"\n Created by B.Tolson and B.Yung, June 2006\n Modified by B. Tolson & M. Asadzadeh, Sept 2008\n Modification: 1- Boundary for reflection at (s_min-0.5) & (s_max+0.5)\n 2- Round the new value at the end of generation.\n select a RANDOM neighbouring integer value of a SINGLE decision variable\n discrete distribution is approximately normal\n alternative to this appoach is reflecting triangular distribution (see Azadeh work)\n\n :param s: is a current SINGLE decision variable VALUE\n :param s_min: is the min of variable s\n :param s_max: is the max of variable s\n :param r: r is the neighbourhood parameter (replaces V parameter~see notes)\n It is defined as the ratio of the std deviation of the desired\n normal random number/s_range. Eg:\n std dev desired = r * s_range\n for comparison: variance (V) = (r * s_range)^2\n :return: s_new, a new sample of values in beetween a given range\n \"\"\"\n\n s_range = s_max - s_min\n delta = self.np_random.normal(0, 1) * r * s_range\n s_new = s + delta\n\n p_abs_or_ref = self.np_random.rand()\n\n if s_new < s_min - 0.5: # works for any pos or neg s_min\n if p_abs_or_ref <= 0.5: # with 50% chance reflect\n s_new = (s_min - 0.5) + ((s_min - 0.5) - s_new)\n else: # with 50% chance absorb\n s_new = s_min\n\n # if reflection goes past (s_max+0.5) then value should be s_min since without reflection\n # the approach goes way past lower bound. 
This keeps X close to lower bound when X current\n # is close to lower bound:\n if s_new > s_max + 0.5:\n s_new = s_min\n\n elif s_new > s_max + 0.5: # works for any pos or neg s_max\n if p_abs_or_ref <= 0.5: # with 50% chance reflect\n s_new = (s_max + 0.5) - (s_new - (s_max + 0.5))\n else: # with 50% chance absorb\n s_new = s_max\n\n # if reflection goes past (s_min-0.5) then value should be s_max for same reasons as above\n if s_new < s_min - 0.5:\n s_new = s_max\n\n s_new = np.round(s_new) # New value must be integer\n if s_new == s: # pick a number between s_max and s_min by a Uniform distribution\n sample = s_min - 1 + np.ceil((s_max - s_min) * self.np_random.rand())\n if sample < s:\n s_new = sample\n else: # must increment option number by one\n s_new = sample + 1\n return s_new\n\n def neigh_value_mixed(self, x_curr, r, j, x_min, x_max):\n \"\"\"\n Perturb the j-th decision variable, dispatching to the continuous or\n discrete neighbourhood generator depending on the parameter type.\n\n :param x_curr: current parameter set\n :type x_curr: ParameterSet\n :param r: neighbourhood size perturbation parameter\n :param j: index of the decision variable to perturb\n :param x_min: lower bound of the variable\n :param x_max: upper bound of the variable\n :return: a new value for the j-th decision variable\n \"\"\"\n s = x_curr[j]\n\n if not x_curr.as_int[j]:\n return self.neigh_value_continuous(s, x_min, x_max, r)\n else:\n return self.neigh_value_discrete(s, x_min, x_max, r)\n\n\nclass dds(_algorithm):\n \"\"\"\n Implements the Dynamically dimensioned search algorithm for computationally efficient watershed model\n calibration\n by\n Tolson, B. A. and C. A. Shoemaker (2007), Dynamically dimensioned search algorithm for computationally efficient\n watershed model calibration, Water Resources Research, 43, W01413, 10.1029/2005WR004723.\n Asadzadeh, M. and B. A. Tolson (2013), Pareto archived dynamically dimensioned search with hypervolume-based\n selection for multi-objective optimization, Engineering Optimization. 10.1080/0305215X.2012.748046.\n\n http://www.civil.uwaterloo.ca/btolson/software.aspx\n\n Method:\n \"The DDS algorithm is a novel and simple stochastic single-solution based heuristic global search\n algorithm that was developed for the purpose of finding good global solutions\n (as opposed to globally optimal solutions) within a specified maximum function (or model) evaluation limit.\"\n (Page 3)\n\n The DDS algorithm is a simple greedy algorithm, always using the best solution (min or max) from the current\n point of view. This may not lead to the global optimum.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Input\n ----------\n spot_setup: class\n model: function\n Should be callable with a parameter combination of the parameter-function\n and return a list of simulation results (as long as evaluation list)\n parameter: function\n When called, it should return a random parameter combination, which can\n be e.g. uniform or Gaussian\n objectivefunction: function\n Should return the objective function value for a given list of model simulations and\n observations.\n evaluation: function\n Should return the true values as returned by the model.\n\n dbname: str\n * Name of the database where parameter, objectivefunction value and simulation results will be saved.\n\n dbformat: str\n * ram: fast, suited for short sampling times. 
No file will be created and results are saved in an array.\n * csv: A csv file will be created, which you can import afterwards.\n\n parallel: str\n * seq: Sequential sampling (default): Normal iterations on one core of your cpu.\n * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).\n\n save_sim: boolean\n * True: Simulation results will be saved\n * False: Simulation results will not be saved\n :param r: neighborhood size perturbation parameter (r) that defines the random perturbation size standard\n deviation as a fraction of the decision variable range. Default is 0.2.\n :type r: float\n\n \"\"\"\n\n try:\n self.r = kwargs.pop(\"r\")\n except KeyError:\n self.r = 0.2 # default value\n kwargs['optimization_direction'] = 'maximize'\n kwargs['algorithm_name'] = 'Dynamically Dimensioned Search (DDS) algorithm'\n super(dds, self).__init__(*args, **kwargs)\n\n self.np_random = np.random\n\n #self.status.params_max = ParameterSet(self.parameter())\n\n # self.generator_repetitions will be set in `sample` and is needed to generate a\n # generator which sends back actual parameter s_test\n self.generator_repetitions = -1\n\n self.dds_generator = DDSGenerator(self.np_random)\n\n def _set_np_random(self, f_rand):\n self.np_random = f_rand\n self.dds_generator.np_random = f_rand\n\n def get_next_x_curr(self):\n \"\"\"\n Wrap the perturbation step in a generator so that self.repeat can use multiprocessing\n \"\"\"\n # We need to shift position and length of the sampling process\n for rep in range(self.generator_repetitions):\n yield rep, self.calculate_next_s_test(self.params_max, rep, self.generator_repetitions, self.r)\n\n def sample(self, repetitions, trials=1, x_initial=np.array([])):\n \"\"\"\n Samples from the DDS Algorithm.\n\n DDS is a greedy type of algorithm since the current solution, also the best solution identified so far,\n is never updated with a solution that has an inferior value of the objective function.\n\n That means in detail:\n The DDS Algorithm starts with an initial phase:\n If the user does not define an initial configuration, the DDS algorithm starts by searching for a parameter\n configuration in between the given parameter bounds.\n\n The next phase is the dds algorithm itself which runs in a loop `repetitions` times:\n Based on the parameter configuration x_new the algorithm runs the model and simulation with the current parameter set\n and calculates the objective function value called F_curr.\n\n If F_curr > F_best, where F_best is the current maximum objective function value, we set x_best = x_curr and\n F_best = F_curr.\n\n Select k of all parameters to include them in the neighborhood calculation. This is performed by calculating a\n threshold probability_neighborhood (probability in neighbourhood).\n\n The neighbourhood calculation perturbs x_best with a standard normal distribution and reflects the result if it\n breaks the parameter boundary.\n The updated parameter configuration is called x_curr\n\n :param repetitions: Maximum number of runs.\n :type repetitions: int\n :param trials: number of times the DDS algorithm will be run\n :param x_initial: set an initial trial set as a first parameter configuration. If the set is empty, the algorithm\n selects its own initial parameter configuration\n :return: a key-value set of all parameter combinations which have been used. 
May change in the future.\n \"\"\"\n\n # every iteration a map of all relevant values is stored, only for debugging purposes.\n # Spotpy does not need these values.\n debug_results = []\n\n self.set_repetiton(repetitions)\n self.min_bound, self.max_bound = self.parameter(\n )['minbound'], self.parameter()['maxbound']\n print('Starting the DDS algorithm with ' + str(repetitions) + ' repetitions...')\n\n number_of_parameters = self.status.parameters # number_of_parameters is the amount of parameters\n\n if len(x_initial) == 0:\n initial_iterations = int(np.max([5, round(0.005 * repetitions)]))\n elif len(x_initial) != number_of_parameters:\n raise ValueError(\"User specified 'x_initial' does not have the same length as the available parameters\")\n else:\n initial_iterations = 1\n x_initial = np.array(x_initial)\n if not (np.all(x_initial <= self.max_bound) and np.all(\n x_initial >= self.min_bound)):\n raise ValueError(\"User specified 'x_initial' but the values are not within the parameter range\")\n\n # Users can define several trials; within each trial the algorithm is executed \"repetitions\" times\n for trial in range(trials):\n #objectivefunction_max = -1e308\n params_max = x_initial\n # repitionno_best saves on which iteration the best parameter configuration has been found\n repitionno_best = initial_iterations # needed to initialize variable and avoid code failure when small # iterations\n params_max, repetions_left, objectivefunction_max = self.calc_initial_para_configuration(initial_iterations, trial,\n repetitions, x_initial)\n params_max = self.fix_status_params_format(params_max)\n trial_best_value = list(params_max) # self.status.params_max.copy()\n\n # important to set this field `generator_repetitions` so that\n # method `get_next_s_test` can generate exact parameters\n self.generator_repetitions = repetions_left\n self.params_max = params_max\n for rep, x_curr, simulations in self.repeat(self.get_next_x_curr()):\n\n like = self.postprocessing(rep, x_curr, simulations, chains=trial)\n if like > objectivefunction_max:\n objectivefunction_max = like\n self.params_max = list(x_curr)\n self.params_max = self.fix_status_params_format(self.params_max)\n\n print('Best solution found has obj function value of ' + str(objectivefunction_max) + ' at '\n + str(repitionno_best) + '\\n\\n')\n debug_results.append({\"sbest\": self.params_max, \"trial_initial\": trial_best_value, \"objfunc_val\": objectivefunction_max})\n self.final_call()\n return debug_results\n\n def fix_status_params_format(self, params_max):\n start_params = ParameterSet(self.parameter())\n start_params.set_by_array([j for j in params_max])\n return start_params\n\n def calc_initial_para_configuration(self, initial_iterations, trial, repetitions, x_initial):\n #max_bound, min_bound = self.status.params_max.maxbound, self.status.params_max.minbound\n parameter_bound_range = self.max_bound - self.min_bound\n number_of_parameters = len(parameter_bound_range)\n discrete_flag = ParameterSet(self.parameter()).as_int\n params_max = x_initial\n objectivefunction_max = -1e308\n # Calculate the initial solution if `initial_iterations` > 1; otherwise the user defined their own.\n # If we need to find an initial solution we iterate initial_iterations times to warm up the algorithm\n # by checking which randomly generated input matches best\n # initial_iterations is the number of function evaluations to initialize the DDS algorithm solution\n if initial_iterations > 1:\n print('Finding best starting point for trial ' + str(trial + 1) + ' using ' + str(\n 
initial_iterations) + ' random samples.')\n repetions_left = repetitions - initial_iterations # use this to reduce number of fevals in DDS loop\n if repetions_left <= 0:\n raise ValueError('# Initialization samples >= Max # function evaluations.')\n\n starting_generator = (\n (rep, [self.np_random.randint(int(self.min_bound[j]), int(self.max_bound[j]) + 1) if\n discrete_flag[j] else self.min_bound[j] + parameter_bound_range[j] * self.np_random.rand()\n for j in\n range(number_of_parameters)]) for rep in range(int(initial_iterations)))\n\n for rep, x_curr, simulations in self.repeat(starting_generator):\n like = self.postprocessing(rep, x_curr, simulations) # get obj function value\n # status setting update\n if like > objectivefunction_max:\n objectivefunction_max = like\n params_max = list(x_curr)\n params_max = self.fix_status_params_format(params_max)\n\n else: # now initial_iterations=1, using a user supplied initial solution. Calculate obj func value.\n repetions_left = repetitions - 1 # use this to reduce number of fevals in DDS loop\n rep, x_test_param, simulations = self.simulate((0, x_initial)) # get from the inputs\n like = self.postprocessing(rep, x_test_param, simulations)\n if like > objectivefunction_max:\n objectivefunction_max = like\n params_max = list(x_test_param)\n params_max = self.fix_status_params_format(params_max)\n return params_max, repetions_left, objectivefunction_max\n\n def calculate_next_s_test(self, previous_x_curr, rep, rep_limit, r):\n \"\"\"\n Needs to run inside `sample` method. Calculate the next set of parameters based on a given set.\n This is the greedy step of the DDS algorithm.\n\n `probability_neighborhood` is a threshold at which level a parameter is added to the neighbourhood calculation.\n The perturbation of each selected decision variable uses a normal distribution.\n\n `dvn_count` counts how many parameters have been exchanged with neighbourhood values.\n If no parameter has been exchanged, a single one is selected at random and exchanged with its\n neighbourhood value.\n\n :param previous_x_curr: A set of parameters\n :param rep: Position in DDS loop\n :param rep_limit: total number of repetitions in the DDS loop\n :param r: neighbourhood size perturbation parameter\n :return: next parameter set\n \"\"\"\n amount_params = len(previous_x_curr)\n new_x_curr = previous_x_curr.copy() # define new_x_curr initially as current (previous_x_curr for greedy)\n\n randompar = self.np_random.rand(amount_params)\n probability_neighborhood = 1.0 - np.log(rep + 1) / np.log(rep_limit)\n dvn_count = 0 # counter for how many decision variables vary in neighbour\n\n for j in range(amount_params):\n if randompar[j] < probability_neighborhood: # then j th DV selected to vary in neighbour\n dvn_count = dvn_count + 1\n new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, j, self.min_bound[j], self.max_bound[j])\n new_x_curr[j] = new_value # change relevant dec var value in x_curr\n\n if dvn_count == 0: # no DVs selected at random, so select ONE\n dec_var = int(np.ceil(amount_params * self.np_random.rand())) - 1\n new_value = self.dds_generator.neigh_value_mixed(previous_x_curr, r, dec_var, self.min_bound[dec_var],\n self.max_bound[dec_var])\n new_x_curr[dec_var] = new_value # change relevant decision variable value in s_test\n\n return new_x_curr\n
[ [ "numpy.random.uniform", "numpy.array" ], [ "numpy.log", "numpy.round", "numpy.all", "numpy.int", "numpy.array" ] ]
dulkith/gradio
[ "cf3923e307930965dfc200068e0399bc4ab103f7" ]
[ "gradio/processing_utils.py" ]
[ "from PIL import Image, ImageOps\nfrom io import BytesIO\nimport base64\nimport tempfile\nimport scipy.io.wavfile\nfrom scipy.fftpack import dct\nimport numpy as np\nimport skimage\n\n\n#########################\n# IMAGE PRE-PROCESSING\n#########################\ndef decode_base64_to_image(encoding):\n content = encoding.split(';')[1]\n image_encoded = content.split(',')[1]\n return Image.open(BytesIO(base64.b64decode(image_encoded)))\n\n\ndef encode_file_to_base64(f, type=\"image\", ext=None, header=True):\n with open(f, \"rb\") as file:\n encoded_string = base64.b64encode(file.read())\n base64_str = str(encoded_string, 'utf-8')\n if not header:\n return base64_str\n if ext is None:\n ext = f.split(\".\")[-1]\n return \"data:\" + type + \"/\" + ext + \";base64,\" + base64_str\n\n\ndef encode_plot_to_base64(plt):\n with BytesIO() as output_bytes:\n plt.savefig(output_bytes, format=\"png\")\n bytes_data = output_bytes.getvalue()\n base64_str = str(base64.b64encode(bytes_data), 'utf-8')\n return \"data:image/png;base64,\" + base64_str\n\ndef encode_array_to_base64(image_array):\n with BytesIO() as output_bytes:\n PIL_image = Image.fromarray(skimage.img_as_ubyte(image_array))\n PIL_image.save(output_bytes, 'PNG')\n bytes_data = output_bytes.getvalue()\n base64_str = str(base64.b64encode(bytes_data), 'utf-8')\n return \"data:image/png;base64,\" + base64_str\n\n\ndef resize_and_crop(img, size, crop_type='center'):\n \"\"\"\n Resize and crop an image to fit the specified size.\n args:\n size: `(width, height)` tuple.\n crop_type: can be 'top', 'middle' or 'bottom', depending on this\n value, the image will cropped getting the 'top/left', 'middle' or\n 'bottom/right' of the image to fit the size.\n raises:\n ValueError: if an invalid `crop_type` is provided.\n \"\"\"\n if crop_type == \"top\":\n center = (0, 0)\n elif crop_type == \"center\":\n center = (0.5, 0.5)\n else:\n raise ValueError\n return ImageOps.fit(img, size, centering=center) \n\n##################\n# OUTPUT\n##################\n\ndef decode_base64_to_binary(encoding):\n extension = None\n if \",\" in encoding:\n header, data = encoding.split(\",\")\n header = header[5:]\n if \";base64\" in header:\n header = header[0:header.index(\";base64\")]\n if \"/\" in header:\n extension = header[header.index(\"/\") + 1:]\n else:\n data = encoding\n return base64.b64decode(data), extension\n\ndef decode_base64_to_file(encoding):\n data, extension = decode_base64_to_binary(encoding)\n if extension is None:\n file_obj = tempfile.NamedTemporaryFile(delete=False)\n else:\n file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=\".\"+extension)\n file_obj.write(data)\n file_obj.flush()\n return file_obj\n\n##################\n# AUDIO FILES\n##################\n\ndef generate_mfcc_features_from_audio_file(wav_filename=None,\n pre_emphasis=0.95,\n frame_size= 0.025,\n frame_stride=0.01,\n NFFT=512,\n nfilt=40,\n num_ceps=12,\n cep_lifter=22,\n sample_rate=None,\n signal=None,\n downsample_to=None):\n \"\"\"\n Loads and preprocesses a .wav audio file (or alternatively, a sample rate & signal) into mfcc coefficients, the typical inputs to models.\n Adapted from: https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html\n :param wav_filename: string name of audio file to process.\n :param pre_emphasis: a float factor, typically 0.95 or 0.97, which amplifies high frequencies.\n :param frame_size: a float that is the length, in seconds, of time frame over which to take the fft.\n :param frame_stride: a float that is the 
offset, in seconds, between consecutive time frames.\n :param NFFT: The number of points in the short-time fft for each time frame.\n :param nfilt: The number of filters on the Mel-scale to extract frequency bands.\n :param num_ceps: the number of cepstral coefficients to retain.\n :param cep_lifter: the int factor, by which to de-emphasize higher-frequency coefficients.\n :param sample_rate: optional param representing sample rate that is used if `wav_filename` is not provided\n :param signal: optional param representing sample data that is used if `wav_filename` is not provided\n :param downsample_to: optional param. If provided, audio file is downsampled to this many frames.\n :return: a 3D numpy array of mfcc coefficients, of the shape 1 x num_frames x num_coeffs.\n \"\"\"\n if (wav_filename is None) and (sample_rate is None or signal is None):\n raise ValueError(\"Either a wav_filename must be provided or a sample_rate and signal\")\n elif wav_filename is None:\n pass\n else:\n sample_rate, signal = scipy.io.wavfile.read(wav_filename)\n\n if downsample_to is not None:\n signal = scipy.signal.resample(signal, downsample_to)\n\n emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])\n\n frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate # Convert from seconds to samples\n signal_length = len(emphasized_signal)\n frame_length = int(round(frame_length))\n frame_step = int(round(frame_step))\n num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) # Make sure that we have at least 1 frame\n\n pad_signal_length = num_frames * frame_step + frame_length\n z = np.zeros((pad_signal_length - signal_length))\n pad_signal = np.append(emphasized_signal, z) # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal\n\n indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T\n frames = pad_signal[indices.astype(np.int32, copy=False)]\n\n frames *= np.hamming(frame_length)\n mag_frames = np.absolute(np.fft.rfft(frames, NFFT)) # Magnitude of the FFT\n pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2)) # Power Spectrum\n\n low_freq_mel = 0\n high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel\n mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale\n hz_points = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz\n bin = np.floor((NFFT + 1) * hz_points / sample_rate)\n\n fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))\n for m in range(1, nfilt + 1):\n f_m_minus = int(bin[m - 1]) # left\n f_m = int(bin[m]) # center\n f_m_plus = int(bin[m + 1]) # right\n\n for k in range(f_m_minus, f_m):\n fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\n for k in range(f_m, f_m_plus):\n fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\n filter_banks = np.dot(pow_frames, fbank.T)\n filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * np.log10(filter_banks) # dB\n\n mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 0: (num_ceps + 1)] # Keep coefficients 1-13 by default.\n (nframes, ncoeff) = mfcc.shape\n n = np.arange(ncoeff)\n lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)\n mfcc *= lift\n\n filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)\n mfcc -= (np.mean(mfcc, axis=0) + 1e-8)\n return 
mfcc[np.newaxis, :, :] # Create a batch dimension.\n\n\n" ]
[ [ "numpy.dot", "numpy.abs", "numpy.linspace", "numpy.fft.rfft", "numpy.arange", "numpy.finfo", "numpy.sin", "scipy.fftpack.dct", "numpy.append", "numpy.log10", "numpy.hamming", "numpy.mean", "numpy.floor", "numpy.zeros" ] ]
CodeOfCognition/Twitter_Vaxx_Sentiment_Analysis
[ "e3cf50c01acf7ce08810e8fa478a599034377264" ]
[ "src/analyze_sentiment.py" ]
[ "import argparse\nimport json\nimport pandas\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', help='<annotated_data.csv>', required=True)\n args = parser.parse_args()\n\n return args.i\n\n#checks whether there are any invalid labels\ndef check_label_validity(df, labels):\n df_invalid = df.loc[~df.topic.isin(labels)]\n numInvalidEntries = len(df_invalid)\n if numInvalidEntries > 0:\n print(f\"{numInvalidEntries} The following entries have invalid labels:\")\n print(df_invalid)\n return -1\n else:\n return 0\n\ndef get_counts(df, labels):\n counts = dict()\n for l in labels:\n counts[l] = len(df.loc[df.topic == l])\n return counts\n\ndef get_sentiments(df, labels):\n sentiments = dict()\n for l in labels:\n sentiments[l] = dict()\n for l in labels:\n numEntries = len(df.loc[df.topic == l])\n sentiments[l][\"negative\"] = len(df[(df.topic == l) & (df.sentiment == -1)]) / numEntries\n sentiments[l][\"neutral\"] = len(df[(df.topic == l) & (df.sentiment == 0)]) / numEntries\n sentiments[l][\"positive\"] = len(df[(df.topic == l) & (df.sentiment == 1)]) / numEntries\n sentiments[l][\"average\"] = (sentiments[l][\"positive\"] - sentiments[l][\"negative\"])\n return sentiments\n\ndef main():\n # inputFile = parse_args()\n inputFile = \"data/raw/annotated_data.tsv\"\n outputFile = \"data/results/sentiment_results.json\"\n df = pandas.read_csv(inputFile, sep=\"\\t\")\n df[\"topic\"] = df[\"topic\"].str.lower()\n\n labels = [\"g\", \"s\", \"p\", \"a\", \"m\", \"o\", \"ov\"]\n check_label_validity(df, labels)\n counts = get_counts(df, labels)\n sentiment = get_sentiments(df, labels)\n\n output = {\"counts\" : counts, \"sentiment analysis\": sentiment}\n\n with open(outputFile, \"wt\") as f:\n json.dump(output, f)\n\n\n \n\n\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "pandas.read_csv" ] ]
LiChenyang-Github/mmclassification
[ "557a364d256b069978ad068adc74ce39a2e375c2" ]
[ "mmcls/datasets/pipelines/transforms.py" ]
[ "import math\nimport random\n\nimport mmcv\nimport numpy as np\n\nfrom ..builder import PIPELINES\n\n\n@PIPELINES.register_module()\nclass RandomCrop(object):\n \"\"\"Crop the given Image at a random location.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n padding (int or sequence, optional): Optional padding on each border\n of the image. If a sequence of length 4 is provided, it is used to\n pad left, top, right, bottom borders respectively. If a sequence\n of length 2 is provided, it is used to pad left/right, top/bottom\n borders, respectively. Default: None, which means no padding.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n Default: False.\n pad_val (Number | Sequence[Number]): Pixel pad_val value for constant\n fill. If a tuple of length 3, it is used to pad_val R, G, B\n channels respectively. Default: 0.\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Default: constant.\n -constant: Pads with a constant value, this value is specified\n with pad_val.\n -edge: pads with the last value at the edge of the image.\n -reflect: Pads with reflection of image without repeating the\n last value on the edge. For example, padding [1, 2, 3, 4]\n with 2 elements on both sides in reflect mode will result\n in [3, 2, 1, 2, 3, 4, 3, 2].\n -symmetric: Pads with reflection of image repeating the last\n value on the edge. For example, padding [1, 2, 3, 4] with\n 2 elements on both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3].\n \"\"\"\n\n def __init__(self,\n size,\n padding=None,\n pad_if_needed=False,\n pad_val=0,\n padding_mode='constant'):\n if isinstance(size, (tuple, list)):\n self.size = size\n else:\n self.size = (size, size)\n # check padding mode\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.pad_val = pad_val\n self.padding_mode = padding_mode\n\n @staticmethod\n def get_params(img, output_size):\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (ndarray): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: Params (xmin, ymin, target_height, target_width) to be\n passed to ``crop`` for random crop.\n \"\"\"\n height = img.shape[0]\n width = img.shape[1]\n target_height, target_width = output_size\n if width == target_width and height == target_height:\n return 0, 0, height, width\n\n xmin = random.randint(0, height - target_height)\n ymin = random.randint(0, width - target_width)\n return xmin, ymin, target_height, target_width\n\n def __call__(self, results):\n \"\"\"\n Args:\n img (ndarray): Image to be cropped.\n \"\"\"\n for key in results.get('img_fields', ['img']):\n img = results[key]\n if self.padding is not None:\n img = mmcv.impad(\n img, padding=self.padding, pad_val=self.pad_val)\n\n # pad the height if needed\n if self.pad_if_needed and img.shape[0] < self.size[0]:\n img = mmcv.impad(\n img,\n padding=(0, self.size[0] - img.shape[0], 0,\n self.size[0] - img.shape[0]),\n pad_val=self.pad_val,\n padding_mode=self.padding_mode)\n\n # pad the width if needed\n if self.pad_if_needed and img.shape[1] < self.size[1]:\n img = mmcv.impad(\n img,\n padding=(self.size[1] - img.shape[1], 0,\n 
self.size[1] - img.shape[1], 0),\n pad_val=self.pad_val,\n padding_mode=self.padding_mode)\n\n xmin, ymin, height, width = self.get_params(img, self.size)\n results[key] = mmcv.imcrop(\n img,\n np.array([ymin, xmin, ymin + width - 1, xmin + height - 1]))\n return results\n\n def __repr__(self):\n return (self.__class__.__name__ +\n f'(size={self.size}, padding={self.padding})')\n\n\n@PIPELINES.register_module()\nclass RandomResizedCrop(object):\n \"\"\"Crop the given image to random size and aspect ratio.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a\n random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio\n is made. This crop is finally resized to given size.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n scale (tuple): Range of the random size of the cropped image compared\n to the original image. Default: (0.08, 1.0).\n ratio (tuple): Range of the random aspect ratio of the cropped image\n compared to the original image. Default: (3. / 4., 4. / 3.).\n interpolation (str): Interpolation method, accepted values are\n 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Default:\n 'bilinear'.\n \"\"\"\n\n def __init__(self,\n size,\n scale=(0.08, 1.0),\n ratio=(3. / 4., 4. / 3.),\n interpolation='bilinear'):\n if isinstance(size, (tuple, list)):\n self.size = size\n else:\n self.size = (size, size)\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n raise ValueError(\"range should be of kind (min, max). \"\n f\"But received {scale}\")\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (ndarray): Image to be cropped.\n scale (tuple): Range of the random size of the cropped image\n compared to the original image size.\n ratio (tuple): Range of the random aspect ratio of the cropped\n image compared to the original image area.\n\n Returns:\n tuple: Params (xmin, ymin, target_height, target_width) to be\n passed to ``crop`` for a random sized crop.\n \"\"\"\n height = img.shape[0]\n width = img.shape[1]\n area = height * width\n\n for _ in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n target_width = int(round(math.sqrt(target_area * aspect_ratio)))\n target_height = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < target_width <= width and 0 < target_height <= height:\n xmin = random.randint(0, height - target_height)\n ymin = random.randint(0, width - target_width)\n return xmin, ymin, target_height, target_width\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n target_width = width\n target_height = int(round(target_width / min(ratio)))\n elif in_ratio > max(ratio):\n target_height = height\n target_width = int(round(target_height * max(ratio)))\n else: # whole image\n target_width = width\n target_height = height\n xmin = (height - target_height) // 2\n ymin = (width - target_width) // 2\n return xmin, ymin, target_height, target_width\n\n def __call__(self, results):\n \"\"\"\n Args:\n img (ndarray): Image to be cropped and resized.\n\n Returns:\n ndarray: Randomly cropped and resized image.\n \"\"\"\n for key in results.get('img_fields', ['img']):\n img = results[key]\n xmin, 
ymin, target_height, target_width = self.get_params(\n img, self.scale, self.ratio)\n img = mmcv.imcrop(\n img,\n np.array([\n ymin, xmin, ymin + target_width - 1,\n xmin + target_height - 1\n ]))\n results[key] = mmcv.imresize(\n img, tuple(self.size[::-1]), interpolation=self.interpolation)\n return results\n\n def __repr__(self):\n format_string = self.__class__.__name__ + f'(size={self.size}'\n format_string += f', scale={tuple(round(s, 4) for s in self.scale)}'\n format_string += f', ratio={tuple(round(r, 4) for r in self.ratio)}'\n format_string += f', interpolation={self.interpolation})'\n return format_string\n\n\n@PIPELINES.register_module()\nclass RandomGrayscale(object):\n \"\"\"Randomly convert image to grayscale with a probability of gray_prob.\n\n Args:\n gray_prob (float): Probability that image should be converted to\n grayscale. Default: 0.1.\n\n Returns:\n ndarray: Grayscale version of the input image with probability\n gray_prob and unchanged with probability (1-gray_prob).\n - If input image is 1 channel: grayscale version is 1 channel.\n - If input image is 3 channel: grayscale version is 3 channel\n with r == g == b.\n\n \"\"\"\n\n def __init__(self, gray_prob=0.1):\n self.gray_prob = gray_prob\n\n def __call__(self, results):\n \"\"\"\n Args:\n img (ndarray): Image to be converted to grayscale.\n\n Returns:\n ndarray: Randomly grayscaled image.\n \"\"\"\n for key in results.get('img_fields', ['img']):\n img = results[key]\n num_output_channels = img.shape[2]\n if random.random() < self.gray_prob:\n if num_output_channels > 1:\n img = mmcv.rgb2gray(img)[:, :, None]\n results[key] = np.dstack(\n [img for _ in range(num_output_channels)])\n return results\n results[key] = img\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(gray_prob={self.gray_prob})'\n\n\n@PIPELINES.register_module()\nclass RandomFlip(object):\n \"\"\"Flip the image randomly.\n\n Flip the image randomly based on flip probaility and flip direction.\n\n Args:\n flip_prob (float): probability of the image being flipped. Default: 0.5\n direction (str, optional): The flipping direction. Options are\n 'horizontal' and 'vertical'. 
Default: 'horizontal'.\n \"\"\"\n\n def __init__(self, flip_prob=0.5, direction='horizontal'):\n assert 0 <= flip_prob <= 1\n assert direction in ['horizontal', 'vertical']\n self.flip_prob = flip_prob\n self.direction = direction\n\n def __call__(self, results):\n \"\"\"Call function to flip image.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Flipped results, 'flip', 'flip_direction' keys are added into\n result dict.\n \"\"\"\n flip = True if np.random.rand() < self.flip_prob else False\n results['flip'] = flip\n results['flip_direction'] = self.direction\n if results['flip']:\n # flip image\n for key in results.get('img_fields', ['img']):\n results[key] = mmcv.imflip(\n results[key], direction=results['flip_direction'])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(flip_prob={self.flip_prob})'\n\n\n@PIPELINES.register_module()\nclass Resize(object):\n \"\"\"Resize images.\n\n Args:\n size (int | tuple): Images scales for resizing (h, w).\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\".\n More details can be found in `mmcv.image.geometric`.\n \"\"\"\n\n def __init__(self, size, interpolation='bilinear'):\n assert isinstance(size, int) or (isinstance(size, tuple)\n and len(size) == 2)\n if isinstance(size, int):\n size = (size, size)\n assert size[0] > 0 and size[1] > 0\n assert interpolation in (\"nearest\", \"bilinear\", \"bicubic\", \"area\",\n \"lanczos\")\n\n self.height = size[0]\n self.width = size[1]\n self.size = size\n self.interpolation = interpolation\n\n def _resize_img(self, results):\n for key in results.get('img_fields', ['img']):\n img = mmcv.imresize(\n results[key],\n size=(self.width, self.height),\n interpolation=self.interpolation,\n return_scale=False)\n results[key] = img\n results['img_shape'] = img.shape\n\n def __call__(self, results):\n self._resize_img(results)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(size={self.size}, '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str\n\n\n@PIPELINES.register_module()\nclass CenterCrop(object):\n \"\"\"Center crop the image.\n\n Args:\n crop_size (int | tuple): Expected size after cropping, (h, w).\n\n Notes:\n If the image is smaller than the crop size, return the original image\n \"\"\"\n\n def __init__(self, crop_size):\n assert isinstance(crop_size, int) or (isinstance(crop_size, tuple)\n and len(crop_size) == 2)\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size)\n assert crop_size[0] > 0 and crop_size[1] > 0\n self.crop_size = crop_size\n\n def __call__(self, results):\n crop_height, crop_width = self.crop_size[0], self.crop_size[1]\n for key in results.get('img_fields', ['img']):\n img = results[key]\n img_height, img_width, _ = img.shape\n\n y1 = max(0, int(round((img_height - crop_height) / 2.)))\n x1 = max(0, int(round((img_width - crop_width) / 2.)))\n y2 = min(img_height, y1 + crop_height) - 1\n x2 = min(img_width, x1 + crop_width) - 1\n\n # crop the image\n img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2]))\n img_shape = img.shape\n results[key] = img\n results['img_shape'] = img_shape\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(crop_size={self.crop_size})'\n\n\n@PIPELINES.register_module()\nclass Normalize(object):\n \"\"\"Normalize the image.\n\n Args:\n mean (sequence): Mean values of 3 channels.\n std (sequence): Std values 
of 3 channels.\n to_rgb (bool): Whether to convert the image from BGR to RGB,\n default is true.\n \"\"\"\n\n def __init__(self, mean, std, to_rgb=True):\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n self.to_rgb = to_rgb\n\n def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = mmcv.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(mean={list(self.mean)}, '\n repr_str += f'std={list(self.std)}, '\n repr_str += f'to_rgb={self.to_rgb})'\n return repr_str\n" ]
[ [ "numpy.array", "numpy.random.rand" ] ]
mdehollander/atlas
[ "805af79ecda60c9a0e86502cacda52206fa9ee10" ]
[ "atlas/report/assembly_report.py" ]
[ "import argparse\nimport os,sys\nf = open(os.devnull, 'w'); sys.stdout = f # block cufflinks to plot strange code\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom plotly import offline\nfrom cufflinks import iplot\nfrom snakemake.utils import report\n\nPLOTLY_PARAMS = dict(\n include_plotlyjs=False, show_link=False, output_type=\"div\", image_height=700\n)\n\natlas_dir= os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))\nsys.path.append(os.path.join(atlas_dir,'scripts'))\nfrom utils.parsers_bbmap import parse_bbmap_log_file\n\n\n\n\ndef parse_map_stats(sample_data, out_tsv):\n stats_df = pd.DataFrame()\n for sample in sample_data.keys():\n df = pd.read_csv(sample_data[sample][\"contig_stats\"],sep='\\t')\n assert df.shape[0] == 1, \"Assumed only one row in file {}; found {}\".format(\n sample_data[sample][\"contig_stats\"], df.iloc[0]\n )\n df = df.iloc[0]\n df.name = sample\n genes_df = pd.read_csv(sample_data[sample][\"gene_table\"], index_col=0,sep='\\t')\n df[\"N_Predicted_Genes\"] = genes_df.shape[0]\n used_reads,mapped_reads= parse_bbmap_log_file(sample_data[sample][\"mapping_log\"])\n df[\"Assembled_Reads\"] = mapped_reads\n df[\"Percent_Assembled_Reads\"] = mapped_reads/used_reads *100\n\n stats_df = stats_df.append(df)\n stats_df = stats_df.loc[:, ~ stats_df.columns.str.startswith(\"scaf_\")]\n stats_df.columns = stats_df.columns.str.replace(\"ctg_\", \"\")\n stats_df.to_csv(out_tsv, sep=\"\\t\")\n return stats_df\n\n\ndef main(samples, contig_stats, gene_tables, mapping_logs, report_out, combined_stats):\n sample_data = {}\n for sample in samples:\n sample_data[sample] = {}\n for c_stat in contig_stats:\n # underscore version was for simplified local testing\n # if \"%s_\" % sample in c_stat:\n if \"%s/\" % sample in c_stat:\n sample_data[sample][\"contig_stats\"] = c_stat\n for g_table in gene_tables:\n # if \"%s_\" % sample in g_table:\n if \"%s/\" % sample in g_table:\n sample_data[sample][\"gene_table\"] = g_table\n for mapping_log in mapping_logs:\n # if \"%s_\" % sample in mapping_log:\n if \"%s/\" % sample in mapping_log:\n sample_data[sample][\"mapping_log\"] = mapping_log\n df = parse_map_stats(sample_data, combined_stats)\n div = {}\n labels = {\n \"Percent_Assembled_Reads\": \"Percent of Assembled Reads\",\n \"contig_bp\": \"Total BP\",\n \"n_contigs\": \"Contigs (count)\",\n \"N_Predicted_Genes\": \"Predicted Genes (count)\",\n }\n for variable in [\n \"Percent_Assembled_Reads\", \"contig_bp\", \"n_contigs\", \"N_Predicted_Genes\"\n ]:\n y_axis_label = labels[variable]\n div[variable] = offline.plot(\n df[variable].iplot(\n asFigure=True,\n kind=\"bar\",\n xTitle=\"Samples\",\n layout=go.Layout(\n xaxis=dict(tickangle=45), yaxis=dict(title=y_axis_label)\n ),\n ),\n **PLOTLY_PARAMS,\n )\n div[\"N50\"] = offline.plot(\n df[[\"N50\", \"N90\"]].iplot(\n asFigure=True,\n kind=\"bar\",\n xTitle=\"Samples\",\n layout=go.Layout(xaxis=dict(tickangle=45), yaxis=(dict(title=\"Bases\"))),\n ),\n **PLOTLY_PARAMS,\n )\n report_str = \"\"\"\n\n.. raw:: html\n\n <script src=\"https://cdn.plot.ly/plotly-latest.min.js\"></script>\n\n\n=============================================================\nATLAS_ - Assembly Summary\n=============================================================\n\n.. _ATLAS: https://github.com/metagenome-atlas/atlas\n\n.. contents::\n :backlinks: none\n\n\nSummary\n-------\n\nN50\n***\n\n.. raw:: html\n\n {div[N50]}\n\n\nAssembly Length\n***************\n\n.. raw:: html\n\n {div[contig_bp]}\n\n\nNumber of Contigs\n*****************\n\n.. 
raw:: html\n\n {div[n_contigs]}\n\n\nNumber of Predicted Genes\n*************************\n\n.. raw:: html\n\n {div[N_Predicted_Genes]}\n\n\nPercent of Assembled Reads\n**************************\n\n.. raw:: html\n\n {div[Percent_Assembled_Reads]}\n\n\nFor more information see Table_1_\n\n\nDownloads\n---------\n\n\"\"\"\n report(report_str, report_out, Table_1=combined_stats, stylesheet=os.path.join(atlas_dir,'report', \"report.css\"))\n\n\n\n\nif __name__ == \"__main__\":\n\n try:\n main(\n samples=snakemake.params.samples,\n contig_stats=snakemake.input.contig_stats,\n gene_tables=snakemake.input.gene_tables,\n mapping_logs=snakemake.input.mapping_logs,\n report_out=snakemake.output.report,\n combined_stats=snakemake.output.combined_contig_stats\n )\n\n except NameError:\n\n p = argparse.ArgumentParser()\n p.add_argument(\"--samples\", nargs=\"+\")\n p.add_argument(\"--contig-stats\", nargs=\"+\")\n p.add_argument(\"--gene-tables\", nargs=\"+\")\n p.add_argument(\"--mapping-logs\", nargs=\"+\")\n p.add_argument(\"--report-out\")\n p.add_argument(\"--combined-stats\")\n args = p.parse_args()\n main(\n args.samples,\n args.contig_stats,\n args.gene_tables,\n args.mapping_logs,\n args.report_out,\n args.combined_stats,\n )\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
klarman-cell-observatory/pegasus
[ "d308540453cd1614ae564e466060fd0d332d6b7e" ]
[ "pegasus/tools/batch_correction.py" ]
[ "import time\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_categorical_dtype\nfrom scipy.sparse import issparse\nfrom pegasusio import MultimodalData\n\nfrom pegasus.tools import estimate_feature_statistics, select_features, X_from_rep\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom pegasusio import timer\n\n\n\ndef set_group_attribute(data: MultimodalData, attribute_string: str) -> None:\n \"\"\"Set group attributes used in batch correction.\n\n Batch correction assumes the differences in gene expression between channels are due to batch effects. However, in many cases, we know that channels can be partitioned into several groups and each group is biologically different from others. In this case, *pegasus* will only perform batch correction for channels within each group.\n\n Parameters\n ----------\n data: ``pegasusio.MultimodalData``\n Annotated data matrix with rows for cells and columns for genes.\n\n attribute_string: ``str``\n Attributes used to construct groups:\n\n * ``None``\n Assume all channels are from one group.\n\n * ``attr``\n Define groups by sample attribute ``attr``, which is a keyword in ``data.obs``.\n\n * ``att1+att2+...+attrn``\n Define groups by the Cartesian product of these *n* attributes, which are keywords in ``data.obs``.\n\n * ``attr=value_11,...value_1n_1;value_21,...value_2n_2;...;value_m1,...,value_mn_m``\n In this form, there will be *(m+1)* groups. A cell belongs to group *i* (*i > 1*) if and only if its sample attribute ``attr``, which is a keyword in ``data.obs``, has a value among ``value_i1``, ... ``value_in_i``. A cell belongs to group 0 if it does not belong to any other groups.\n\n Returns\n -------\n None\n\n Update ``data.obs``:\n\n * ``data.obs[\"Group\"]``: Group ID for each cell.\n\n Examples\n --------\n\n >>> pg.set_group_attribute(data, attr_string = \"Individual\")\n\n >>> pg.set_group_attribute(data, attr_string = \"Individual+assignment\")\n\n >>> pg.set_group_attribute(data, attr_string = \"Channel=1,3,5;2,4,6,8\")\n \"\"\"\n\n if attribute_string.find(\"=\") >= 0:\n attr, value_str = attribute_string.split(\"=\")\n assert attr in data.obs.columns\n values = value_str.split(\";\")\n data.obs[\"Group\"] = \"0\"\n for group_id, value in enumerate(values):\n vals = value.split(\",\")\n idx = np.isin(data.obs[attr], vals)\n data.obs.loc[idx, \"Group\"] = str(group_id + 1)\n elif attribute_string.find(\"+\") >= 0:\n attrs = attribute_string.split(\"+\")\n assert np.isin(attrs, data.obs.columns).sum() == len(attrs)\n data.obs[\"Group\"] = data.obs[attrs].apply(lambda x: \"+\".join(x), axis=1)\n else:\n assert attribute_string in data.obs.columns\n data.obs[\"Group\"] = data.obs[attribute_string]\n\n\ndef estimate_adjustment_matrices(data: MultimodalData) -> bool:\n \"\"\" Estimate adjustment matrices\n \"\"\"\n\n if \"plus\" in data.varm.keys() or \"muls\" in data.varm.keys():\n # This only happens if this is for subclustering. Thus do not calculate factors, using factors calculated from parent for batch correction.\n assert \"plus\" in data.varm.keys() and \"muls\" in data.varm.keys()\n return True\n\n if (\"gmeans\" not in data.varm) or (\"gstds\" not in data.varm):\n estimate_feature_statistics(data, True)\n\n if data.uns[\"Channels\"].size == 1:\n logger.warning(\n \"Warning: data only contains 1 channel. 
Batch correction disabled!\"\n )\n return False\n\n nchannel = data.uns[\"Channels\"].size\n\n plus = np.zeros((data.shape[1], nchannel))\n muls = np.zeros((data.shape[1], nchannel))\n\n ncells = data.uns[\"ncells\"]\n means = data.varm[\"means\"]\n partial_sum = data.varm[\"partial_sum\"]\n gmeans = data.varm[\"gmeans\"]\n gstds = data.varm[\"gstds\"]\n c2gid = data.uns[\"c2gid\"]\n for i in range(data.uns[\"Channels\"].size):\n if ncells[i] > 1:\n muls[:, i] = (partial_sum[:, i] / (ncells[i] - 1.0)) ** 0.5\n outliers = muls[:, i] < 1e-6\n normals = np.logical_not(outliers)\n muls[outliers, i] = 1.0\n muls[normals, i] = gstds[normals, c2gid[i]] / muls[normals, i]\n plus[:, i] = gmeans[:, c2gid[i]] - muls[:, i] * means[:, i]\n\n data.varm[\"plus\"] = plus\n data.varm[\"muls\"] = muls\n\n return True\n\ndef correct_batch_effects(data: MultimodalData, keyword: str, features: str = None) -> None:\n \"\"\" Apply calculated plus and muls to correct batch effects for a dense matrix\n \"\"\"\n X = data.uns[keyword]\n m = X.shape[1]\n if features is not None:\n selected = data.var[features].values\n plus = data.varm[\"plus\"][selected, :]\n muls = data.varm[\"muls\"][selected, :]\n else:\n selected = np.ones(data.shape[1], dtype=bool)\n plus = data.varm[\"plus\"]\n muls = data.varm[\"muls\"]\n\n for i, channel in enumerate(data.uns[\"Channels\"]):\n idx = np.isin(data.obs[\"Channel\"], channel)\n if idx.sum() == 0:\n continue\n X[idx] = X[idx] * np.reshape(muls[:, i], newshape=(1, m)) + np.reshape(\n plus[:, i], newshape=(1, m)\n )\n data.uns[\"_tmp_ls_\" + str(features)] = True\n\n\ndef correct_batch(data: MultimodalData, features: str = None) -> None:\n \"\"\"Batch correction on data using Location-Scale (L/S) Adjustment method. ([Li-and-Wong03]_, [Li20]_). If L/S adjustment method is used, users must call this function every time before they call the pca function.\n\n Parameters\n ----------\n data: ``pegasusio.MultimodalData``\n Annotated data matrix with rows for cells and columns for genes.\n\n features: `str`, optional, default: ``None``\n Features to be included in batch correction computation. If ``None``, simply consider all features.\n\n Returns\n -------\n ``None``\n\n Update ``data.X`` by the corrected count matrix.\n\n Examples\n --------\n >>> pg.correct_batch(data, features = \"highly_variable_features\")\n \"\"\"\n\n tot_seconds = 0.0\n\n # estimate adjustment parameters\n start = time.perf_counter()\n can_correct = estimate_adjustment_matrices(data)\n end = time.perf_counter()\n tot_seconds += end - start\n logger.info(\"Adjustment parameters are estimated.\")\n\n # select dense matrix\n keyword = select_features(data, features=features, standardize=False, max_value=None) # do not standardize or truncate max_value\n logger.info(\"Features are selected.\")\n\n if can_correct:\n start = time.perf_counter()\n correct_batch_effects(data, keyword, features)\n end = time.perf_counter()\n tot_seconds += end - start\n logger.info(\n \"Batch correction is finished. 
Time spent = {:.2f}s.\".format(tot_seconds)\n )\n\n\n@timer(logger=logger)\ndef run_harmony(\n data: MultimodalData,\n batch: str = 'Channel',\n rep: str = 'pca',\n n_jobs: int = -1,\n n_clusters: int = None,\n random_state: int = 0,\n use_gpu: bool = False,\n) -> str:\n \"\"\"Batch correction on PCs using Harmony.\n\n This is a wrapper of `harmony-pytorch <https://github.com/lilab-bcb/harmony-pytorch>`_ package, which is a Pytorch implementation of Harmony algorithm [Korsunsky19]_.\n\n Parameters\n ----------\n data: ``MultimodalData``.\n Annotated data matrix with rows for cells and columns for genes.\n\n batch: ``str``, optional, default: ``\"Channel\"``.\n Which attribute in data.obs field represents batches, default is \"Channel\".\n\n rep: ``str``, optional, default: ``\"pca\"``.\n Which representation to use as input of Harmony, default is PCA.\n\n n_jobs : ``int``, optional, default: ``-1``.\n Number of threads to use in Harmony. ``-1`` refers to using all physical CPU cores.\n\n n_clusters: ``int``, optional, default: ``None``.\n Number of Harmony clusters. Default is ``None``, which asks Harmony to estimate this number from the data.\n\n random_state: ``int``, optional, default: ``0``.\n Seed for random number generator\n\n use_gpu: ``bool``, optional, default: ``False``.\n If ``True``, use GPU if available. Otherwise, use CPU only.\n\n Returns\n -------\n out_rep: ``str``\n The keyword in ``data.obsm`` referring to the embedding calculated by Harmony algorithm.\n\n This keyword is ``rep + '_harmony'``, where ``rep`` is the input parameter above.\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_rep]``: The embedding calculated by Harmony algorithm.\n\n Examples\n --------\n >>> pg.run_harmony(data, rep = \"pca\", n_jobs = 10, random_state = 25)\n \"\"\"\n if not is_categorical_dtype(data.obs[batch]):\n data.obs[batch] = pd.Categorical(data.obs[batch])\n if data.obs[batch].cat.categories.size == 1:\n logger.warning(\"Warning: data only contains 1 batch. Cannot apply Harmony!\")\n return rep\n\n try:\n from harmony import harmonize\n except ImportError as e:\n import sys\n logger.error(f\"{e}\\nNeed Harmony! Try 'pip install harmony-pytorch'.\")\n sys.exit(-1)\n\n logger.info(\"Start integration using Harmony.\")\n out_rep = rep + '_harmony'\n data.obsm['X_' + out_rep] = harmonize(\n X_from_rep(data, rep),\n data.obs,\n batch,\n n_clusters = n_clusters,\n n_jobs = n_jobs,\n random_state = random_state,\n use_gpu = use_gpu,\n )\n return out_rep\n\n\n@timer(logger=logger)\ndef run_scanorama(\n data: MultimodalData,\n batch: str = 'Channel',\n n_components: int = 50,\n features: str = \"highly_variable_features\",\n standardize: bool = True,\n max_value: float = 10.0,\n random_state: int = 0,\n) -> str:\n \"\"\"Batch correction using Scanorama.\n\n This is a wrapper of `Scanorama <https://github.com/brianhie/scanorama>`_ package. See [Hie19]_ for details on the algorithm.\n\n Parameters\n ----------\n data: ``MultimodalData``.\n Annotated data matrix with rows for cells and columns for genes.\n\n batch: ``str``, optional, default: ``\"Channel\"``.\n Which attribute in data.obs field represents batches, default is \"Channel\".\n\n n_components: ``int``, optional default: ``50``.\n Number of integrated embedding components to keep. 
This sets Scanorama's dimred parameter.\n\n features: ``str``, optional, default: ``\"highly_variable_features\"``.\n Keyword in ``data.var`` to specify features used for Scanorama.\n\n standardize: ``bool``, optional, default: ``True``.\n Whether to scale the data to unit variance and zero mean.\n\n max_value: ``float``, optional, default: ``10``.\n The threshold to truncate data after scaling. If ``None``, do not truncate.\n\n random_state: ``int``, optional, default: ``0``.\n Seed for random number generator.\n\n Returns\n -------\n out_rep: ``str``\n The keyword in ``data.obsm`` referring to the embedding calculated by Scanorama algorithm. out_rep is always equal to \"scanorama\"\n\n Update ``data.obsm``:\n * ``data.obsm['X_scanorama']``: The embedding calculated by Scanorama algorithm.\n\n Examples\n --------\n >>> pg.run_scanorama(data, random_state = 25)\n \"\"\"\n if not is_categorical_dtype(data.obs[batch]):\n data.obs[batch] = pd.Categorical(data.obs[batch])\n if data.obs[batch].cat.categories.size == 1:\n logger.warning(\"Warning: data only contains 1 batch. Cannot apply Scanorama!\")\n return 'pca'\n\n try:\n from scanorama import integrate\n except ImportError as e:\n import sys\n logger.error(f\"{e}\\nNeed Scanorama! Try 'pip install scanorama'.\")\n sys.exit(-1)\n\n logger.info(\"Start integration using Scanorama.\")\n\n rep = 'scanorama'\n keyword = select_features(data, features=features, standardize=standardize, max_value=max_value)\n X = data.uns[keyword]\n\n datasets = []\n for channel in data.obs[batch].cat.categories:\n idx = (data.obs[batch] == channel).values\n assert idx.sum() > 0\n datasets.append(X[idx, :])\n genes_list = [[str(i) for i in range(X.shape[1])]] * data.obs[batch].cat.categories.size\n\n integrated, genes = integrate(datasets, genes_list, dimred = n_components, seed = random_state)\n data.obsm[f'X_{rep}'] = np.concatenate(integrated, axis = 0)\n\n return rep\n" ]
[ [ "numpy.logical_not", "pandas.api.types.is_categorical_dtype", "numpy.reshape", "pandas.Categorical", "numpy.ones", "numpy.concatenate", "numpy.zeros", "numpy.isin" ] ]
TomHodson/scipy
[ "8de4fa75b126416627978baaf137c05cb00f847e" ]
[ "scipy/stats/__init__.py" ]
[ "\"\"\"\n.. _statsrefmanual:\n\n==========================================\nStatistical functions (:mod:`scipy.stats`)\n==========================================\n\n.. currentmodule:: scipy.stats\n\nThis module contains a large number of probability distributions as\nwell as a growing library of statistical functions.\n\nEach univariate distribution is an instance of a subclass of `rv_continuous`\n(`rv_discrete` for discrete distributions):\n\n.. autosummary::\n :toctree: generated/\n\n rv_continuous\n rv_discrete\n rv_histogram\n\nContinuous distributions\n========================\n\n.. autosummary::\n :toctree: generated/\n\n alpha -- Alpha\n anglit -- Anglit\n arcsine -- Arcsine\n argus -- Argus\n beta -- Beta\n betaprime -- Beta Prime\n bradford -- Bradford\n burr -- Burr (Type III)\n burr12 -- Burr (Type XII)\n cauchy -- Cauchy\n chi -- Chi\n chi2 -- Chi-squared\n cosine -- Cosine\n crystalball -- Crystalball\n dgamma -- Double Gamma\n dweibull -- Double Weibull\n erlang -- Erlang\n expon -- Exponential\n exponnorm -- Exponentially Modified Normal\n exponweib -- Exponentiated Weibull\n exponpow -- Exponential Power\n f -- F (Snecdor F)\n fatiguelife -- Fatigue Life (Birnbaum-Saunders)\n fisk -- Fisk\n foldcauchy -- Folded Cauchy\n foldnorm -- Folded Normal\n genlogistic -- Generalized Logistic\n gennorm -- Generalized normal\n genpareto -- Generalized Pareto\n genexpon -- Generalized Exponential\n genextreme -- Generalized Extreme Value\n gausshyper -- Gauss Hypergeometric\n gamma -- Gamma\n gengamma -- Generalized gamma\n genhalflogistic -- Generalized Half Logistic\n geninvgauss -- Generalized Inverse Gaussian\n gilbrat -- Gilbrat\n gompertz -- Gompertz (Truncated Gumbel)\n gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I\n gumbel_l -- Left Sided Gumbel, etc.\n halfcauchy -- Half Cauchy\n halflogistic -- Half Logistic\n halfnorm -- Half Normal\n halfgennorm -- Generalized Half Normal\n hypsecant -- Hyperbolic Secant\n invgamma -- Inverse Gamma\n invgauss -- Inverse Gaussian\n invweibull -- Inverse Weibull\n johnsonsb -- Johnson SB\n johnsonsu -- Johnson SU\n kappa4 -- Kappa 4 parameter\n kappa3 -- Kappa 3 parameter\n ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic\n kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic\n kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.\n laplace -- Laplace\n levy -- Levy\n levy_l\n levy_stable\n logistic -- Logistic\n loggamma -- Log-Gamma\n loglaplace -- Log-Laplace (Log Double Exponential)\n lognorm -- Log-Normal\n loguniform -- Log-Uniform\n lomax -- Lomax (Pareto of the second kind)\n maxwell -- Maxwell\n mielke -- Mielke's Beta-Kappa\n moyal -- Moyal\n nakagami -- Nakagami\n ncx2 -- Non-central chi-squared\n ncf -- Non-central F\n nct -- Non-central Student's T\n norm -- Normal (Gaussian)\n norminvgauss -- Normal Inverse Gaussian\n pareto -- Pareto\n pearson3 -- Pearson type III\n powerlaw -- Power-function\n powerlognorm -- Power log normal\n powernorm -- Power normal\n rdist -- R-distribution\n rayleigh -- Rayleigh\n rice -- Rice\n recipinvgauss -- Reciprocal Inverse Gaussian\n semicircular -- Semicircular\n skewnorm -- Skew normal\n t -- Student's T\n trapz -- Trapezoidal\n triang -- Triangular\n truncexpon -- Truncated Exponential\n truncnorm -- Truncated Normal\n tukeylambda -- Tukey-Lambda\n uniform -- Uniform\n vonmises -- Von-Mises (Circular)\n vonmises_line -- Von-Mises (Line)\n wald -- Wald\n weibull_min -- Minimum Weibull 
(see Frechet)\n weibull_max -- Maximum Weibull (see Frechet)\n wrapcauchy -- Wrapped Cauchy\n\nMultivariate distributions\n==========================\n\n.. autosummary::\n :toctree: generated/\n\n multivariate_normal -- Multivariate normal distribution\n matrix_normal -- Matrix normal distribution\n dirichlet -- Dirichlet\n wishart -- Wishart\n invwishart -- Inverse Wishart\n multinomial -- Multinomial distribution\n special_ortho_group -- SO(N) group\n ortho_group -- O(N) group\n unitary_group -- U(N) group\n random_correlation -- random correlation matrices\n\nDiscrete distributions\n======================\n\n.. autosummary::\n :toctree: generated/\n\n bernoulli -- Bernoulli\n betabinom -- Beta-Binomial\n binom -- Binomial\n boltzmann -- Boltzmann (Truncated Discrete Exponential)\n dlaplace -- Discrete Laplacian\n geom -- Geometric\n hypergeom -- Hypergeometric\n logser -- Logarithmic (Log-Series, Series)\n nbinom -- Negative Binomial\n nhypergeom -- Negative Hypergeometric\n planck -- Planck (Discrete Exponential)\n poisson -- Poisson\n randint -- Discrete Uniform\n skellam -- Skellam\n zipf -- Zipf\n yulesimon -- Yule-Simon\n\nAn overview of statistical functions is given below.\nSeveral of these functions have a similar version in\n`scipy.stats.mstats` which work for masked arrays.\n\nSummary statistics\n==================\n\n.. autosummary::\n :toctree: generated/\n\n describe -- Descriptive statistics\n gmean -- Geometric mean\n hmean -- Harmonic mean\n kurtosis -- Fisher or Pearson kurtosis\n mode -- Modal value\n moment -- Central moment\n skew -- Skewness\n kstat --\n kstatvar --\n tmean -- Truncated arithmetic mean\n tvar -- Truncated variance\n tmin --\n tmax --\n tstd --\n tsem --\n variation -- Coefficient of variation\n find_repeats\n trim_mean\n gstd -- Geometric Standard Deviation\n iqr\n sem\n bayes_mvs\n mvsdist\n entropy\n median_absolute_deviation\n median_abs_deviation\n\nFrequency statistics\n====================\n\n.. autosummary::\n :toctree: generated/\n\n cumfreq\n itemfreq\n percentileofscore\n scoreatpercentile\n relfreq\n\n.. autosummary::\n :toctree: generated/\n\n binned_statistic -- Compute a binned statistic for a set of data.\n binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.\n binned_statistic_dd -- Compute a d-D binned statistic for a set of data.\n\nCorrelation functions\n=====================\n\n.. autosummary::\n :toctree: generated/\n\n f_oneway\n pearsonr\n spearmanr\n pointbiserialr\n kendalltau\n weightedtau\n linregress\n siegelslopes\n theilslopes\n multiscale_graphcorr\n\nStatistical tests\n=================\n\n.. autosummary::\n :toctree: generated/\n\n ttest_1samp\n ttest_ind\n ttest_ind_from_stats\n ttest_rel\n chisquare\n cramervonmises\n power_divergence\n kstest\n ks_1samp\n ks_2samp\n epps_singleton_2samp\n mannwhitneyu\n tiecorrect\n rankdata\n ranksums\n wilcoxon\n kruskal\n friedmanchisquare\n brunnermunzel\n combine_pvalues\n jarque_bera\n\n.. autosummary::\n :toctree: generated/\n\n ansari\n bartlett\n levene\n shapiro\n anderson\n anderson_ksamp\n binom_test\n fligner\n median_test\n mood\n skewtest\n kurtosistest\n normaltest\n\nTransformations\n===============\n\n.. autosummary::\n :toctree: generated/\n\n boxcox\n boxcox_normmax\n boxcox_llf\n yeojohnson\n yeojohnson_normmax\n yeojohnson_llf\n obrientransform\n sigmaclip\n trimboth\n trim1\n zmap\n zscore\n\nStatistical distances\n=====================\n\n.. 
autosummary::\n :toctree: generated/\n\n wasserstein_distance\n energy_distance\n\nRandom variate generation\n=========================\n\n.. autosummary::\n :toctree: generated/\n\n rvs_ratio_uniforms\n\nCircular statistical functions\n==============================\n\n.. autosummary::\n :toctree: generated/\n\n circmean\n circvar\n circstd\n\nContingency table functions\n===========================\n\n.. autosummary::\n :toctree: generated/\n\n chi2_contingency\n contingency.expected_freq\n contingency.margins\n fisher_exact\n\nPlot-tests\n==========\n\n.. autosummary::\n :toctree: generated/\n\n ppcc_max\n ppcc_plot\n probplot\n boxcox_normplot\n yeojohnson_normplot\n\n\nMasked statistics functions\n===========================\n\n.. toctree::\n\n stats.mstats\n\n\nUnivariate and multivariate kernel density estimation\n=====================================================\n\n.. autosummary::\n :toctree: generated/\n\n gaussian_kde\n\nWarnings used in :mod:`scipy.stats`\n===================================\n\n.. autosummary::\n :toctree: generated/\n\n F_onewayConstantInputWarning\n F_onewayBadInputSizesWarning\n PearsonRConstantInputWarning\n PearsonRNearConstantInputWarning\n SpearmanRConstantInputWarning\n\nFor many more stat related functions install the software R and the\ninterface package rpy.\n\n\"\"\"\nfrom .stats import *\nfrom .distributions import *\nfrom .morestats import *\nfrom ._binned_statistic import *\nfrom .kde import gaussian_kde\nfrom . import mstats\nfrom .contingency import chi2_contingency\nfrom ._multivariate import *\n\n__all__ = [s for s in dir() if not s.startswith(\"_\")] # Remove dunders.\n\nfrom scipy._lib._testutils import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n" ]
[ [ "scipy._lib._testutils.PytestTester" ] ]
YZNIU/Cirq
[ "6c996e9fd57d0898c31c8ebbe7fe23f88aa96cf9" ]
[ "cirq/ops/partial_reflection_gate_test.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport numpy as np\n\nimport cirq\nfrom cirq.ops.partial_reflection_gate import PartialReflectionGate\nfrom cirq.study import ParamResolver\nfrom cirq.testing import EqualsTester\nfrom cirq.value import Symbol\n\n\nclass DummyGate(PartialReflectionGate):\n\n def _with_half_turns(self, half_turns: Union[Symbol, float] = 1.0):\n return DummyGate(half_turns=half_turns)\n\n def text_diagram_wire_symbols(self, **kwargs):\n return 'D',\n\n def _reflection_matrix(self):\n return np.diag([1, -1])\n\n\ndef test_partial_reflection_gate_init():\n assert DummyGate(half_turns=0.5).half_turns == 0.5\n assert DummyGate(half_turns=5).half_turns == 1\n\n\ndef test_partial_reflection_gate_eq():\n eq = EqualsTester()\n eq.add_equality_group(DummyGate(), DummyGate(half_turns=1))\n eq.add_equality_group(DummyGate(half_turns=3.5), DummyGate(half_turns=-0.5))\n eq.make_equality_pair(lambda: DummyGate(half_turns=Symbol('a')))\n eq.make_equality_pair(lambda: DummyGate(half_turns=Symbol('b')))\n eq.make_equality_pair(lambda: DummyGate(half_turns=0))\n eq.add_equality_group(DummyGate(half_turns=0.5),\n DummyGate(rads=np.pi / 2),\n DummyGate(degs=90))\n\n\ndef test_partial_reflection_gate_extrapolate():\n assert (DummyGate(half_turns=1).extrapolate_effect(0.5) ==\n DummyGate(half_turns=0.5))\n assert DummyGate()**-0.25 == DummyGate(half_turns=1.75)\n\n\ndef test_partial_reflection_gate_inverse():\n assert DummyGate().inverse() == DummyGate(half_turns=-1)\n assert DummyGate(half_turns=0.25).inverse() == DummyGate(half_turns=-0.25)\n\n\ndef test_partial_reflection_as_self_inverse():\n ex = cirq.Extensions()\n h0 = DummyGate(half_turns=0)\n h1 = DummyGate(half_turns=1)\n\n assert ex.try_cast(h1, cirq.SelfInverseGate) is h1\n assert ex.try_cast(h0, cirq.SelfInverseGate) is h0\n assert ex.try_cast(DummyGate(half_turns=0.5),\n cirq.SelfInverseGate) is None\n assert ex.try_cast(DummyGate(half_turns=-0.5),\n cirq.SelfInverseGate) is None\n\n\ndef test_partial_reflection_gate_str():\n assert str(DummyGate(half_turns=.25)) == 'D**0.25'\n\n\ndef test_partial_reflection_gate_trace_bound():\n assert DummyGate(half_turns=.001).trace_distance_bound() < 0.01\n assert DummyGate(half_turns=cirq.Symbol('a')).trace_distance_bound() >= 1\n\n\ndef test_partial_reflection_gate_with_parameters_resolved_by():\n gate = DummyGate(half_turns=Symbol('a'))\n resolver = ParamResolver({'a': 0.1})\n resolved_gate = gate.with_parameters_resolved_by(resolver)\n assert resolved_gate.half_turns == 0.1\n\n\ndef test_partial_reflection_gate_matrix():\n np.testing.assert_allclose(DummyGate(half_turns=1).matrix(),\n np.diag([1, -1]),\n atol=1e-8)\n\n np.testing.assert_allclose(DummyGate(half_turns=0.5).matrix(),\n np.diag([1, 1j]),\n atol=1e-8)\n\n np.testing.assert_allclose(DummyGate(half_turns=-0.5).matrix(),\n np.diag([1, -1j]),\n atol=1e-8)\n" ]
[ [ "numpy.diag" ] ]
danielwilczak101/EasyNN
[ "89319e974c324dda228c6ecff7c39d723eda3ca2" ]
[ "EasyNN/model/activation/abc.py" ]
[ "from __future__ import annotations\nfrom abc import ABC\nimport numpy as np\nfrom EasyNN.model.abc import Model_1D\nfrom EasyNN.typing import Array1D\n\n\nclass Activation(Model_1D, ABC):\n \"\"\"\n Activation Models typically involve vectorized\n functions applied over the last dimension.\n \"\"\"\n _parameters: Array1D = np.empty(0, dtype=float)\n _derivatives: Array1D = np.empty(0, dtype=float)\n\n def __setup__(self: Activation) -> None:\n pass\n" ]
[ [ "numpy.empty" ] ]
Europium248/captum
[ "ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc", "ac02fae2651b8d68a44bcb9d03b91cbb3959f2fc" ]
[ "captum/attr/_core/guided_grad_cam.py", "tests/attr/neuron/test_neuron_conductance.py" ]
[ "#!/usr/bin/env python3\nimport warnings\nfrom typing import Any, List, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nfrom captum.log import log_usage\n\nfrom ..._utils.common import _format_input, _format_output, _is_tuple\nfrom ..._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric\nfrom .._utils.attribution import GradientAttribution, LayerAttribution\nfrom .guided_backprop_deconvnet import GuidedBackprop\nfrom .layer.grad_cam import LayerGradCam\n\n\nclass GuidedGradCam(GradientAttribution):\n r\"\"\"\n Computes element-wise product of guided backpropagation attributions\n with upsampled (non-negative) GradCAM attributions.\n GradCAM attributions are computed with respect to the layer\n provided in the constructor, and attributions\n are upsampled to match the input size. GradCAM is designed for\n convolutional neural networks, and is usually applied to the last\n convolutional layer.\n\n Note that if multiple input tensors are provided, attributions for\n each input tensor are computed by upsampling the GradCAM\n attributions to match that input's dimensions. If interpolation is\n not possible for the input tensor dimensions and interpolation mode,\n then an empty tensor is returned in the attributions for the\n corresponding position of that input tensor. This can occur if the\n input tensor does not have the same number of dimensions as the chosen\n layer's output or is not either 3D, 4D or 5D.\n\n Note that attributions are only meaningful for input tensors\n which are spatially alligned with the chosen layer, e.g. an input\n image tensor for a convolutional layer.\n\n More details regarding GuidedGradCAM can be found in the original\n GradCAM paper here:\n https://arxiv.org/pdf/1610.02391.pdf\n\n Warning: Ensure that all ReLU operations in the forward function of the\n given model are performed using a module (nn.module.ReLU).\n If nn.functional.ReLU is used, gradients are not overridden appropriately.\n \"\"\"\n\n def __init__(\n self, model: Module, layer: Module, device_ids: Union[None, List[int]] = None\n ) -> None:\n r\"\"\"\n Args:\n\n model (nn.Module): The reference to PyTorch model instance.\n layer (torch.nn.Module): Layer for which GradCAM attributions are computed.\n Currently, only layers with a single tensor output are\n supported.\n device_ids (list(int)): Device ID list, necessary only if forward_func\n applies a DataParallel model. This allows reconstruction of\n intermediate outputs from batched results across devices.\n If forward_func is given as the DataParallel model itself,\n then it is not necessary to provide this argument.\n \"\"\"\n GradientAttribution.__init__(self, model)\n self.grad_cam = LayerGradCam(model, layer, device_ids)\n self.guided_backprop = GuidedBackprop(model)\n\n @log_usage()\n def attribute(\n self,\n inputs: TensorOrTupleOfTensorsGeneric,\n target: TargetType = None,\n additional_forward_args: Any = None,\n interpolate_mode: str = \"nearest\",\n attribute_to_layer_input: bool = False,\n ) -> TensorOrTupleOfTensorsGeneric:\n r\"\"\"\n Args:\n\n inputs (tensor or tuple of tensors): Input for which attributions\n are computed. If forward_func takes a single\n tensor as input, a single input tensor should be provided.\n If forward_func takes multiple tensors as input, a tuple\n of the input tensors should be provided. 
It is assumed\n that for all given input tensors, dimension 0 corresponds\n to the number of examples, and if multiple input tensors\n are provided, the examples must be aligned appropriately.\n target (int, tuple, tensor or list, optional): Output indices for\n which gradients are computed (for classification cases,\n this is usually the target class).\n If the network returns a scalar value per example,\n no target index is necessary.\n For general 2D outputs, targets can be either:\n\n - a single integer or a tensor containing a single\n integer, which is applied to all input examples\n\n - a list of integers or a 1D tensor, with length matching\n the number of examples in inputs (dim 0). Each integer\n is applied as the target for the corresponding example.\n\n For outputs with > 2 dimensions, targets can be either:\n\n - A single tuple, which contains #output_dims - 1\n elements. This target index is applied to all examples.\n\n - A list of tuples with length equal to the number of\n examples in inputs (dim 0), and each tuple containing\n #output_dims - 1 elements. Each tuple is applied as the\n target for the corresponding example.\n\n Default: None\n additional_forward_args (any, optional): If the forward function\n requires additional arguments other than the inputs for\n which attributions should not be computed, this argument\n can be provided. It must be either a single additional\n argument of a Tensor or arbitrary (non-tuple) type or a\n tuple containing multiple additional arguments including\n tensors or any arbitrary python types. These arguments\n are provided to forward_func in order following the\n arguments in inputs.\n Note that attributions are not computed with respect\n to these arguments.\n Default: None\n interpolate_mode (str, optional): Method for interpolation, which\n must be a valid input interpolation mode for\n torch.nn.functional. These methods are\n \"nearest\", \"area\", \"linear\" (3D-only), \"bilinear\"\n (4D-only), \"bicubic\" (4D-only), \"trilinear\" (5D-only)\n based on the number of dimensions of the chosen layer\n output (which must also match the number of\n dimensions for the input tensor). Note that\n the original GradCAM paper uses \"bilinear\"\n interpolation, but we default to \"nearest\" for\n applicability to any of 3D, 4D or 5D tensors.\n Default: \"nearest\"\n attribute_to_layer_input (bool, optional): Indicates whether to\n compute the attribution with respect to the layer input\n or output in `LayerGradCam`.\n If `attribute_to_layer_input` is set to True\n then the attributions will be computed with respect to\n layer inputs, otherwise it will be computed with respect\n to layer outputs.\n Note that currently it is assumed that either the input\n or the output of internal layer, depending on whether we\n attribute to the input or output, is a single tensor.\n Support for multiple tensors will be added later.\n Default: False\n\n Returns:\n *tensor* of **attributions**:\n - **attributions** (*tensor*):\n Element-wise product of (upsampled) GradCAM\n and Guided Backprop attributions.\n If a single tensor is provided as inputs, a single tensor is\n returned. 
If a tuple is provided for inputs, a tuple of\n corresponding sized tensors is returned.\n Attributions will be the same size as the provided inputs,\n with each value providing the attribution of the\n corresponding input index.\n If the GradCAM attributions cannot be upsampled to the shape\n of a given input tensor, None is returned in the corresponding\n index position.\n\n\n Examples::\n\n >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,\n >>> # and returns an Nx10 tensor of class probabilities.\n >>> # It contains an attribute conv4, which is an instance of nn.conv2d,\n >>> # and the output of this layer has dimensions Nx50x8x8.\n >>> # It is the last convolution layer, which is the recommended\n >>> # use case for GuidedGradCAM.\n >>> net = ImageClassifier()\n >>> guided_gc = GuidedGradCam(net, net.conv4)\n >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)\n >>> # Computes guided GradCAM attributions for class 3.\n >>> # attribution size matches input size, Nx3x32x32\n >>> attribution = guided_gc.attribute(input, 3)\n \"\"\"\n is_inputs_tuple = _is_tuple(inputs)\n inputs = _format_input(inputs)\n grad_cam_attr = self.grad_cam.attribute.__wrapped__(\n self.grad_cam, # self\n inputs=inputs,\n target=target,\n additional_forward_args=additional_forward_args,\n attribute_to_layer_input=attribute_to_layer_input,\n relu_attributions=True,\n )\n if isinstance(grad_cam_attr, tuple):\n assert len(grad_cam_attr) == 1, (\n \"GuidedGradCAM attributions for layer with multiple inputs / \"\n \"outputs is not supported.\"\n )\n grad_cam_attr = grad_cam_attr[0]\n guided_backprop_attr = self.guided_backprop.attribute.__wrapped__(\n self.guided_backprop, # self\n inputs=inputs,\n target=target,\n additional_forward_args=additional_forward_args,\n )\n output_attr: List[Tensor] = []\n for i in range(len(inputs)):\n try:\n output_attr.append(\n guided_backprop_attr[i]\n * LayerAttribution.interpolate(\n grad_cam_attr,\n inputs[i].shape[2:],\n interpolate_mode=interpolate_mode,\n )\n )\n except Exception:\n warnings.warn(\n \"Couldn't appropriately interpolate GradCAM attributions for some \"\n \"input tensors, returning empty tensor for corresponding \"\n \"attributions.\"\n )\n output_attr.append(torch.empty(0))\n\n return _format_output(is_inputs_tuple, tuple(output_attr))\n", "#!/usr/bin/env python3\n\nimport unittest\nfrom typing import Any, List, Tuple, Union, cast\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Module\n\nfrom captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric\nfrom captum.attr._core.layer.layer_conductance import LayerConductance\nfrom captum.attr._core.neuron.neuron_conductance import NeuronConductance\n\nfrom ...helpers.basic import BaseTest, assertArraysAlmostEqual\nfrom ...helpers.basic_models import (\n BasicModel_ConvNet,\n BasicModel_MultiLayer,\n BasicModel_MultiLayer_MultiInput,\n)\n\n\nclass Test(BaseTest):\n def test_simple_conductance_input_linear2(self) -> None:\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)\n self._conductance_input_test_assert(\n net, net.linear2, inp, (0,), [0.0, 390.0, 0.0]\n )\n\n def test_simple_conductance_input_linear2_wo_mult_by_inputs(self) -> None:\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[100.0, 100.0, 100.0]], requires_grad=True)\n self._conductance_input_test_assert(\n net, net.linear2, inp, (0,), [3.96, 3.96, 3.96], multiply_by_inputs=False,\n )\n\n def test_simple_conductance_input_linear1(self) -> None:\n net = 
BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]])\n self._conductance_input_test_assert(net, net.linear1, inp, 0, [0.0, 90.0, 0.0])\n\n def test_simple_conductance_input_relu(self) -> None:\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 70.0, 30.0]], requires_grad=True)\n self._conductance_input_test_assert(net, net.relu, inp, (3,), [0.0, 70.0, 30.0])\n\n def test_simple_conductance_multi_input_linear2(self) -> None:\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 0.0]])\n inp2 = torch.tensor([[0.0, 10.0, 0.0]])\n inp3 = torch.tensor([[0.0, 5.0, 0.0]])\n self._conductance_input_test_assert(\n net,\n net.model.linear2,\n (inp1, inp2, inp3),\n (0,),\n ([[0.0, 156.0, 0.0]], [[0.0, 156.0, 0.0]], [[0.0, 78.0, 0.0]]),\n (4,),\n )\n\n def test_simple_conductance_multi_input_relu(self) -> None:\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 1.0]])\n inp2 = torch.tensor([[0.0, 4.0, 5.0]])\n inp3 = torch.tensor([[0.0, 0.0, 0.0]])\n self._conductance_input_test_assert(\n net,\n net.model.relu,\n (inp1, inp2),\n (3,),\n ([[0.0, 50.0, 5.0]], [[0.0, 20.0, 25.0]]),\n (inp3, 5),\n )\n\n def test_simple_conductance_multi_input_batch_relu(self) -> None:\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])\n inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])\n inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])\n self._conductance_input_test_assert(\n net,\n net.model.relu,\n (inp1, inp2),\n (3,),\n (\n [[0.0, 50.0, 5.0], [0.0, 0.0, 50.0]],\n [[0.0, 20.0, 25.0], [0.0, 0.0, 50.0]],\n ),\n (inp3, 5),\n )\n\n def test_matching_conv2_multi_input_conductance(self) -> None:\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(2, 1, 10, 10)\n self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.0)\n\n # trying different baseline\n self._conductance_input_sum_test_assert(net, net.conv2, inp, 0.000001)\n\n def test_matching_relu2_multi_input_conductance(self) -> None:\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)\n baseline = 20 * torch.randn(3, 1, 10, 10, requires_grad=True)\n self._conductance_input_sum_test_assert(net, net.relu2, inp, baseline)\n\n def test_matching_relu2_with_scalar_base_multi_input_conductance(self) -> None:\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)\n self._conductance_input_sum_test_assert(net, net.relu2, inp, 0.0)\n\n def test_matching_pool2_multi_input_conductance(self) -> None:\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(1, 1, 10, 10)\n baseline = 20 * torch.randn(1, 1, 10, 10, requires_grad=True)\n self._conductance_input_sum_test_assert(net, net.pool2, inp, baseline)\n\n def _conductance_input_test_assert(\n self,\n model: Module,\n target_layer: Module,\n test_input: TensorOrTupleOfTensorsGeneric,\n test_neuron: Union[int, Tuple[int, ...]],\n expected_input_conductance: Union[List[float], Tuple[List[List[float]], ...]],\n additional_input: Any = None,\n multiply_by_inputs: bool = True,\n ) -> None:\n for internal_batch_size in (None, 5, 20):\n cond = NeuronConductance(\n model, target_layer, multiply_by_inputs=multiply_by_inputs,\n )\n self.assertEquals(cond.multiplies_by_inputs, multiply_by_inputs)\n attributions = cond.attribute(\n test_input,\n test_neuron,\n target=0,\n n_steps=500,\n method=\"gausslegendre\",\n additional_forward_args=additional_input,\n internal_batch_size=internal_batch_size,\n )\n if 
isinstance(expected_input_conductance, tuple):\n for i in range(len(expected_input_conductance)):\n for j in range(len(expected_input_conductance[i])):\n assertArraysAlmostEqual(\n attributions[i][j : j + 1].squeeze(0).tolist(),\n expected_input_conductance[i][j],\n delta=0.1,\n )\n else:\n if isinstance(attributions, Tensor):\n assertArraysAlmostEqual(\n attributions.squeeze(0).tolist(),\n expected_input_conductance,\n delta=0.1,\n )\n else:\n raise AssertionError(\n \"Attributions not returning a Tensor when expected.\"\n )\n\n def _conductance_input_sum_test_assert(\n self,\n model: Module,\n target_layer: Module,\n test_input: TensorOrTupleOfTensorsGeneric,\n test_baseline: BaselineType = None,\n ):\n layer_cond = LayerConductance(model, target_layer)\n attributions = cast(\n Tensor,\n layer_cond.attribute(\n test_input,\n baselines=test_baseline,\n target=0,\n n_steps=500,\n method=\"gausslegendre\",\n ),\n )\n neuron_cond = NeuronConductance(model, target_layer)\n attr_shape = cast(Tuple[int, ...], attributions.shape)\n for i in range(attr_shape[1]):\n for j in range(attr_shape[2]):\n for k in range(attr_shape[3]):\n neuron_vals = neuron_cond.attribute(\n test_input,\n (i, j, k),\n baselines=test_baseline,\n target=0,\n n_steps=500,\n )\n for n in range(attributions.shape[0]):\n self.assertAlmostEqual(\n torch.sum(neuron_vals[n]).item(),\n attributions[n, i, j, k].item(),\n delta=0.005,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.empty" ], [ "torch.randn", "torch.sum", "torch.tensor" ] ]
mahaarbo/casclik
[ "21689c8f09388ef41b95943fa7c9e9bf07ae9207" ]
[ "examples/notebooks/common_plots.py" ]
[ "from matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport casadi as cs\n\n\ndef joints(simres, axs=None, max_speed=None, label_suffix=\"\", lstyle=\"-\"):\n \"\"\"Plot joint states wrt time from a simres dictionary.\n\n If given 2 axes, it will plot joints in the first and joint speeds\n in the second, else it will make a new figure. If given a single\n float max speed, it will show the limits.\n \"\"\"\n nq = simres[\"q_sim\"].shape[1]\n if axs is None:\n fig, axs = plt.subplots(2, 1)\n for i in range(nq):\n axs[0].plot(simres[\"t_sim\"], simres[\"q_sim\"][:, i],\n label=\"q\"+str(i)+label_suffix,ls=lstyle)\n axs[1].plot(simres[\"t_sim\"], simres[\"dq_sim\"][:, i],\n label=\"dq\"+str(i)+label_suffix,ls=lstyle)\n axs[0].set_ylabel(\"Position [rad]\")\n axs[0].set_xlabel(\"time [s]\")\n axs[0].legend()\n axs[1].set_ylabel(\"Speed [rad/s]\")\n axs[0].set_ylabel(\"position [rad]\")\n if max_speed is not None:\n axs[1].plot([min(simres[\"t_sim\"]), max(simres[\"t_sim\"])],\n [max_speed, max_speed], 'k--')\n axs[1].plot([min(simres[\"t_sim\"]), max(simres[\"t_sim\"])],\n [-max_speed, -max_speed], 'k--')\n return axs\n\n\ndef pos_point(simres, ax=None, p_des=None, label_suffix=\"\", lstyle=\"-\"):\n \"\"\"Plot position wrt time from a simres dictionary.\"\"\"\n ns = simres[\"p_sim\"].shape[1]\n cmap = plt.get_cmap(\"tab10\")\n if ax is None:\n fig, ax = plt.subplots(1, 1)\n for i in range(ns):\n ax.plot(simres[\"t_sim\"], simres[\"p_sim\"][:, i],\n color=cmap(i), label=chr(ord(\"x\")+i)+label_suffix,\n ls=lstyle)\n if p_des is not None:\n if not isinstance(p_des, cs.Function):\n ax.plot([min(simres[\"t_sim\"]), max(simres[\"t_sim\"])],\n [p_des[i], p_des[i]], color=cmap(i), ls=\"--\",\n label=chr(ord(\"x\")+i)+\"_des\")\n else:\n p_des_sim = [p_des(t).toarray()[i] for t in simres[\"t_sim\"]]\n ax.plot(simres[\"t_sim\"], p_des_sim, color=cmap(i),\n ls=\"--\", label=chr(ord(\"x\")+i)+\"_des\")\n ax.legend()\n return ax\n\n\ndef pos_point_3d(simres, ax=None, p_des=None):\n \"\"\"Plot 3D of converging to the desired point\"\"\"\n if ax is None:\n ax = Axes3D(plt.figure())\n ax.plot(simres[\"p_sim\"][:, 0],\n simres[\"p_sim\"][:, 1],\n simres[\"p_sim\"][:, 2])\n if p_des is not None:\n if isinstance(p_des,cs.Function):\n p_x = [p_des(t).toarray()[0] for t in simres[\"t_sim\"]]\n p_y = [p_des(t).toarray()[1] for t in simres[\"t_sim\"]]\n p_z = [p_des(t).toarray()[2] for t in simres[\"t_sim\"]]\n ax.plot(p_x, p_y, p_z)\n else:\n ax.scatter(p_des[0], p_des[1], p_des[2], s=20, color=\"k\")\n ax.set_xlabel(\"x [m]\")\n ax.set_ylabel(\"y [m]\")\n ax.set_zlabel(\"z [m]\")\n return ax\n\n\ndef frame_3d(simres, ax=None, p_des=None, T_des=None):\n \"\"\"Plot 3D of converging to the desired frame.\"\"\"\n if ax is None:\n ax = Axes3D(plt.figure())\n ax.plot(simres[\"p_sim\"][:, 0],\n simres[\"p_sim\"][:, 1],\n simres[\"p_sim\"][:, 2], \"k\")\n # make the end point\n if p_des is not None:\n ax.scatter(p_des[0], p_des[1], p_des[2], s=20, c=\"k\")\n elif T_des is not None:\n ax.scatter(T_des[0, 3], T_des[1, 3], T_des[2, 3], s=20, c=\"k\")\n r_sim = cs.np.zeros_like(simres[\"p_sim\"])\n g_sim = cs.np.zeros_like(simres[\"p_sim\"])\n b_sim = cs.np.zeros_like(simres[\"p_sim\"])\n for i in range(len(simres[\"t_sim\"])):\n r_sim[i, :] = simres[\"p_sim\"][i, :] + cs.np.dot(simres[\"R_sim\"][i, :, :], cs.np.array([0.1, 0., 0.]))\n g_sim[i, :] = simres[\"p_sim\"][i, :] + cs.np.dot(simres[\"R_sim\"][i, :, :], cs.np.array([0., 0.1, 0.]))\n b_sim[i, :] = simres[\"p_sim\"][i, :] + 
cs.np.dot(simres[\"R_sim\"][i, :, :], cs.np.array([0., 0., 0.1]))\n ax.plot(r_sim[:, 0],\n r_sim[:, 1],\n r_sim[:, 2],\n \"r--\")\n ax.plot(g_sim[:, 0],\n g_sim[:, 1],\n g_sim[:, 2],\n \"g--\")\n ax.plot(b_sim[:, 0],\n b_sim[:, 1],\n b_sim[:, 2],\n \"b--\")\n if T_des is not None:\n r_des_x = T_des[0, 3] + T_des[0, 0]*.1\n r_des_y = T_des[1, 3] + T_des[1, 0]*.1\n r_des_z = T_des[2, 3] + T_des[2, 0]*.1\n g_des_x = T_des[0, 3] + T_des[0, 1]*.1\n g_des_y = T_des[1, 3] + T_des[1, 1]*.1\n g_des_z = T_des[2, 3] + T_des[2, 1]*.1 \n b_des_x = T_des[0, 3] + T_des[0, 2]*.1\n b_des_y = T_des[1, 3] + T_des[1, 2]*.1\n b_des_z = T_des[2, 3] + T_des[2, 2]*.1\n ax.scatter([r_des_x, g_des_x, b_des_x],\n [r_des_y, g_des_y, b_des_y],\n [r_des_z, g_des_z, b_des_z],\n s=20, c=[\"r\", \"g\", \"b\"])\n ax.set_xlabel(\"x [m]\")\n ax.set_ylabel(\"y [m]\")\n ax.set_zlabel(\"z [m]\")\n return ax\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.figure" ] ]
steveschmidt95-neu/mcmi
[ "199029b1efc319850bb128bb19992082f28001d1" ]
[ "src/mcmi_step2.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 15 17:37:21 2021\n\n@author: stephenschmidt\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 6 13:12:13 2021\n\n@author: stephenschmidt\n\n\nuses the undertraining on the total dataset and then applies it to\n\n\"\"\"\n\n\n\nimport numpy as np\nimport tensorflow.compat.v1 as v1\nv1.disable_eager_execution()\nimport os\nfrom direct_cnn_dataset import DirectCNNDataset\nfrom load_train_data import H5MSI_Train\nfrom net1_adam import MSInet1\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport h5py\n\n\ndef single_to_one_hot(labels, num_classes):\n #diagnosis_dict = {'high': 1, 'CA': 2, 'low': 3, 'healthy': 4}\n # shifted to {'high': 0, 'CA': 1, 'low': 2, 'healthy': 3}\n one_hot_labels = np.zeros((labels.shape[0], num_classes))\n for hot_class in range(0, num_classes):\n class_locations = np.where(labels == hot_class)\n one_hot_labels[class_locations, hot_class] = 1\n return(one_hot_labels)\n\ndef one_hot_probability_to_single_label(pred_labels, num_classes): \n argmax_labels = np.argmax(pred_labels, axis=1)\n return(argmax_labels)\n\n\nclass MIL():\n \n def __init__(self, input_file = 'MICNN_Out', fc_units = 50, num_classes=4, width1=38, width2=18, width3=16, filters_layer1=12, \n filters_layer2=24, filters_layer3=48, batch_size=4, lr=.001, keep_prob=.8):\n \n self.train_dataset = DirectCNNDataset(num_classes=4, train_data_location=input_file)\n self.num_classes = num_classes\n \n self.h5_out_path = os.path.join(os.path.dirname(os.getcwd()), 'FinalOutputPredictions')\n self.image_out_path = os.path.join(os.path.dirname(os.getcwd()), 'FinalOutputImages')\n self.data_folder = os.path.join(os.path.dirname(os.getcwd()), 'Data')\n\n self.train_data_total = H5MSI_Train()\n self.train_data_total.one_hot_labels()\n \n self.train_core_spec = {}\n self.train_core_pred_sub_labels = {}\n self.train_positions = {}\n self.test_core_true_label = {}\n \n for core in self.train_data_total.cores_list:\n if 'Label' not in core and 'position' not in core:\n self.train_core_spec[core] = self.train_data_total.train_data[core]\n self.train_core_pred_sub_labels[core] = np.zeros((self.train_core_spec[core].shape[0]))\n self.train_positions[core] = self.get_positions_for_core(core)\n self.test_core_true_label[core] = int(self.train_data_total.train_data[core +'_Labels'][0])\n \n self.diagnosis_dict = {'high': 1, 'CA': 2, 'low': 3, 'healthy': 0}\n self.diagnosis_dict_reverse = {1: 'high', 2: 'CA', 3: 'low', 0:'healthy'}\n \n if num_classes == 2:\n self.diagnosis_dict = {'high': 1, 'healthy': 0}\n self.diagnosis_dict_reverse = {1: 'high', 0:'healthy'}\n \n self.batch_size = batch_size\n self.lr = lr\n self.keep_prob = keep_prob\n \n \n self.sample_shape = int(self.train_dataset.spec_dict[0].shape[1])\n\n self.net1 = MSInet1(data_shape = self.sample_shape, fc_units=fc_units, num_classes=num_classes, width1=width1, width2=width2, width3=width3, filters_layer1=filters_layer1, filters_layer2=filters_layer2, filters_layer3=filters_layer3, batch_size=batch_size*self.num_classes,lr=lr)\n self.net1.build_graph()\n \n \n def check_batch_size(self):\n for class_label in range(0, self.num_classes):\n for class_label in range(0, self.num_classes):\n label_spec_size = self.train_dataset.spec_dict[class_label].shape[0]\n if label_spec_size < self.batch_size*self.num_classes:\n print(\"Class: \", class_label, \" Has \", label_spec_size, \" Inputs\")\n assert False # Reduce batch Size\n \n for core in 
self.train_data_total.cores_list:\n if ((self.num_classes * self.batch_size) > self.train_core_spec[core].shape[0]):\n print(\"Core: \", core, \" Has \", self.train_core_spec[core].shape[0], \" Inputs\")\n assert False # Reduce batch Size\n \n \n \n \n def get_next_batch(self):\n \n random_batch = np.zeros((self.batch_size*self.num_classes,self.train_dataset.spec_dict[0].shape[1]))\n labels = np.zeros((self.batch_size*self.num_classes, self.num_classes))\n \n for class_label in range(0, self.num_classes):\n class_spec = self.train_dataset.spec_dict[class_label]\n random_batch_indeces = np.random.choice(class_spec.shape[0], self.batch_size)\n \n random_class_batch = class_spec[random_batch_indeces, :]\n random_batch[class_label*self.batch_size:(class_label*self.batch_size) +self.batch_size, :] = random_class_batch\n labels[class_label*self.batch_size:(class_label*self.batch_size) +self.batch_size, class_label] = 1\n \n random_batch = np.reshape(random_batch, (random_batch.shape[0], random_batch.shape[1], 1))\n \n return(random_batch, labels)\n\n def cnn_X_epoch(self, epochs):\n self.check_batch_size()\n for epoch in range(1, epochs):\n epoch_cost = 0\n for l in range(1, 50):\n \n train_input, train_labels = self.get_next_batch()\n cost, preds = self.net1.single_core_compute_params(train_input, train_labels, keep_prob=self.keep_prob)\n epoch_cost += cost\n \n print(\"Epoch \", epoch)\n print(\"Cost: \", epoch_cost)\n \n \n self.predict_all_cores()\n self.save_predictions_all_cores()\n \n self.save_ims_all_cores()\n \n \n def get_test_labels_single_core(self, core):\n batch_idx = 0\n total_input_vals = self.train_core_pred_sub_labels[core].shape[0]\n batch_size = self.batch_size * self.num_classes\n\n while (batch_idx+batch_size < total_input_vals):\n \n train_batch = self.train_core_spec[core][batch_idx:batch_idx+batch_size]\n train_batch = np.reshape(train_batch, (train_batch.shape[0], train_batch.shape[1], 1))\n \n preds = self.net1.single_core_predict_labels(train_batch, keep_prob=self.keep_prob)\n preds = one_hot_probability_to_single_label(preds, self.num_classes)\n \n self.train_core_pred_sub_labels[core][batch_idx:batch_idx+batch_size] = preds\n \n batch_idx += batch_size\n\n train_batch = self.train_core_spec[core][total_input_vals-batch_size:, :]\n train_batch = np.reshape(train_batch, (train_batch.shape[0], train_batch.shape[1], 1))\n \n preds = self.net1.single_core_predict_labels(train_batch, keep_prob=self.keep_prob)\n preds = one_hot_probability_to_single_label(preds, self.num_classes)\n self.train_core_pred_sub_labels[core][total_input_vals-batch_size:] = preds\n \n \n def viz_single_core_pred(self, core):\n\n positions = self.train_positions[core]\n \n xmax = np.max(positions[:, 0])\n xmin = np.min(positions[:, 0])\n ymax = np.max(positions[:, 1])\n ymin = np.min(positions[:, 1])\n image_array = np.zeros((xmax-xmin+1, ymax-ymin+1))\n image_array[:] = 4\n \n cmap = matplotlib.colors.ListedColormap(['black', 'red', 'blue', 'yellow', 'white'])\n bounds = [0, 1, 2, 3, 4,5]\n norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)\n \n for location in range(0, self.train_core_pred_sub_labels[core].shape[0]):\n label = self.train_core_pred_sub_labels[core][location]\n xloc = self.train_positions[core][location][0]- xmin\n yloc = self.train_positions[core][location][1] - ymin\n \n image_array[xloc, yloc] = label\n \n ax = plt.gca()\n ax.axes.xaxis.set_visible(False)\n ax.axes.yaxis.set_visible(False)\n plt.grid(True)\n plt.imshow(image_array, 
interpolation='nearest',cmap=cmap,norm=norm)\n\n title = \"Core Number: \" + str(core) + \" Label: \" + self.diagnosis_dict_reverse[self.test_core_true_label[core]]\n plt.title(title)\n plt.colorbar(cmap=cmap,norm=norm,boundaries=bounds,ticks=[0,1,2, 3])\n filename = os.path.join(self.image_out_path, core + '.png')\n \n plt.savefig(filename, pad_inches=0)\n plt.clf()\n \n \n def predict_all_cores(self):\n for core in self.train_data_total.cores_list:\n self.get_test_labels_single_core(core)\n \n \n def get_positions_for_core(self, core):\n positions_filename = os.path.join(self.data_folder, core + '_positions' + '.hdf5')\n with h5py.File(positions_filename, \"r\") as hf:\n dname = list(hf.keys())[0]\n n1 = hf.get(dname) \n positions_array = np.copy(n1)\n\n return(positions_array)\n\n \n def save_predictions_all_cores(self):\n print(\"Saving Core Predictions as H5 Files\")\n \n for core in self.train_data_total.cores_list:\n prev_labels = self.train_core_pred_sub_labels[core]\n \n labels_filename = os.path.join(self.h5_out_path, core + '_multiclass.hdf5')\n with h5py.File(labels_filename, \"w\") as f:\n dset = f.create_dataset(core + \"multiclass_labels\", data=prev_labels, dtype='f')\n \n def save_ims_all_cores(self):\n print(\"Saving Images . . .\")\n for core in self.train_data_total.cores_list:\n self.viz_single_core_pred(core)\n \nbatch_size = 4 # per \n\nnum_classes = 4\nnum_epochs=10\nlr=.001\n\nMIL = MIL(fc_units = 100, num_classes=num_classes, width1=38, width2=18, width3=16, filters_layer1=40, \n filters_layer2=60, filters_layer3=100, batch_size=batch_size, lr=lr, keep_prob=.99)\nMIL.cnn_X_epoch(num_epochs)\n\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.imshow", "matplotlib.colors.BoundaryNorm", "matplotlib.pyplot.title", "numpy.min", "numpy.reshape", "numpy.random.choice", "matplotlib.pyplot.savefig", "numpy.max", "matplotlib.pyplot.colorbar", "numpy.argmax", "matplotlib.colors.ListedColormap", "matplotlib.pyplot.clf", "matplotlib.pyplot.grid", "tensorflow.compat.v1.disable_eager_execution", "numpy.copy", "numpy.zeros", "numpy.where" ] ]
Gustavo029/GridReader
[ "7edc950c469b06c3de0093e5fd8bf6cfd59af354" ]
[ "workspace/geomechanics/cryer/cryer.py" ]
[ "print(\"\"\"\n\tESFERA DE CRYER DRENADO - SEM GRAVIDADE:\n\t(EXEMPLO DO RELATÓRIO)\n\t\t3D\n\t\tRaio = 1m\n\t\tMalha de Tetraedros\n\t\t1852 Nós\n\t\t8046 Elementos\n\n\t\t1MPa aplicado na superfície\n\t\t\n\t\ttimeStep = 10s\n\t\tfinalTime = 1000s\n\t\t100 time steps\n\"\"\")\n\nload = 1.0e+6\ngravity = 0.0 #+\n\nradius = 1.0\nmeshFilePath = \"{MESHES}/msh/3D/eight_sphere.msh\"\n\nrun = False\nplot = True\n\nimport sys,os\nsys.path.append(os.path.join(os.path.dirname(__file__), *[os.path.pardir]*3))\nimport PyEFVLib\nimport numpy as np\n\ndef geomechanics(problemData, times, timeSteps):\n\tpropertyData \t = problemData.propertyData\n\ttimeStep \t\t = problemData.timeStep\n\tgrid \t\t\t = problemData.grid\n\tnumberOfVertices = grid.numberOfVertices\n\tdimension \t\t = grid.dimension\n\n\tcsvSaver = PyEFVLib.CsvSaver(grid, problemData.outputFilePath, problemData.libraryPath)\n\tmeshioSaver = PyEFVLib.MeshioSaver(grid, problemData.outputFilePath, problemData.libraryPath, extension=\"xdmf\")\n\n\toldUField = np.concatenate((problemData.initialValues[\"u_x\"], problemData.initialValues[\"u_y\"], problemData.initialValues[\"u_z\"])) if dimension==3 else np.concatenate((problemData.initialValues[\"u_x\"], problemData.initialValues[\"u_y\"]))\n\tuField = np.repeat(0.0, dimension*numberOfVertices)\n\n\toldPField = problemData.initialValues[\"p\"].copy()\n\tpField = np.repeat(0.0, numberOfVertices)\n\n\tmatrix \t\t= np.zeros(((1+dimension)*numberOfVertices, (1+dimension)*numberOfVertices))\n\n\tcsvSaver.save(\"u_x\", oldUField[0*numberOfVertices:1*numberOfVertices], 0.0)\n\tcsvSaver.save(\"u_y\", oldUField[1*numberOfVertices:2*numberOfVertices], 0.0)\n\tif dimension == 3:\n\t\tcsvSaver.save(\"u_z\", oldUField[2*numberOfVertices:3*numberOfVertices], 0.0)\n\tcsvSaver.save(\"p\", oldPField, 0.0)\n\n\tmeshioSaver.save(\"u_x\", oldUField[0*numberOfVertices:1*numberOfVertices], 0.0)\n\tmeshioSaver.save(\"u_y\", oldUField[1*numberOfVertices:2*numberOfVertices], 0.0)\n\tif dimension == 3:\n\t\tmeshioSaver.save(\"u_z\", oldUField[2*numberOfVertices:3*numberOfVertices], 0.0)\n\tmeshioSaver.save(\"p\", oldPField, 0.0)\n\n\n\tdef getTransposedVoigtArea(innerFace):\n\t\tSx, Sy, Sz = innerFace.area.getCoordinates()\n\t\treturn np.array([[Sx,0,Sy],[0,Sy,Sx]]) if dimension==2 else np.array([[Sx,0,0,Sy,0,Sz],[0,Sy,0,Sx,Sz,0],[0,0,Sz,0,Sy,Sx]])\n\n\tdef getVoigtGradientOperator(globalDerivatives):\n\t\tif len(globalDerivatives) == 2:\n\t\t\tNx,Ny = globalDerivatives\n\t\t\tzero=np.zeros(Nx.size)\n\t\t\treturn np.array([[Nx,zero],[zero,Ny],[Ny,Nx]])\n\n\t\tif len(globalDerivatives) == 3:\n\t\t\tNx,Ny,Nz = globalDerivatives\n\t\t\tzero=np.zeros(Nx.size)\n\t\t\treturn np.array([[Nx,zero,zero],[zero,Ny,zero],[zero,zero,Nz],[Ny,Nx,zero],[zero,Nz,Ny],[Nz,zero,Nx]])\n\n\tdef assembleMatrix():\n\t\tmatrix \t\t= np.zeros(((1+dimension)*numberOfVertices, (1+dimension)*numberOfVertices))\n\n\t\tfor region in grid.regions:\n\t\t\tnu = propertyData.get(region.handle, \"nu\")\n\t\t\tG = propertyData.get(region.handle, \"G\")\n\t\t\tcs = propertyData.get(region.handle, \"cs\")\n\t\t\tphi = propertyData.get(region.handle, \"phi\")\n\t\t\tk = propertyData.get(region.handle, \"k\")\n\t\t\tcf = propertyData.get(region.handle, \"cf\")\n\t\t\tmu = propertyData.get(region.handle, \"mu\")\n\t\t\trhos = propertyData.get(region.handle, \"rhos\")\n\t\t\trhof = propertyData.get(region.handle, \"rhof\")\n\n\t\t\tg = np.array([0.0, -gravity, 0.0])[:dimension]\n\t\t\tlame = 2*G*nu/(1-2*nu)\n\t\t\tCe = np.array([[2*G+lame, lame, 0], [lame, 
2*G+lame, 0], [0, 0, G]]) if dimension==2 else np.array([[2*G+lame, lame, lame, 0, 0, 0], [lame, 2*G+lame, lame, 0, 0, 0], [lame, lame, 2*G+lame, 0, 0, 0], [0, 0, 0, G, 0, 0], [0, 0, 0, 0, G, 0], [0, 0, 0, 0, 0, G]])\n\t\t\trho = phi * rhof + (1-phi) * rhos\n\t\t\tK = 2*G*(1 + nu) / 3*(1-2*nu)\n\t\t\tcb = 1 / K\n\t\t\talpha = 1 - cs / cb\n\t\t\tS = (phi * cf + (alpha-phi) * cs)\n\n\t\t\tfor element in region.elements:\n\t\t\t\t# S * (1/timeStep) * p\n\t\t\t\tfor vertex in element.vertices:\n\t\t\t\t\tmatrix[vertex.handle+(dimension)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] += vertex.getSubElementVolume(element) * S * (1/timeStep) \n\n\t\t\t\t# (-1) * alpha * grad(p)\n\t\t\t\tfor face in element.innerFaces:\n\t\t\t\t\tarea = face.area.getCoordinates()[:dimension]\n\t\t\t\t\tm = element.vertices.size\n\t\t\t\t\ttransposedVoigtArea = getTransposedVoigtArea(face)\n\t\t\t\t\tshapeFunctions = face.getShapeFunctions()\n\t\t\t\t\tidentityShapeFunctionMatrix = np.array([shapeFunctions, shapeFunctions, shapeFunctions, np.zeros(m), np.zeros(m), np.zeros(m)]) if dimension == 3 else np.array([shapeFunctions, shapeFunctions, np.zeros(m)])\n\t\t\t\t\tmatrixCoefficients = (-1) * alpha * np.matmul(transposedVoigtArea, identityShapeFunctionMatrix)\n\t\t\t\t\tbackwardsHandle, forwardHandle = face.getNeighborVerticesHandles()\n\t\t\t\t\tfor coord in range(dimension):\n\t\t\t\t\t\tfor local, vertex in enumerate(element.vertices):\n\t\t\t\t\t\t\tmatrix[backwardsHandle+(coord+0)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] += matrixCoefficients[coord][local]\n\t\t\t\t\t\t\tmatrix[forwardHandle+(coord+0)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] += -matrixCoefficients[coord][local]\n\n\t\t\t\t# (-1) * k * mu * grad(p)\n\t\t\t\tfor face in element.innerFaces:\n\t\t\t\t\tarea = face.area.getCoordinates()[:dimension]\n\t\t\t\t\tmatrixCoefficients = (-1) * k * mu * np.matmul( area.T, face.globalDerivatives )\n\t\t\t\t\tbackwardsHandle, forwardHandle = face.getNeighborVerticesHandles()\n\t\t\t\t\tfor local, vertex in enumerate(element.vertices):\n\t\t\t\t\t\tmatrix[backwardsHandle+(dimension)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] += matrixCoefficients[local]\n\t\t\t\t\t\tmatrix[forwardHandle+(dimension)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] += -matrixCoefficients[local]\n\n\t\t\t\t# Ce * grad_s(u)\n\t\t\t\tfor face in element.innerFaces:\n\t\t\t\t\tarea = face.area.getCoordinates()[:dimension]\n\t\t\t\t\ttransposedVoigtArea = getTransposedVoigtArea(face)\n\t\t\t\t\tvoigtGradientOperator = getVoigtGradientOperator(face.globalDerivatives)\n\t\t\t\t\tcoeff = Ce\n\t\t\t\t\tmatrixCoefficients = np.einsum(\"ij,jk,kmn->imn\", transposedVoigtArea, coeff, voigtGradientOperator)\n\t\t\t\t\tbackwardsHandle, forwardHandle = face.getNeighborVerticesHandles()\n\t\t\t\t\tfor i in range(dimension):\n\t\t\t\t\t\tfor j in range(dimension):\n\t\t\t\t\t\t\tfor local, vertex in enumerate(element.vertices):\n\t\t\t\t\t\t\t\tmatrix[backwardsHandle+(i+0)*numberOfVertices][vertex.handle+(j+0)*numberOfVertices] += matrixCoefficients[i][j][local]\n\t\t\t\t\t\t\t\tmatrix[forwardHandle+(i+0)*numberOfVertices][vertex.handle+(j+0)*numberOfVertices] += -matrixCoefficients[i][j][local]\n\n\t\t\t\t# alpha * (1/timeStep) * u\n\t\t\t\tfor face in element.faces:\n\t\t\t\t\tarea = face.area.getCoordinates()[:dimension]\n\t\t\t\t\tshapeFunctions = face.getShapeFunctions()\n\t\t\t\t\tfor coord in range(dimension):\n\t\t\t\t\t\tfor local, vertex in 
enumerate(element.vertices):\n\t\t\t\t\t\t\tif type(face) == PyEFVLib.InnerFace:\n\t\t\t\t\t\t\t\tbackwardsHandle, forwardHandle = face.getNeighborVerticesHandles()\n\n\t\t\t\t\t\t\t\tmatrix[backwardsHandle+(dimension)*numberOfVertices][vertex.handle+(coord+0)*numberOfVertices] += alpha * (1/timeStep) * shapeFunctions[local] * area[coord]\n\t\t\t\t\t\t\t\tmatrix[forwardHandle+(dimension)*numberOfVertices][vertex.handle+(coord+0)*numberOfVertices] += -alpha * (1/timeStep) * shapeFunctions[local] * area[coord]\n\n\t\t\t\t\t\t\telif type(face) == PyEFVLib.OuterFace:\n\t\t\t\t\t\t\t\tmatrix[face.vertex.handle+(dimension)*numberOfVertices][vertex.handle+(coord+0)*numberOfVertices] += alpha * (1/timeStep) * shapeFunctions[local] * area[coord]\n\n\t\t# Dirichlet Boundary Conditions\n\t\tfor bCondition in problemData.dirichletBoundaries[\"u_x\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tmatrix[vertex.handle+(0)*numberOfVertices] = np.zeros((1+dimension)*numberOfVertices)\n\t\t\t\tmatrix[vertex.handle+(0)*numberOfVertices][vertex.handle+(0)*numberOfVertices] = 1.0\n\n\t\tfor bCondition in problemData.dirichletBoundaries[\"u_y\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tmatrix[vertex.handle+(1)*numberOfVertices] = np.zeros((1+dimension)*numberOfVertices)\n\t\t\t\tmatrix[vertex.handle+(1)*numberOfVertices][vertex.handle+(1)*numberOfVertices] = 1.0\n\n\t\tif dimension == 3:\n\t\t\tfor bCondition in problemData.dirichletBoundaries[\"u_z\"]:\n\t\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\t\tmatrix[vertex.handle+(2)*numberOfVertices] = np.zeros((1+dimension)*numberOfVertices)\n\t\t\t\t\tmatrix[vertex.handle+(2)*numberOfVertices][vertex.handle+(2)*numberOfVertices] = 1.0\n\n\t\tfor bCondition in problemData.dirichletBoundaries[\"p\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tmatrix[vertex.handle+(dimension)*numberOfVertices] = np.zeros((1+dimension)*numberOfVertices)\n\t\t\t\tmatrix[vertex.handle+(dimension)*numberOfVertices][vertex.handle+(dimension)*numberOfVertices] = 1.0\n\n\t\treturn matrix\n\n\tdef assembleIndependent():\n\t\tindependent = np.zeros((1+dimension)*numberOfVertices)\n\n\t\tfor region in grid.regions:\n\t\t\tnu = propertyData.get(region.handle, \"nu\")\n\t\t\tG = propertyData.get(region.handle, \"G\")\n\t\t\tcs = propertyData.get(region.handle, \"cs\")\n\t\t\tphi = propertyData.get(region.handle, \"phi\")\n\t\t\tk = propertyData.get(region.handle, \"k\")\n\t\t\tcf = propertyData.get(region.handle, \"cf\")\n\t\t\tmu = propertyData.get(region.handle, \"mu\")\n\t\t\trhos = propertyData.get(region.handle, \"rhos\")\n\t\t\trhof = propertyData.get(region.handle, \"rhof\")\n\n\t\t\tg = np.array([0.0, -gravity, 0.0])[:dimension]\n\t\t\tlame = 2*G*nu/(1-2*nu)\n\t\t\tCe = np.array([[2*G+lame, lame, 0], [lame, 2*G+lame, 0], [0, 0, G]]) if dimension==2 else np.array([[2*G+lame, lame, lame, 0, 0, 0], [lame, 2*G+lame, lame, 0, 0, 0], [lame, lame, 2*G+lame, 0, 0, 0], [0, 0, 0, G, 0, 0], [0, 0, 0, 0, G, 0], [0, 0, 0, 0, 0, G]])\n\t\t\trho = phi * rhof + (1-phi) * rhos\n\t\t\tK = 2*G*(1 + nu) / 3*(1-2*nu)\n\t\t\tcb = 1 / K\n\t\t\talpha = 1 - cs / cb\n\t\t\tS = (phi * cf + (alpha-phi) * cs)\n\n\t\t\tfor element in region.elements:\n\t\t\t\t# (-1) * rho * g\n\t\t\t\tfor vertex in element.vertices:\n\t\t\t\t\tfor coord in range(dimension):\n\t\t\t\t\t\tindependent[vertex.handle+coord*numberOfVertices] += vertex.getSubElementVolume(element) * (-1) * rho * g[coord]\n\n\t\t\t\t# S * (1/timeStep) * p_old\n\t\t\t\tfor vertex in 
element.vertices:\n\t\t\t\t\tindependent[vertex.handle+(dimension)*numberOfVertices] += vertex.getSubElementVolume(element) * S * (1/timeStep) * oldPField[vertex.handle]\n\n\t\t\t\t# (-1) * k * mu * rho * g\n\t\t\t\tfor innerFace in element.innerFaces:\n\t\t\t\t\tarea = innerFace.area.getCoordinates()[:dimension]\n\t\t\t\t\tcoefficient = (-1) * k * mu * rho * g\n\t\t\t\t\tcoefficient = np.matmul(area.T, coefficient)\n\n\t\t\t\t\tbackwardsHandle, forwardHandle = innerFace.getNeighborVerticesHandles()\n\t\t\t\t\tindependent[backwardsHandle+(dimension)*numberOfVertices] += coefficient\n\t\t\t\t\tindependent[forwardHandle+(dimension)*numberOfVertices] -= coefficient\n\n\t\t\t\t# alpha * (1/timeStep) * u_old\n\t\t\t\tfor face in element.faces:\n\t\t\t\t\tarea = face.area.getCoordinates()[:dimension]\n\t\t\t\t\tshapeFunctions = face.getShapeFunctions()\n\n\t\t\t\t\tfor coord in range(dimension):\n\t\t\t\t\t\tfor local, vertex in enumerate(element.vertices):\n\t\t\t\t\t\t\tif type(face) == PyEFVLib.InnerFace:\n\t\t\t\t\t\t\t\tbackwardsHandle, forwardHandle = face.getNeighborVerticesHandles()\n\t\t\t\t\t\t\t\tindependent[backwardsHandle+(dimension)*numberOfVertices] += alpha * (1/timeStep) * shapeFunctions[local] * area[coord] * oldUField[vertex.handle + (coord)*numberOfVertices]\n\t\t\t\t\t\t\t\tindependent[forwardHandle+(dimension)*numberOfVertices] -= alpha * (1/timeStep) * shapeFunctions[local] * area[coord] * oldUField[vertex.handle + (coord)*numberOfVertices]\n\n\t\t\t\t\t\t\telif type(face) == PyEFVLib.OuterFace:\n\t\t\t\t\t\t\t\tindependent[face.vertex.handle+(dimension)*numberOfVertices] += alpha * (1/timeStep) * shapeFunctions[local] * area[coord] * oldUField[vertex.handle + (coord)*numberOfVertices]\n\n\t\t# Neumann Boundary Condition\n\t\tfor bCondition in problemData.neumannBoundaries[\"u_x\"]:\n\t\t\tfor facet in bCondition.boundary.facets:\n\t\t\t\tfor outerFace in facet.outerFaces:\n\t\t\t\t\tindependent[outerFace.vertex.handle+(0)*numberOfVertices] += bCondition.getValue(outerFace.handle) * np.dot(outerFace.area.getCoordinates(), [1,0,0])\n\n\t\tfor bCondition in problemData.neumannBoundaries[\"u_y\"]:\n\t\t\tfor facet in bCondition.boundary.facets:\n\t\t\t\tfor outerFace in facet.outerFaces:\n\t\t\t\t\tindependent[outerFace.vertex.handle+(1)*numberOfVertices] += bCondition.getValue(outerFace.handle) * np.dot(outerFace.area.getCoordinates(), [0,1,0])\n\n\t\tif dimension == 3:\n\t\t\tfor bCondition in problemData.neumannBoundaries[\"u_z\"]:\n\t\t\t\tfor facet in bCondition.boundary.facets:\n\t\t\t\t\tfor outerFace in facet.outerFaces:\n\t\t\t\t\t\tindependent[outerFace.vertex.handle+(2)*numberOfVertices] += bCondition.getValue(outerFace.handle) * np.dot(outerFace.area.getCoordinates(), [0,0,1])\n\n\t\tfor bCondition in problemData.neumannBoundaries[\"p\"]:\n\t\t\tfor facet in bCondition.boundary.facets:\n\t\t\t\tfor outerFace in facet.outerFaces:\n\t\t\t\t\tindependent[outerFace.vertex.handle+(dimension)*numberOfVertices] += bCondition.getValue(outerFace.handle) * np.linalg.norm(outerFace.area.getCoordinates())\n\n\t\t# Dirichlet Boundary Condition\n\t\tfor bCondition in problemData.dirichletBoundaries[\"u_x\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tindependent[vertex.handle+(0)*numberOfVertices] = bCondition.getValue(vertex.handle)\n\n\t\tfor bCondition in problemData.dirichletBoundaries[\"u_y\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tindependent[vertex.handle+(1)*numberOfVertices] = bCondition.getValue(vertex.handle)\n\n\t\tif dimension 
== 3:\n\t\t\tfor bCondition in problemData.dirichletBoundaries[\"u_z\"]:\n\t\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\t\tindependent[vertex.handle+(2)*numberOfVertices] = bCondition.getValue(vertex.handle)\n\n\t\tfor bCondition in problemData.dirichletBoundaries[\"p\"]:\n\t\t\tfor vertex in bCondition.boundary.vertices:\n\t\t\t\tindependent[vertex.handle+(dimension)*numberOfVertices] = bCondition.getValue(vertex.handle)\n\n\t\treturn independent\n\n\ttolerance = problemData.tolerance\n\tdifference = 2*tolerance\n\titeration = 0\n\tcurrentTime = 0.0\n\tconverged = False\n\n\twhile not converged:\n\t\tcurrentTime = times[iteration]\n\t\ttimeStep = timeSteps[iteration]\n\n\t\tmatrix = assembleMatrix()\n\t\tindependent = assembleIndependent()\n\n\t\tresults = np.linalg.solve(matrix, independent)\n\t\tuField = results[(0)*numberOfVertices:(0+dimension)*numberOfVertices]\n\t\tpField = results[(dimension)*numberOfVertices:(dimension+1)*numberOfVertices]\n\n\t\tdifference = max( max(abs(pField - oldPField)), max(abs(uField - oldUField)) )\n\n\t\toldPField = pField.copy()\n\t\toldUField = uField.copy()\n\n\t\tcurrentTime += timeStep\n\t\titeration += 1\n\n\t\tcsvSaver.save(\"u_x\", uField[0*numberOfVertices:1*numberOfVertices], currentTime)\n\t\tcsvSaver.save(\"u_y\", uField[1*numberOfVertices:2*numberOfVertices], currentTime)\n\t\tif dimension == 3:\n\t\t\tcsvSaver.save(\"u_z\", uField[2*numberOfVertices:3*numberOfVertices], currentTime)\n\t\tcsvSaver.save(\"p\", pField, currentTime)\n\n\t\tmeshioSaver.save(\"u_x\", uField[0*numberOfVertices:1*numberOfVertices], currentTime)\n\t\tmeshioSaver.save(\"u_y\", uField[1*numberOfVertices:2*numberOfVertices], currentTime)\n\t\tif dimension == 3:\n\t\t\tmeshioSaver.save(\"u_z\", uField[2*numberOfVertices:3*numberOfVertices], currentTime)\n\t\tmeshioSaver.save(\"p\", pField, currentTime)\n\n\t\tprint(\"{:>9}\t{:>14.2e}\t{:>14.2e}\t{:>14.2e}\".format(iteration, currentTime, timeStep, difference))\n\t\tconverged = ( difference <= tolerance ) or ( currentTime >= problemData.finalTime ) or ( iteration >= problemData.maxNumberOfIterations )\n\n\t\tif iteration >= problemData.maxNumberOfIterations:\n\t\t\tbreak\n\n\tcsvSaver.finalize(i0=0)\n\tmeshioSaver.finalize()\n\n\treturn (pField, uField[0*numberOfVertices:1*numberOfVertices], uField[1*numberOfVertices:2*numberOfVertices], uField[2*numberOfVertices:3*numberOfVertices])\n\ndef main_run():\n\tpropertyData = PyEFVLib.PropertyData({\n\t\t'Body':\n\t\t{\n\t\t\t'nu': 0.2,\n\t\t\t'G': 6000000000.0,\n\t\t\t'cs': 0.0,\n\t\t\t'phi': 0.19,\n\t\t\t'k': 1.9e-15,\n\t\t\t'cf': 3.0303e-10,\n\t\t\t'mu': 1000.0,\n\t\t\t'rhos': 2700.0,\n\t\t\t'rhof': 1000.0,\n\t\t},\n\t})\n\tboundaryConditionsDict = {\n\t\t'u_x': {\n\t\t\t'InitialValue': 0.0,\n\t\t\t'InnerX': { 'condition' : PyEFVLib.Dirichlet, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerY': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerZ': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'Outer': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : load },\n\t\t},\n\t\t'u_y': {\n\t\t\t'InitialValue': 0.0,\n\t\t\t'InnerX': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerY': { 'condition' : PyEFVLib.Dirichlet, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerZ': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'Outer': { 'condition' : PyEFVLib.Neumann, 'type' 
: PyEFVLib.Constant, 'value' : load },\n\t\t},\n\t\t'u_z': {\n\t\t\t'InitialValue': 0.0,\n\t\t\t'InnerX': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerY': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerZ': { 'condition' : PyEFVLib.Dirichlet, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'Outer': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : load },\n\t\t},\n\t\t'p': {\n\t\t\t'InitialValue': 0.0,\n\t\t\t'InnerX': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerY': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'InnerZ': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t\t'Outer': { 'condition' : PyEFVLib.Neumann, 'type' : PyEFVLib.Constant, 'value' : 0.0 },\n\t\t},\n\t}\n\n\tproblemData = PyEFVLib.ProblemData(\n\t\tmeshFilePath = meshFilePath,\n\t\toutputFilePath = \"{RESULTS}/geomechanics\",\n\t\tnumericalSettings = PyEFVLib.NumericalSettings( timeStep = 10, maxNumberOfIterations = 20 ),\n\t\tpropertyData = propertyData,\n\t\tboundaryConditions = PyEFVLib.BoundaryConditions(boundaryConditionsDict),\n\t)\n\n\ttimes = np.linspace(0,200,20)\n\ttimeSteps = times[1:]-times[:-1]\n\n\tpField, uxField, uyField, uzField = geomechanics( problemData, times, timeSteps )\n\n\tboundaryConditionsDict[\"u_x\"][\"InitialValue\"] = uxField\n\tboundaryConditionsDict[\"u_y\"][\"InitialValue\"] = uyField\n\tboundaryConditionsDict[\"u_z\"][\"InitialValue\"] = uzField\n\tboundaryConditionsDict[\"p\"][\"InitialValue\"] = pField\n\tboundaryConditionsDict[\"p\"][\"Outer\"][\"condition\"] = PyEFVLib.Dirichlet\n\n\tproblemData = PyEFVLib.ProblemData(\n\t\tmeshFilePath = meshFilePath,\n\t\toutputFilePath = \"{RESULTS}/geomechanics\",\n\t\tnumericalSettings = PyEFVLib.NumericalSettings( timeStep = 10, finalTime = 200, maxNumberOfIterations = 64 ),\n\t\tpropertyData = propertyData,\n\t\tboundaryConditions = PyEFVLib.BoundaryConditions(boundaryConditionsDict),\n\t)\n\n\ttimes = np.logspace(-1,2,65) - 1e-1\n\ttimeSteps = times[1:]-times[:-1]\n\n\tgeomechanics( problemData, times, timeSteps )\n\n\nif run and __name__ == 
'__main__':\n\tmain_run()\n\n\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n################################################################################################################################################################\n\n\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport json\nimport sys, os\n\ndirname = os.path.realpath( os.path.dirname(__file__) )\nsys.path.append(dirname + \"/../../../../solgeom\")\nfrom solgeom.Cryer import Solution\n\nfilePath = dirname+\"/../../../results/geomechanics/Results.csv\"\n\ndf = pd.read_csv(filePath)\nX = df[\"X\"]\nY = df[\"Y\"]\nZ = df[\"Z\"]\n\nnumberOfTimeSteps = int(df.keys()[-1].split(\" - \")[0].replace(\"TimeStep\",\"\")) + 1\n\n\nmm = 1000.\nkPa = 1/1000.\n\ndef main_plot():\n# if True:\n\trock = json.load(open(dirname+\"/../../../../solgeom/examples/solid.json\", \"r\"))\n\tfluid = json.load(open(dirname+\"/../../../../solgeom/examples/fluid.json\", \"r\"))\n\n\tcryer = Solution(radius, load, rock, fluid)\n\n\ttimes = (np.logspace(-1,2,65) - 1e-1)[:numberOfTimeSteps]\n\tc_idx = [idx for idx,(x,y,z) in enumerate(zip(X,Y,Z)) if x==0.0 and y==0.0 and z==0.0][0]\n\n\tp_a = cryer.getPressureValues(times)\n\tp_n = np.array([df[f\"TimeStep{s} - p\"][c_idx] for s in range(numberOfTimeSteps)])\n\tinitialPressure_n = df[f\"TimeStep0 - p\"][c_idx]\n\n\tfig, ax = plt.subplots()\n\n\tprint(initialPressure_n)\n\tprint(cryer.initialPressure)\n\tprint(initialPressure_n/cryer.initialPressure)\n\n\tax.semilogx(times, p_a/cryer.initialPressure, color='k')\n\tax.scatter(times, p_n/initialPressure_n, marker='.', color='r')\n\tax.set_xlabel(\"Time (s)\", size=12)\n\tax.set_ylabel(\"Normalized pressure %s\"%(r\"$(p/p_0)$\"), size=12)\n\tax.grid(True)\n\n\tplt.show()\n\nif plot and __name__ == '__main__':\n\tmain_plot()" ]
[ [ "numpy.array", "pandas.read_csv", "numpy.linalg.solve", "numpy.linspace", "numpy.einsum", "numpy.logspace", "numpy.matmul", "matplotlib.pyplot.subplots", "numpy.concatenate", "numpy.repeat", "matplotlib.pyplot.show", "numpy.zeros" ] ]
Shadownox/cogsci-beliefmodeling
[ "8fda3a03207745cfa99435295c87d7aa8695872b" ]
[ "models/Random.py" ]
[ "import ccobra\nimport numpy as np\n\nclass RandomModel(ccobra.CCobraModel):\n def __init__(self, name='Random'):\n super(RandomModel, self).__init__(name, ['syllogistic-belief'], ['verify'])\n\n def predict(self, item, **kwargs):\n return np.random.choice([True, False])\n\n def predict_rating(self, item, **kwargs):\n return int(np.random.randint(1, 7))" ]
[ [ "numpy.random.randint", "numpy.random.choice" ] ]
james-muriithi/coding-jobs-scrapper
[ "f47c9f402f37756a785317e3e12998d1c93e0f88" ]
[ "brightermodays/main.py" ]
[ "# import packages\nimport requests\nimport pandas as pd\nimport time\nfrom .functions import *\nfrom datetime import datetime\nimport os\nimport json\n\n# give the filename the name of the current folder\nfolder = os.path.join(os.getcwd(), 'jobs')\nfile_name = os.path.join(folder, 'brightermondays-' +\n datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'.csv')\n\ndomain = 'https://www.brightermonday.co.ke/jobs/it-software'\n\n# dataframe\ndf = pd.DataFrame(columns=['title', 'location',\n 'company', 'summary', 'salary', 'link', 'post_date', 'full_text', 'fetch_date'])\n\n\ndef scrap_jobs():\n print('scrapping jobs from '+domain + '....')\n\n # get dom\n page = requests.get(domain)\n\n #ensuring at least 1 second between page grabs\n time.sleep(1)\n\n #fetch data\n soup = get_soup(page.text)\n divs = soup.find_all(name=\"article\", attrs={\"class\": \"search-result\"})\n\n # for all jobs on a page\n for div in divs:\n #specifying row num for index of job posting in dataframe\n num = (len(df) + 1)\n link = extract_link(div)\n\n #job data after parsing\n job_post = []\n\n #grabbing job title\n job_post.append(extract_job_title(div))\n\n #grabbing location name\n job_post.append(extract_location(div))\n\n #grabbing company\n job_post.append(extract_company(div))\n\n #grabbing summary text\n job_post.append(extract_summary(link))\n\n #grabbing salary\n job_post.append(extract_salary(div))\n\n #grabbing link\n job_post.append(link)\n\n #grabbing date\n job_post.append(extract_date(div))\n\n #grabbing full_text\n job_post.append(extract_fulltext(link))\n\n # current time\n job_post.append(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n #appending list of job post info to dataframe at index num\n df.loc[num] = job_post\n\n postJob(df.loc[num].to_dict())\n \n saveCSV()\n\ndef saveCSV():\n #saving df as a local csv file\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n df.to_csv(file_name, encoding='utf-8')\n\ndef postJob(data):\n token = os.environ['ACCESS_TOKEN']\n endpoint = os.environ['API_ENDPOINT']\n\n try:\n x = requests.post(endpoint, headers={\n 'Authorization': 'Bearer {}'.format(token)}, json=data)\n print(x.json())\n except Exception as error:\n print(error)\n pass\n\n\ndef main():\n scrap_jobs()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
qilei123/DAFNe
[ "6ae6c17ecef6b88e21843969e456fc83b34da0fe" ]
[ "dafne/data/datasets/dafne_dataset_mapper.py" ]
[ "#!/usr/bin/env python3\n\nfrom detectron2.data.detection_utils import filter_empty_instances\nfrom detectron2.structures.instances import Instances\nimport numpy as np\nimport torch\n\nfrom detectron2.config import configurable\nfrom detectron2.data import DatasetMapper\nfrom dafne.utils.sort_corners import sort_quadrilateral\n\n\nclass DAFNeDatasetMapper(DatasetMapper):\n def __init__(self, cfg, **kwargs):\n super().__init__(cfg, **kwargs)\n self._cfg = cfg\n\n def __call__(self, dataset_dict):\n result = super().__call__(dataset_dict)\n if \"instances\" in result:\n instances = result[\"instances\"]\n\n\n # Iterate over polygons and check that they are still valid\n if instances.has(\"gt_masks\") and len(instances.gt_masks.polygons) > 0:\n for i, ps in enumerate(instances.gt_masks):\n instances.gt_masks.polygons[i] = [p for p in ps if p.shape[0] == 8]\n\n\n instances = filter_empty_instances(instances, by_mask=True)\n\n if instances.has(\"gt_masks\") and len(instances.gt_masks) > 0:\n gt_masks = np.stack(instances.gt_masks).squeeze(1)\n gt_corners = torch.tensor(gt_masks, dtype=instances.gt_boxes.tensor.dtype)\n\n # Sort corners\n if self._cfg.MODEL.DAFNE.SORT_CORNERS_DATALOADER:\n gt_corners = sort_quadrilateral(gt_corners)\n\n instances.gt_corners = gt_corners\n instances.gt_corners_area = instances.gt_masks.area().float()\n else:\n instances.gt_corners = torch.empty(0, 8)\n instances.gt_corners_area = torch.empty(0)\n result[\"instances\"] = instances\n\n return result\n" ]
[ [ "torch.empty", "numpy.stack", "torch.tensor" ] ]
MaXXXXfeng/flask-scaffolding
[ "5edf6bb0b28dafc22a492ab085d5720a489c92e1" ]
[ "flask_scaffolding/scaffoldings/basic/proj/handlers/main.py" ]
[ "# -*- coding: utf-8 -*-\nimport urllib.parse\n\nfrom flask import Blueprint, request, make_response\nimport pandas as pd\nimport tablib\n\nfrom proj.utils import ok_jsonify, fail_jsonify, check_file_suffix\nfrom proj.tasks.download import download_result\n\nbp_main = Blueprint('main', __name__, url_prefix=None)\n\n\n@bp_main.route('/index')\ndef index():\n return ok_jsonify()\n\n\n@bp_main.route('/files', methods=['POST'])\ndef read_file():\n '''从请求读取文件'''\n if 'file' not in request.files.keys():\n return fail_jsonify(reason='file is null')\n\n file = request.files['file']\n file_name = file.filename\n if not check_file_suffix(file_name):\n return fail_jsonify(reason=\"仅支持xlx/csv/xlsx格式文件\")\n\n if file_name.split('.')[1] == 'csv':\n df = pd.read_csv(file)\n elif file_name.split('.')[1] in ['xlsx', 'xls']:\n df = pd.read_excel(file)\n\n return ok_jsonify()\n\n\n@bp_main.route('/export', methods=['GET'])\ndef export_file():\n '''返回Excel文件'''\n filename = 'test_file'\n\n test_data1 = [['name1', 'age1', 'sex1'], ['name2', 'age2', 'sex2'], ['name3', 'age3', 'sex3']]\n data1 = tablib.Dataset(*test_data1, headers=['name', 'name', 'sex'], title='test1')\n\n test_data2 = [['address1', 'email1'], ['address2', 'email2']]\n data2 = tablib.Dataset(*test_data2, headers=['address', 'email'], title='test2')\n ds = tablib.Databook((data1, data2)) # Databook 用来合并多个sheet , 只有一个sheet可以直接使用 tablib.Dataset\n # 生成文件 并返回\n response = make_response(ds.xls, 200, {'mimetype': 'application/vnd.ms-excel'})\n response.headers['Content-Disposition'] = \"attachment; filename={}.xls\" \\\n .format(urllib.parse.quote(filename))\n return response\n\n\n@bp_main.route('/celery', methods=['POST'])\ndef download_file():\n '''异步下载文件'''\n param = request.json\n download_url = param.get('result_url')\n email = 'tester@gmail.com'\n download_result.delay(download_url, email)\n return ok_jsonify()\n" ]
[ [ "pandas.read_excel", "pandas.read_csv" ] ]
eribean/girth_mcmc
[ "911376eabf693b517683af8372984b589d6f7e09" ]
[ "girth_mcmc/polytomous/multidimensional_pcm.py" ]
[ "import pymc3 as pm\nfrom numpy import linspace, zeros, unique\n\nimport theano\nfrom theano import tensor as tt\n\nfrom girth.multidimensional import initial_guess_md\nfrom girth_mcmc.utils import get_discrimination_indices\nfrom girth_mcmc.distributions import PartialCredit\n\n\n__all__= [\"multidimensional_credit_model\"]\n\n\ndef multidimensional_credit_model(dataset, n_categories, n_factors):\n \"\"\"Defines the mcmc model for the multidimensional partial credit model.\n \n Args:\n dataset: [n_items, n_participants] 2d array of measured responses\n n_categories: (int) number of polytomous values (i.e. Number of Likert Levels)\n n_factors: (int) number of factors to extract\n\n Returns:\n model: PyMC3 model to run\n \"\"\"\n if n_factors < 2:\n raise AssertionError(f\"Multidimensional GRM model requires \"\n f\"two or more factors specified!\")\n\n n_items, n_people = dataset.shape\n n_levels = n_categories - 1\n\n # Need small deviation in offset to\n # fit into pymc framework\n mu_value = linspace(-0.05, 0.05, n_levels)\n\n # Run through 0, K - 1\n observed = dataset - dataset.min()\n\n diagonal_indices, lower_indices = get_discrimination_indices(n_items, n_factors)\n lower_length = lower_indices[0].shape[0]\n\n graded_mcmc_model = pm.Model()\n \n with graded_mcmc_model:\n # Ability Parameters\n ability = pm.Normal(\"Ability\", mu=0, sigma=1, shape=(n_factors, n_people))\n \n # Multidimensional Discrimination\n discrimination = tt.zeros((n_items, n_factors), dtype=theano.config.floatX)\n diagonal_discrimination = pm.Lognormal('Diagonal Discrimination', mu=0, \n sigma=0.25, shape=n_factors)\n lower_discrimination = pm.Normal('Lower Discrimination', sigma=1, \n shape=lower_length)\n discrimination = tt.set_subtensor(discrimination[diagonal_indices], \n diagonal_discrimination)\n\n discrimination = tt.set_subtensor(discrimination[lower_indices], \n lower_discrimination)\n \n # Threshold multilevel prior\n sigma_difficulty = pm.HalfNormal('Difficulty_SD', sigma=1, shape=1)\n for ndx in range(n_items):\n thresholds = pm.Normal(f\"Thresholds{ndx}\", mu=mu_value, \n sigma=sigma_difficulty, shape=n_levels)\n\n # Compute the log likelihood\n kernel = pm.math.dot(discrimination[ndx], ability)\n probabilities = PartialCredit(f'Log_Likelihood{ndx}', cutpoints=thresholds, \n eta=kernel, observed=observed[ndx])\n\n return graded_mcmc_model\n" ]
[ [ "numpy.linspace" ] ]
Shubham18091998/Supply-Bot
[ "971c3615697d5c3137caaa3aa9d96faf91968cb9" ]
[ "SB#2726_Task1/Task1.3/Codes/aruco_generation.py" ]
[ "import numpy as np\nimport cv2\nimport cv2.aruco as aruco\n \n \n'''\n drawMarker(...)\n drawMarker(dictionary, id, sidePixels[, img[, borderBits]]) -> img\n'''\n \naruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_50) #creating aruco dictionary...250 markers and a marker size of 6x6 bits\nprint(aruco_dict)\n# second parameter is id number\n# last parameter is total image size\nimg = aruco.drawMarker(aruco_dict, 25, 600)# 2-- marker id, as the chose dictionary is upto 250...so the id no ranges from 0 to 249....and 700x700 is the pixel size\n# cv2.imwrite(\"Aruco_ids/test_marker11_big.jpg\", img)\nprint (img.shape)\nr,c = img.shape\nr = r+50\nc = c+50\nimg2 = np.ones((r,c), np.uint8)*255\nimg2[25:425,25:425] = img\ncv2.imshow('frame',img2)\ncv2.imwrite(\"aruco25.jpg\", img2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.ones" ] ]
abdur4373/ROS_depth_pred
[ "63ed4d97df8b49a43aad53c4c6bf01441f05153d" ]
[ "pytorch/resize_test.py" ]
[ "import numpy as np\nimport skimage.io as io\n# from depth import*\nfrom predict import *\nfrom scipy import interpolate\n\n\ndef resize_depth_pred(out_img_pred_np):\n x = [x for x in range(160)]\n y = [y for y in range(128)]\n\n f = interpolate.interp2d(x, y, out_img_pred_np)\n\n xnew = np.linspace(0, 160, 608)\n\n ynew = np.linspace(0, 128, 456)\n\n depth_pred_inter = f(xnew, ynew)\n\n # print(\"Predicted depth values of size 608,456 {0}\".format(depth_pred_inter))\n # print('shape')\n # print(depth_pred_inter.shape)\n depth_pred_inter_diff = depth_pred_inter - np.amin(depth_pred_inter)\n depth_pred_inter_norm = depth_pred_inter_diff / np.amax(depth_pred_inter_diff)\n depth_pred_inter_ = np.empty([456, 608, 3])\n depth_pred_inter_[:, :, 0] = depth_pred_inter_norm[:, :]\n depth_pred_inter_[:, :, 1] = depth_pred_inter_norm[:, :]\n depth_pred_inter_[:, :, 2] = depth_pred_inter_norm[:, :]\n # io.imshow(depth_pred_inter_ / 4.0)\n io.imsave('depth_pred_inter.jpg', depth_pred_inter_)\n\n # io.show()\n\n return depth_pred_inter\n" ]
[ [ "numpy.amax", "numpy.linspace", "numpy.amin", "scipy.interpolate.interp2d", "numpy.empty" ] ]
theompek/Autonomous_Vehicle
[ "5566d54e9379335919841d5dc32c7dd37117d348" ]
[ "src/src/control/src/controller.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\n\"\"\"\nThe module corresponds to the low level control system of the vehicle, contains the path following algorithms so at to the\nvehicle follows the determined path and the algorithms for the speed control\n\"\"\"\n\n# ==============================================================================\n# --General imports ------------------------------------------------------------\n# ==============================================================================\n\nfrom time import sleep\nimport time\nimport numpy as np\nimport copy\nimport matplotlib.pyplot as plt\nimport math\nimport time\nfrom scipy.signal import butter, lfilter, freqz\nimport random\n\n# ==============================================================================\n# -- ROS imports ---------------------------------------------------------------\n# ==============================================================================\nimport rospy\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\nfrom control.msg import VehicleCmd\nfrom perception.msg import Object\nfrom perception.srv import EgoVehicleGeometry\nfrom local_path_planner.msg import LocalOptimalPath\nfrom std_msgs.msg import Float64\n\n# ==============================================================================\n# -- Implementation ------------------------------------------------------------\n# ==============================================================================\n# Global variables\nLOOK_AHEAD = 6 # Pure pursuit follow the point that is LOOK_AHEAD points ahead the vehicle\n\n\ndef point_in_distance(x, y, dist=3.0):\n x_c, y_c = x[0], y[0]\n for i in range(1, len(x)):\n if np.hypot(x_c-x[i], y_c-y[i]) > dist:\n return i-1\n\n return len(x)-1\n\n\ndef calc_closer_point(x_base, y_base, x_v, y_v, x0=0, offset=1000000, step=1):\n if len(x_base) != len(y_base):\n return None\n t1 = x0 - offset if (x0 - offset) > 0 else 0\n t2 = x0 + offset if (x0 + offset) < (len(x_base) - 1) else len(x_base)\n\n min_dist = float(\"inf\")\n index = x0\n for i in range(0, t2 - t1, step):\n dist = math.sqrt((x_base[t1 + i] - x_v) ** 2 + (y_base[t1 + i] - y_v) ** 2)\n if min_dist > dist:\n min_dist = dist\n index = t1 + i\n\n return index\n\n\ndef pure_pursuit(tx, ty, vx, vy, v_yaw, L):\n \"\"\"\n :param L: The length between front and rear wheels axis.\n :param v_yaw: The direction of the vehicle related to global coordinates\n :param vy: Vehicle y global position\n :param vx: Vehicle x global position\n :type ty: Target point y global position\n :param tx: Target point x global position\n\n \"\"\"\n # Calculate pure pursuit\n a = math.atan2(ty - vy, tx - vx) - math.radians(v_yaw)\n ld = math.sqrt((ty - vy) ** 2 + (tx - vx) ** 2)\n delta = np.arctan(2 * L * math.sin(a) / ld)\n\n return delta\n\n\nclass SpeedController:\n def __init__(self):\n self.previous_e = 0.0\n self.previous_throttle = 0.0\n self.previous_brake = 0.0\n self.integral_time = 100\n self.saved_errors = [0.0]*self.integral_time\n self.Kp = 0.0025\n self.Kd = 2.2\n self.Ki = 0.00003\n self.Ke = 0.00095\n self.Kbr1 = 0.000035\n self.Kbr2 = 0.00075\n self.prev_time = time.time()\n self.time_step = 0.1\n\n def velocity_control(self, target_speed, current_speed):\n if time.time() - self.prev_time < self.time_step:\n self.prev_time = time.time()\n return self.previous_throttle, self.previous_brake\n # PD controller\n # Proportional\n e = target_speed - current_speed\n self.saved_errors.pop(0)\n 
self.saved_errors.append(e)\n integral = sum(self.saved_errors)\n # Derivative\n de = e - self.previous_e\n self.previous_e = e\n Uk = self.Ke*(self.Kp*e + self.Kd*de + self.Ki*integral)\n if 0.0 < abs(e) < 2.0:\n e = 0.0\n v_con_thr = self.previous_throttle + abs(e)*Uk\n v_con_br = 0.0\n if e < 0.0:\n v_con_thr = self.previous_throttle - abs(e*Uk)\n v_con_br = self.previous_brake + self.Kbr1*abs(e*Uk)*current_speed**2\n if target_speed < 3.0 and e < 0:\n v_con_thr = 0.0\n v_con_br = self.previous_brake + self.Kbr2*abs(e*Uk)*current_speed**2\n if target_speed == 0.0 and current_speed < 1:\n v_con_thr = 0.0\n v_con_br = 1\n\n v_con_thr = min(max(v_con_thr, 0.0), 1.0)\n v_con_br = min(max(v_con_br, 0.0), 1.0)\n self.previous_throttle = v_con_thr\n self.previous_brake = v_con_br\n return v_con_thr, v_con_br\n\n\nclass LP_Filter:\n def __init__(self):\n # Filter requirements.\n self.prev_time = time.time()\n self.data_length_throttle = 55\n self.data_length_brake = 10\n self.order = 2\n self.fs = 1200.0 # sample rate, Hz\n self.cutoff = 2.0 # desired cutoff frequency of the filter, Hz\n\n def butter_low_pass(self):\n self.fs = (self.fs + 1/(time.time()-self.prev_time+0.000001))/2\n self.cutoff = self.fs / 10\n self.prev_time = time.time()\n nyq = 0.5 * self.fs\n normal_cutoff = self.cutoff / nyq\n b, a = butter(self.order, normal_cutoff, btype='low', analog=False)\n return b, a\n\n def butter_low_pass_filter(self, data):\n b, a = self.butter_low_pass()\n y = lfilter(b, a, data)\n # print(self.fs)\n return min(max(0, y[-1]), 1)\n\n\nclass Control:\n def __init__(self):\n # --- General---\n self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data_info_save.txt\")\n with open(self.file_path, \"a+\") as file:\n file.truncate()\n self.steer_angle = 0.0\n self.veh_throttle = 0.0\n self.veh_brake = 0.0\n self.speed_controller = SpeedController()\n self.lp_filter = LP_Filter()\n self.throttle_data = [0]*self.lp_filter.data_length_throttle\n self.brake_data = [0]*self.lp_filter.data_length_brake\n self.last_simulator_time = 0.0\n self.current_simulator_time = 0.0\n # Save info for plots\n self.prev_time = time.time()\n self.time_step = 0.1\n self.dist_step = 0.1\n self.prev_veh_pos = [0, 0]\n\n # Distance between front and rear wheels and max steering angle\n self.L, self.max_angle = 2.0, 70.0\n # --- ROS ---\n rospy.init_node('Controller_node', anonymous=True)\n self.ego_vehicle = Object()\n self.optimal_local_path = LocalOptimalPath()\n self.subscriber_ego_vehicle = rospy.Subscriber(\"ego_vehicle_msg\", Object, self.callback_ego_vehicle, queue_size=1)\n self.subscriber_optimal_local_path = rospy.Subscriber(\"optimal_local_path_msg\", LocalOptimalPath,\n self.callback_optimal_local_path, queue_size=1)\n self.subscriber_simulator_time_instance = rospy.Subscriber(\"simulator_time_instance_msg\", Float64,\n self.callback_simulator_time_instance, queue_size=1)\n self.last_simulator_time = self.current_simulator_time\n self.client_ego_vehicle_geometry()\n self.pub_vehicle_controller = rospy.Publisher('vehicle_controller_msg', VehicleCmd, queue_size=1)\n\n while len(self.optimal_local_path.x) == 0 and not rospy.is_shutdown():\n print(\"Control node here, please wait the local path planning system .....\")\n sleep(0.2)\n\n def control_vehicle(self):\n optimal_local_path = copy.deepcopy(self.optimal_local_path)\n emergency_stop = optimal_local_path.emergency_stop\n #print(\"emergency_stop\", emergency_stop)\n ego_vehicle = copy.deepcopy(self.ego_vehicle)\n target_speed_id = 
optimal_local_path.direct_target_speed * 3.6 if optimal_local_path.direct_control else optimal_local_path.s_d[-1] * 3.6\n speed_look_ahead = int(target_speed_id / 10.0)\n id_point = calc_closer_point(optimal_local_path.x, optimal_local_path.y, ego_vehicle.x, ego_vehicle.y) + LOOK_AHEAD + speed_look_ahead\n id_point = len(optimal_local_path.x)-1 if id_point >= len(optimal_local_path.x) else id_point\n if id_point <= 0:\n self.veh_throttle = 0.0\n self.veh_brake = 1.0\n self.steer_angle = 0.0\n self.publish_vehicle_control()\n return True\n # Calculate pure pursuit steering angle\n delta = pure_pursuit(optimal_local_path.x[id_point], optimal_local_path.y[id_point], ego_vehicle.x, ego_vehicle.y, ego_vehicle.yaw, self.L)\n steer_angle = np.degrees(delta)\n steer_angle = self.max_angle if steer_angle > self.max_angle else steer_angle\n steer_angle = -self.max_angle if steer_angle < -self.max_angle else steer_angle\n self.steer_angle = steer_angle / self.max_angle\n # Calculate throttle and brake for velocity keeping with PID controller\n target_speed = optimal_local_path.direct_target_speed*3.6 if optimal_local_path.direct_control else optimal_local_path.s_d[id_point] * 3.6 # m/s to Km/s\n current_speed = ego_vehicle.speed * 3.6\n self.veh_throttle, self.veh_brake = self.speed_controller.velocity_control(target_speed, current_speed)\n self.throttle_data = self.throttle_data[1:] + [self.veh_throttle]\n self.veh_throttle = self.lp_filter.butter_low_pass_filter(self.throttle_data)\n self.throttle_data[-1] = self.veh_throttle\n #self.throttle_data = self.throttle_data[1:] + [self.veh_throttle]\n self.brake_data = self.brake_data[1:] + [self.veh_brake]\n self.veh_brake = self.lp_filter.butter_low_pass_filter(self.brake_data)\n #self.brake_data = self.brake_data[1:] + [self.veh_brake]\n if emergency_stop or (target_speed == 0.0 and current_speed < 2):\n self.veh_throttle = 0.0\n self.veh_brake = 1.0\n self.steer_angle = 0.0\n if target_speed == 0.0 and current_speed < 8.0:\n self.steer_angle = 0.0\n self.publish_vehicle_control()\n # Save info for plots\n '''\n if time.time() - self.prev_time > self.time_step and False:\n self.prev_time = time.time()\n output = [str(ob_i) for ob_i in [round(target_speed, 2), round(current_speed, 2), round(self.veh_throttle, 2), round(self.veh_brake, 2)]]\n output_str = \"\"\n for i, st_i in enumerate(output):\n output_str += st_i + \",\" if i != len(output)-1 else st_i\n with open(self.file_path, \"a+\") as file:\n file.write(output_str + \"\\n\")\n if math.hypot(self.prev_veh_pos[0]-ego_vehicle.x, self.prev_veh_pos[1]-ego_vehicle.y) > self.dist_step:\n self.prev_veh_pos = [ego_vehicle.x, ego_vehicle.y]\n # output = [str(ob_i) for ob_i in [round(target_speed, 2), round(current_speed, 2), round(self.veh_throttle, 2), round(self.veh_brake, 2)]]\n output = [str(ob_i) for ob_i in\n [round(optimal_local_path.x[id_point], 2), round(optimal_local_path.y[id_point], 2),\n round(ego_vehicle.x, 2), round(ego_vehicle.y, 2)]]\n output_str = \"\"\n for i, st_i in enumerate(output):\n output_str += st_i + \",\" if i != len(output) - 1 else st_i\n with open(self.file_path, \"a+\") as file:\n file.write(output_str + \"\\n\")'''\n '''\n if draw_point_flag:\n self.local_map.draw_paths(paths=[[carla.Location(x=ego_vehicle.x, y=ego_vehicle.y)]], life_time=0.2,\n color=[250, 250, 0], same_color=True)\n self.local_map.draw_paths(paths=[[carla.Location(x=optimal_frenet_path.x[id_point], y=optimal_frenet_path.y[id_point])]]\n , life_time=0.2, color=[250, 250, 0], same_color=True)\n '''\n 
return True\n\n # -------- ROS functions ---------\n def callback_ego_vehicle(self, ros_data):\n self.ego_vehicle = ros_data\n #rospy.loginfo(ros_data)\n\n def callback_simulator_time_instance(self, ros_data):\n self.current_simulator_time = ros_data.data\n #rospy.loginfo(ros_data)\n\n def callback_optimal_local_path(self, ros_data):\n self.optimal_local_path = ros_data\n #rospy.loginfo(ros_data)\n\n def client_ego_vehicle_geometry(self):\n rospy.wait_for_service('ego_vehicle_geometry_srv')\n try:\n vehicle_geometry = rospy.ServiceProxy('ego_vehicle_geometry_srv', EgoVehicleGeometry)\n resp1 = vehicle_geometry(0)\n #rospy.loginfo(resp1)\n self.L, self.max_angle = resp1.L, resp1.max_angle\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n def publish_vehicle_control(self):\n pub = self.pub_vehicle_controller\n vehicle_controller = VehicleCmd()\n vehicle_controller.steer_angle = self.steer_angle\n vehicle_controller.veh_throttle = self.veh_throttle\n vehicle_controller.veh_brake = self.veh_brake\n #rospy.loginfo(vehicle_controller)\n pub.publish(vehicle_controller)\n\n\n'''\ndef client_apply_vehicle_control(throttle, steer_angle, brake):\n rospy.wait_for_service('vehicle_controller_srv')\n try:\n vehicle_controller = rospy.ServiceProxy('vehicle_controller_srv', VehicleController)\n control_values = VehicleControllerRequest()\n control_values.steer_angle = steer_angle\n control_values.veh_throttle = throttle\n control_values.veh_brake = brake\n resp1 = vehicle_controller(control_values)\n # rospy.loginfo(resp1)\n return resp1\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n '''\n\nif __name__ == '__main__':\n #sleep(1)\n controller = Control()\n try:\n while not rospy.is_shutdown():\n controller.control_vehicle()\n except rospy.ROSInterruptException:\n print(3)\n pass\n" ]
[ [ "scipy.signal.lfilter", "scipy.signal.butter", "numpy.hypot", "numpy.degrees" ] ]
FlySkyPigg/ShapeSegmentation
[ "1969de6ea884aa500cf6082e296cb986a2ac7c41" ]
[ "scripts_closerlook3d/eval_shapenetpart_ssg_pointnetpp.py" ]
[ "\"\"\"\nEvaluate the trained model and compute iou.\n\nAsk, what is iou? oh, my god.\n\nThe evaluation is proceeded in two-folds.\n1. Semantic class level iou.\n2. Instance level iou.\nAnd therefore we need to compute the semantic confusion matrix for each object.\n\"\"\"\n\nfrom datasets.ShapeNetPart import ShapeNetPartSeg\nimport argparse\nfrom torch.utils.data import DataLoader\nfrom models.pointnet2_part_seg_ssg import get_model\nimport torch\nimport os\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser('ShapeNetPart part-segmentation training')\nparser.add_argument('--data_root', type=str, default='data', metavar='PATH', help='root director of dataset')\nparser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')\nparser.add_argument('--batch_size', type=int, default=32, help='batch_size')\nparser.add_argument('--base_lr', type=float, default=1e-3, help='base learning rate')\nparser.add_argument('--max_epoch', type=int, default=1000, help='number of training epochs')\nparser.add_argument('--epochs_per_eval', type=int, default=10, help='number of training epochs')\nparser.add_argument('--epochs_per_save', type=int, default=100, help='number of training epochs')\nparser.add_argument('--experiment_name', type=str, default='output_seg_ssg_pointnetpp')\nparser.add_argument('--checkpoint', type=str, default='200.pth')\n\nargs = parser.parse_args()\n\nnum_sem_class = 6\nnum_categories = 16\n\nif __name__ == \"__main__\":\n train_dataset = ShapeNetPartSeg(num_points=2048, data_root=args.data_root, categories=['Table'], split='train')\n train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=False)\n\n test_dataset = ShapeNetPartSeg(num_points=2048, data_root=args.data_root, categories=['Table'], split='test')\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)\n\n # no point cloud normal.\n net = get_model(num_classes=num_sem_class, normal_channel=False).cuda()\n net.load_state_dict(torch.load(os.path.join(args.experiment_name, args.checkpoint)))\n net.eval()\n\n # sucheng: we can use data augmentation and voting to cope with this problem.\n conf_mats = []\n for pcs, masks, point_labels, labels in tqdm(train_dataloader):\n onehot_labels = torch.zeros(size=(labels.shape[0], num_categories))\n onehot_labels = onehot_labels.scatter_(1, torch.unsqueeze(labels, dim=1), 1).cuda()\n\n pcs = pcs.cuda()\n pcs = pcs.transpose(1, 2)\n\n with torch.no_grad():\n pred_sem, global_feat = net(pcs, onehot_labels)\n\n batch_logits = []\n batch_points_labels = []\n batch_shape_labels = []\n batch_masks = []\n\n point_labels = point_labels.cuda()\n\n for batch_idx in range(pred_sem.shape[0]):\n this_pred_sem = pred_sem[batch_idx]\n this_point_labels = point_labels[batch_idx]\n\n this_pred_sem = torch.argmax(this_pred_sem, dim=1).cpu().numpy()\n this_point_labels = this_point_labels.cpu().numpy()\n conf_mat = confusion_matrix(this_point_labels, this_pred_sem, labels=np.arange(num_sem_class))\n conf_mats.append(conf_mat)\n\n # let us compute instance level of confusion matrix's mIOU.\n # therefore, we compute iou for each semantic class and take the average.\n # 1. true positive.\n # 2. the number of points that is labeled as some semantic class.\n # 3. 
the number of points that is predicted as some semantic class.\n ious = []\n for mat in conf_mats:\n tp = np.diagonal(mat)\n label = np.sum(mat, axis=1)\n pred = np.sum(mat, axis=0)\n iou = tp / (label + pred - tp + 1e-6)\n ious.append(iou)\n instance_miou = np.mean(np.stack(ious, axis=0), axis=0)\n\n # semantic class level mIOU.\n # why not represent it as a confusion matrix?\n conf_mat = np.sum(np.stack(conf_mats, axis=0), axis=0)\n conf_mat = conf_mat / (np.sum(conf_mat, axis=1, keepdims=True) + 1e-6)\n print('training set instance iou\\t', instance_miou)\n print('training set semantic class conf mat\\t', conf_mat)\n\n conf_mats = []\n for pcs, masks, point_labels, labels in tqdm(test_dataloader):\n onehot_labels = torch.zeros(size=(labels.shape[0], num_categories))\n onehot_labels = onehot_labels.scatter_(1, torch.unsqueeze(labels, dim=1), 1).cuda()\n\n pcs = pcs.cuda()\n pcs = pcs.transpose(1, 2)\n\n with torch.no_grad():\n pred_sem, global_feat = net(pcs, onehot_labels)\n\n batch_logits = []\n batch_points_labels = []\n batch_shape_labels = []\n batch_masks = []\n\n point_labels = point_labels.cuda()\n\n for batch_idx in range(pred_sem.shape[0]):\n this_pred_sem = pred_sem[batch_idx]\n this_point_labels = point_labels[batch_idx]\n\n this_pred_sem = torch.argmax(this_pred_sem, dim=1).cpu().numpy()\n this_point_labels = this_point_labels.cpu().numpy()\n conf_mat = confusion_matrix(this_point_labels, this_pred_sem, labels=np.arange(num_sem_class))\n conf_mats.append(conf_mat)\n\n # let us compute instance level of confusion matrix's mIOU.\n # therefore, we compute iou for each semantic class and take the average.\n # 1. true positive.\n # 2. the number of points that is labeled as some semantic class.\n # 3. the number of points that is predicted as some semantic class.\n ious = []\n for mat in conf_mats:\n tp = np.diagonal(mat)\n label = np.sum(mat, axis=1)\n pred = np.sum(mat, axis=0)\n iou = tp / (label + pred - tp + 1e-6)\n ious.append(np.mean(iou))\n instance_miou = np.mean(np.array(ious))\n\n # semantic class level mIOU.\n # why not represent it as a confusion matrix?\n conf_mat = np.sum(np.stack(conf_mats, axis=0), axis=0)\n conf_mat = conf_mat / (np.sum(conf_mat, axis=1, keepdims=True) + 1e-6)\n print('testing set instance iou\\n', instance_miou)\n print('testing set semantic class conf mat\\n', conf_mat)\n" ]
[ [ "torch.zeros", "numpy.arange", "torch.utils.data.DataLoader", "numpy.stack", "torch.unsqueeze", "torch.no_grad", "numpy.mean", "numpy.array", "numpy.diagonal", "numpy.sum", "torch.argmax" ] ]
csj777/Captcha_Recognition
[ "6eb5aebe817305783560a623f50fdcba9166f2c6" ]
[ "predict.py" ]
[ "from keras.models import load_model\nfrom image_fit import resize_to_fit\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2\nimport pickle\nimport os\n# import matplotlib.pyplot as plt\n\n\n# 计算邻域非白色个数\ndef calculate_noise_count(img_obj, w, h):\n \"\"\"\n 计算邻域非白色的个数\n Args:\n img_obj: img obj\n w: width\n h: height\n Returns:\n count (int)\n \"\"\"\n count = 0\n width, height, s = img_obj.shape\n for _w_ in [w - 1, w, w + 1]:\n for _h_ in [h - 1, h, h + 1]:\n if _w_ > width - 1:\n continue\n if _h_ > height - 1:\n continue\n if _w_ == w and _h_ == h:\n continue\n if (img_obj[_w_, _h_, 0] < 233) or (img_obj[_w_, _h_, 1] <\n 233) or (img_obj[_w_, _h_, 2] <\n 233):\n count += 1\n return count\n\n\n# k邻域降噪\ndef operate_img(img, k):\n w, h, s = img.shape\n # 从高度开始遍历\n for _w in range(w):\n # 遍历宽度\n for _h in range(h):\n if _h != 0 and _w != 0 and _w < w - 1 and _h < h - 1:\n if calculate_noise_count(img, _w, _h) < k:\n img.itemset((_w, _h, 0), 255)\n img.itemset((_w, _h, 1), 255)\n img.itemset((_w, _h, 2), 255)\n\n return img\n\n\ndef around_white(img):\n w, h, s = img.shape\n for _w in range(w):\n for _h in range(h):\n if (_w <= 5) or (_h <= 5) or (_w >= w - 5) or (_h >= h - 5):\n img.itemset((_w, _h, 0), 255)\n img.itemset((_w, _h, 1), 255)\n img.itemset((_w, _h, 2), 255)\n return img\n\n\n# 邻域非同色降噪\ndef noise_unsome_piexl(img):\n '''\n 查找像素点上下左右相邻点的颜色,如果是非白色的非像素点颜色,则填充为白色\n :param img:\n :return:\n '''\n # print(img.shape)\n w, h, s = img.shape\n for _w in range(w):\n for _h in range(h):\n if _h != 0 and _w != 0 and _w < w - 1 and _h < h - 1: # 剔除顶点、底点\n center_color = img[_w, _h] # 当前坐标颜色\n # print(center_color)\n top_color = img[_w, _h + 1]\n bottom_color = img[_w, _h - 1]\n left_color = img[_w - 1, _h]\n right_color = img[_w + 1, _h]\n cnt = 0\n if all(top_color == center_color):\n cnt += 1\n if all(bottom_color == center_color):\n cnt += 1\n if all(left_color == center_color):\n cnt += 1\n if all(right_color == center_color):\n cnt += 1\n if cnt < 1:\n img.itemset((_w, _h, 0), 255)\n img.itemset((_w, _h, 1), 255)\n img.itemset((_w, _h, 2), 255)\n return img\n\n\nMODEL_FILENAME = \"captcha_model.hdf5\"\nMODEL_LABELS_FILENAME = \"model_labels.dat\"\nCAPTCHA_IMAGE_FOLDER = \"train\"\n\n# 加载模型标签(以便我们可以将模型预测转换为实际字母)\nwith open(MODEL_LABELS_FILENAME, \"rb\") as f:\n lb = pickle.load(f)\n\n# 加载训练好的神经网络\nmodel = load_model(MODEL_FILENAME)\n\n# 随机获取一些验证码图像进行测试。\ncaptcha_image_files = list(paths.list_images(CAPTCHA_IMAGE_FOLDER))\ncaptcha_image_files = np.random.choice(captcha_image_files, size=(20, ), replace=False)\n# captcha_image_files = ['1/1.jpg']\n\ncount1 = 0\ntotal = 0\n# 在图像路径上循环\nfor image_file in captcha_image_files:\n # 加载图像并将其转换为灰度\n image = cv2.imread(image_file)\n # plt.subplot(121), plt.imshow(image)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # 在图像周围添加一些额外的填充\n # image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)\n\n # 设置图像阈值(将其转换为纯黑白)\n thresh = cv2.threshold(image, 180, 255, cv2.THRESH_BINARY)[1]\n for i in range(2):\n thresh = operate_img(thresh, 4)\n img = around_white(thresh)\n img = noise_unsome_piexl(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n # plt.subplot(122), plt.imshow(thresh)\n # plt.show()\n # 找到图像的轮廓(连续的像素点)\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Hack与不同OpenCV版本的兼容性\n # contours = 
contours[0] if imutils.is_cv2() else contours[1]\n contours = contours[1] if imutils.is_cv3() else contours[0]\n\n letter_image_regions = []\n\n # 现在我们可以遍历四个轮廓中的每一个并提取每个轮廓中的字母\n for contour in contours:\n # 获取包含轮廓的矩形\n (x, y, w, h) = cv2.boundingRect(contour)\n if w + h < 40 or h < 10 or w < 5:\n continue\n # 比较轮廓的宽度和高度以检测连成一块的字母\n '''if w / h > 0.8 and len(letter_image_regions) < 4:\n # 这个轮廓太宽了,不可能是一个字母!把它分成两个字母区域\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))'''\n # 这是一封普通的字母\n letter_image_regions.append((x, y, w, h))\n\n # 如果我们在验证码中发现多于或少于4个字母,我们的字母提取工作不正常。跳过图像而不是保存错误的训练数据!\n if len(letter_image_regions) != 4:\n size_regions = len(letter_image_regions)\n if size_regions < 4:\n if size_regions == 0:\n continue\n elif size_regions == 1:\n (x, y, w, h) = letter_image_regions.pop()\n half_width = int(w / 4)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n letter_image_regions.append(\n (x + 2 * half_width, y, half_width, h))\n letter_image_regions.append(\n (x + 3 * half_width, y, half_width, h))\n elif size_regions == 2:\n letter_image_regions = sorted(letter_image_regions, key=lambda w: w[0])\n (x1, y1, w1, h1) = letter_image_regions.pop()\n (x2, y2, w2, h2) = letter_image_regions.pop()\n if w1 > 2 * w2:\n half_width = int(w1 / 3)\n letter_image_regions.append((x2, y2, w2, h2))\n letter_image_regions.append((x1, y1, half_width, h1))\n letter_image_regions.append(\n (x1 + half_width, y1, half_width, h1))\n letter_image_regions.append(\n (x1 + 2 * half_width, y1, half_width, h1))\n else:\n half_width = int(w1 / 2)\n letter_image_regions.append((x1, y1, half_width, h1))\n letter_image_regions.append(\n (x1 + half_width, y1, half_width, h1))\n half_width = int(w2 / 2)\n letter_image_regions.append((x2, y2, half_width, h2))\n letter_image_regions.append(\n (x2 + half_width, y2, half_width, h2))\n elif size_regions == 3:\n letter_image_regions = sorted(letter_image_regions, key=lambda w: w[0])\n (x, y, w, h) = letter_image_regions.pop()\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions = sorted(letter_image_regions, reverse=True, key=lambda w: w[0])\n for i in range(size_regions - 4):\n (x, y, w, h) = letter_image_regions.pop()\n for i in range(3):\n (x1, y1, w1, h1) = letter_image_regions.pop()\n if w1 / w < 1.5:\n break\n else:\n x = x1, y = y1, w = w1, h = h1\n letter_image_regions.append((x1, y1, w1, h1))\n size_regions = len(letter_image_regions)\n if size_regions < 4:\n if size_regions == 0:\n continue\n elif size_regions == 1:\n (x, y, w, h) = letter_image_regions.pop()\n half_width = int(w / 4)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n letter_image_regions.append(\n (x + 2 * half_width, y, half_width, h))\n letter_image_regions.append(\n (x + 3 * half_width, y, half_width, h))\n elif size_regions == 2:\n letter_image_regions = sorted(letter_image_regions, key=lambda w: w[0])\n (x1, y1, w1, h1) = letter_image_regions.pop()\n (x2, y2, w2, h2) = letter_image_regions.pop()\n if w1 > 2 * w2:\n half_width = int(w1 / 3)\n letter_image_regions.append((x2, y2, w2, h2))\n letter_image_regions.append((x1, y1, half_width, h1))\n letter_image_regions.append(\n (x1 + half_width, y1, half_width, h1))\n letter_image_regions.append(\n 
(x1 + 2 * half_width, y1, half_width, h1))\n else:\n half_width = int(w1 / 2)\n letter_image_regions.append((x1, y1, half_width, h1))\n letter_image_regions.append(\n (x1 + half_width, y1, half_width, h1))\n half_width = int(w2 / 2)\n letter_image_regions.append((x2, y2, half_width, h2))\n letter_image_regions.append(\n (x2 + half_width, y2, half_width, h2))\n elif size_regions == 3:\n letter_image_regions = sorted(letter_image_regions, key=lambda w: w[0])\n (x, y, w, h) = letter_image_regions.pop()\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n total = total + 4\n # 根据x坐标对检测到的字母图像进行排序,以确保从左到右进行处理,以便将图像与字母匹配\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n\n # 创建一个输出图像和一个列表来保存我们预测的字母\n output = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n output = cv2.merge([output] * 3)\n predictions = []\n\n # 循环\n for letter_bounding_box in letter_image_regions:\n # 获取图像中字母的坐标\n x, y, w, h = letter_bounding_box\n if y > 2:\n y = y - 2\n else:\n y = 0\n if x > 2:\n x = x - 2\n else:\n x = 0\n # 从边缘有2个像素边距的原始图像中提取字母\n letter_image = gray[y:y + h + 4, x:x + w + 4]\n if letter_image.size == 0:\n continue\n\n # 将字母图像的大小重新调整为20x20像素,以匹配训练数据\n letter_image = resize_to_fit(letter_image, 20, 20)\n\n # 将单个图像转换为4d图像列表以适配Keras\n letter_image = np.expand_dims(letter_image, axis=2)\n letter_image = np.expand_dims(letter_image, axis=0)\n\n # 让神经网络做出预测\n prediction = model.predict(letter_image)\n\n # 将一个独热编码预测转换回正常字母\n letter = lb.inverse_transform(prediction)[0]\n predictions.append(letter)\n\n # 在输出图像上绘制预测\n cv2.rectangle(output, (x, y), (x + w + 4, y + h + 4), (0, 255, 0), 1)\n cv2.putText(output, letter, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)\n\n # 打印验证码文本\n count2 = 0\n captcha_text = \"\".join(predictions)\n print(\"CAPTCHA text is: {}\".format(captcha_text))\n captcha_correct_text = os.path.splitext(image_file)[0][-4:]\n print(\"CORRECT text is: {}\".format(captcha_correct_text))\n for i in range(4):\n if captcha_correct_text[i] == captcha_text[i]:\n count1 += 1\n count2 += 1\n print(\"Correct Number:{}\".format(count2))\n\nprint(\"Total Correct Numbers:{}\".format(count1))\nprint(\"Accuracy:{}\".format(count1/total))\n# 显示带批注的图像\n# cv2.imshow(\"Output\", output)\n# cv2.waitKey()\n" ]
[ [ "numpy.expand_dims", "numpy.random.choice" ] ]
ankane/faiss
[ "2f7fcd8766d348391f080d060dedb839b9f99c23" ]
[ "test/support/pca.py" ]
[ "import faiss\nimport numpy as np\n\nmt = np.random.rand(1000, 40).astype('float32')\nmat = faiss.PCAMatrix(40, 10)\nmat.train(mt)\ntr = mat.apply_py(mt)\n\nprint((tr ** 2).sum(0))\n" ]
[ [ "numpy.random.rand" ] ]
didriknielsen/argmax_flows
[ "4ffff4bd6f7b25e20292eff6bad2bf5a962e8d39" ]
[ "data/vocab.py" ]
[ "import os\nimport json\nimport warnings\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\nclass Vocab():\n\n def __init__(self, stoi={}):\n self.fill(stoi)\n\n def fill(self, stoi):\n self.stoi = stoi\n self.itos = {i:s for s,i in stoi.items()}\n\n def save_json(self, path):\n if not os.path.exists(path): os.makedirs(path)\n vocab_file = os.path.join(path, 'vocab.json')\n with open(vocab_file, 'w') as f:\n json.dump(self.stoi, f, indent=4)\n\n def load_json(self, path):\n vocab_file = os.path.join(path, 'vocab.json')\n with open(vocab_file, 'r') as f:\n stoi = json.load(f)\n self.fill(stoi)\n\n def string_to_idx(self, string):\n assert isinstance(string, str)\n return [self.stoi[s] for s in string]\n\n def idx_to_string(self, idx):\n assert isinstance(idx, list)\n count_err = np.sum([1 for i in idx if i not in self.itos])\n if count_err > 0:\n print(f'Warning, {count_err} decodings were not in vocab.')\n print(set([i for i in idx if i not in self.itos]))\n return ''.join([self.itos[i] for i in idx if i in self.itos])\n\n def encode(self, text, padding_value=0):\n assert isinstance(text, list)\n length = torch.tensor([len(string) for string in text])\n tensor_list = [torch.tensor(self.string_to_idx(string)) for string in text]\n tensor = nn.utils.rnn.pad_sequence(tensor_list, batch_first=True, padding_value=padding_value)\n return tensor, length\n\n def decode(self, tensor, length):\n assert torch.is_tensor(tensor)\n assert tensor.dim() == 2, 'Tensor should have shape (batch_size, seq_len)'\n text = [self.idx_to_string(tensor[b][:length[b]].tolist()) for b in range(tensor.shape[0])]\n return text\n" ]
[ [ "torch.is_tensor", "torch.nn.utils.rnn.pad_sequence", "numpy.sum" ] ]
atreyasha/mimic3_benchmarks_occlusion
[ "b80a92463757259fa9cba774b03d8b5b82fe863d" ]
[ "utils/readers.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport random\n\n\nclass Reader(object):\n def __init__(self, dataset_dir, listfile=None):\n self._dataset_dir = dataset_dir\n self._current_index = 0\n if listfile is None:\n listfile_path = os.path.join(dataset_dir, \"listfile.csv\")\n else:\n listfile_path = listfile\n with open(listfile_path, \"r\") as lfile:\n self._data = lfile.readlines()\n self._listfile_header = self._data[0]\n self._data = self._data[1:]\n\n def get_number_of_examples(self):\n return len(self._data)\n\n def random_shuffle(self, seed=None):\n if seed is not None:\n random.seed(seed)\n random.shuffle(self._data)\n\n def read_example(self, index):\n raise NotImplementedError()\n\n def read_next(self):\n to_read_index = self._current_index\n self._current_index += 1\n if self._current_index == self.get_number_of_examples():\n self._current_index = 0\n return self.read_example(to_read_index)\n\n\nclass DecompensationReader(Reader):\n def __init__(self, dataset_dir, listfile=None):\n \"\"\" Reader for decompensation prediction task.\n :param dataset_dir: Directory where timeseries files are stored.\n :param listfile: Path to a listfile. If this parameter is left `None` then\n `dataset_dir/listfile.csv` will be used.\n \"\"\"\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), int(y)) for (x, t, y) in self._data]\n\n def _read_timeseries(self, ts_filename, time_bound):\n ret = []\n with open(os.path.join(self._dataset_dir, ts_filename), \"r\") as tsfile:\n header = tsfile.readline().strip().split(',')\n assert header[0] == \"Hours\"\n for line in tsfile:\n mas = line.strip().split(',')\n t = float(mas[0])\n if t > time_bound + 1e-6:\n break\n ret.append(np.array(mas))\n return (np.stack(ret), header)\n\n def read_example(self, index):\n \"\"\" Read the example with given index.\n\n :param index: Index of the line of the listfile to read (counting starts from 0).\n :return: Directory with the following keys:\n X : np.array\n 2D array containing all events. Each row corresponds to a moment.\n First column is the time and other columns correspond to different\n variables.\n t : float\n Length of the data in hours. Note, in general, it is not equal to the\n timestamp of last event.\n y : int (0 or 1)\n Mortality within next 24 hours.\n header : array of strings\n Names of the columns. The ordering of the columns is always the same.\n name: Name of the sample.\n \"\"\"\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of examples (exclusive).\"\n )\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name, t)\n\n return {\"X\": X, \"t\": t, \"y\": y, \"header\": header, \"name\": name}\n\n\nclass InHospitalMortalityReader(Reader):\n def __init__(self, dataset_dir, listfile=None, period_length=48.0):\n \"\"\" Reader for in-hospital moratality prediction task.\n\n :param dataset_dir: Directory where timeseries files are stored.\n :param listfile: Path to a listfile. 
If this parameter is left `None` then\n `dataset_dir/listfile.csv` will be used.\n :param period_length: Length of the period (in hours) from which the prediction is done.\n \"\"\"\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, int(y)) for (x, y) in self._data]\n self._period_length = period_length\n\n def _read_timeseries(self, ts_filename):\n ret = []\n with open(os.path.join(self._dataset_dir, ts_filename), \"r\") as tsfile:\n header = tsfile.readline().strip().split(',')\n assert header[0] == \"Hours\"\n for line in tsfile:\n mas = line.strip().split(',')\n ret.append(np.array(mas))\n return (np.stack(ret), header)\n\n def read_example(self, index):\n \"\"\" Reads the example with given index.\n\n :param index: Index of the line of the listfile to read (counting starts from 0).\n :return: Dictionary with the following keys:\n X : np.array\n 2D array containing all events. Each row corresponds to a moment.\n First column is the time and other columns correspond to different\n variables.\n t : float\n Length of the data in hours. Note, in general, it is not equal to the\n timestamp of last event.\n y : int (0 or 1)\n In-hospital mortality.\n header : array of strings\n Names of the columns. The ordering of the columns is always the same.\n name: Name of the sample.\n \"\"\"\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of lines (exclusive).\"\n )\n\n name = self._data[index][0]\n t = self._period_length\n y = self._data[index][1]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X, \"t\": t, \"y\": y, \"header\": header, \"name\": name}\n\n\nclass LengthOfStayReader(Reader):\n def __init__(self, dataset_dir, listfile=None):\n \"\"\" Reader for length of stay prediction task.\n\n :param dataset_dir: Directory where timeseries files are stored.\n :param listfile: Path to a listfile. If this parameter is left `None` then\n `dataset_dir/listfile.csv` will be used.\n \"\"\"\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(x, float(t), float(y)) for (x, t, y) in self._data]\n\n def _read_timeseries(self, ts_filename, time_bound):\n ret = []\n with open(os.path.join(self._dataset_dir, ts_filename), \"r\") as tsfile:\n header = tsfile.readline().strip().split(',')\n assert header[0] == \"Hours\"\n for line in tsfile:\n mas = line.strip().split(',')\n t = float(mas[0])\n if t > time_bound + 1e-6:\n break\n ret.append(np.array(mas))\n return (np.stack(ret), header)\n\n def read_example(self, index):\n \"\"\" Reads the example with given index.\n\n :param index: Index of the line of the listfile to read (counting starts from 0).\n :return: Dictionary with the following keys:\n X : np.array\n 2D array containing all events. Each row corresponds to a moment.\n First column is the time and other columns correspond to different\n variables.\n t : float\n Length of the data in hours. Note, in general, it is not equal to the\n timestamp of last event.\n y : float\n Remaining time in ICU.\n header : array of strings\n Names of the columns. 
The ordering of the columns is always the same.\n name: Name of the sample.\n \"\"\"\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of lines (exclusive).\"\n )\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name, t)\n\n return {\"X\": X, \"t\": t, \"y\": y, \"header\": header, \"name\": name}\n\n\nclass PhenotypingReader(Reader):\n def __init__(self, dataset_dir, listfile=None):\n \"\"\" Reader for phenotype classification task.\n\n :param dataset_dir: Directory where timeseries files are stored.\n :param listfile: Path to a listfile. If this parameter is left `None` then\n `dataset_dir/listfile.csv` will be used.\n \"\"\"\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n self._data = [(mas[0], float(mas[1]), list(map(int, mas[2:])))\n for mas in self._data]\n\n def _read_timeseries(self, ts_filename):\n ret = []\n with open(os.path.join(self._dataset_dir, ts_filename), \"r\") as tsfile:\n header = tsfile.readline().strip().split(',')\n assert header[0] == \"Hours\"\n for line in tsfile:\n mas = line.strip().split(',')\n ret.append(np.array(mas))\n return (np.stack(ret), header)\n\n def read_example(self, index):\n \"\"\" Reads the example with given index.\n\n :param index: Index of the line of the listfile to read (counting starts from 0).\n :return: Dictionary with the following keys:\n X : np.array\n 2D array containing all events. Each row corresponds to a moment.\n First column is the time and other columns correspond to different\n variables.\n t : float\n Length of the data in hours. Note, in general, it is not equal to the\n timestamp of last event.\n y : array of ints\n Phenotype labels.\n header : array of strings\n Names of the columns. The ordering of the columns is always the same.\n name: Name of the sample.\n \"\"\"\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of lines (exclusive).\"\n )\n\n name = self._data[index][0]\n t = self._data[index][1]\n y = self._data[index][2]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X, \"t\": t, \"y\": y, \"header\": header, \"name\": name}\n\n\nclass MultitaskReader(Reader):\n def __init__(self, dataset_dir, listfile=None):\n \"\"\" Reader for multitask learning.\n\n :param dataset_dir: Directory where timeseries files are stored.\n :param listfile: Path to a listfile. 
If this parameter is left `None` then\n `dataset_dir/listfile.csv` will be used.\n \"\"\"\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n\n def process_ihm(x):\n return list(map(int, x.split(';')))\n\n def process_los(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int, x[:len(x) // 2])),\n list(map(float, x[len(x) // 2:])))\n\n def process_ph(x):\n return list(map(int, x.split(';')))\n\n def process_decomp(x):\n x = x.split(';')\n if x[0] == '':\n return ([], [])\n return (list(map(int,\n x[:len(x) // 2])), list(map(int,\n x[len(x) // 2:])))\n\n self._data = [(fname, float(t), process_ihm(ihm), process_los(los),\n process_ph(pheno), process_decomp(decomp))\n for fname, t, ihm, los, pheno, decomp in self._data]\n\n def _read_timeseries(self, ts_filename):\n ret = []\n with open(os.path.join(self._dataset_dir, ts_filename), \"r\") as tsfile:\n header = tsfile.readline().strip().split(',')\n assert header[0] == \"Hours\"\n for line in tsfile:\n mas = line.strip().split(',')\n ret.append(np.array(mas))\n return (np.stack(ret), header)\n\n def read_example(self, index):\n \"\"\" Reads the example with given index.\n\n :param index: Index of the line of the listfile to read (counting starts from 0).\n :return: Return dictionary with the following keys:\n X : np.array\n 2D array containing all events. Each row corresponds to a moment.\n First column is the time and other columns correspond to different\n variables.\n t : float\n Length of the data in hours. Note, in general, it is not equal to the\n timestamp of last event.\n ihm : array\n Array of 3 integers: [pos, mask, label].\n los : array\n Array of 2 arrays: [masks, labels].\n pheno : array\n Array of 25 binary integers (phenotype labels).\n decomp : array\n Array of 2 arrays: [masks, labels].\n header : array of strings\n Names of the columns. The ordering of the columns is always the same.\n name: Name of the sample.\n \"\"\"\n if index < 0 or index >= len(self._data):\n raise ValueError(\n \"Index must be from 0 (inclusive) to number of lines (exclusive).\"\n )\n\n name = self._data[index][0]\n (X, header) = self._read_timeseries(name)\n\n return {\n \"X\": X,\n \"t\": self._data[index][1],\n \"ihm\": self._data[index][2],\n \"los\": self._data[index][3],\n \"pheno\": self._data[index][4],\n \"decomp\": self._data[index][5],\n \"header\": header,\n \"name\": name\n }\n" ]
[ [ "numpy.array", "numpy.stack" ] ]
yitongx/baconian-public
[ "a67e23c6bc6bfe7019ec9532a3d18f06aed6bbbb", "e84508da60877e387344133a11039edaac35c5bf" ]
[ "baconian/common/special.py", "baconian/common/spaces/dict.py" ]
[ "\"\"\"\nThis script is from garage\n\"\"\"\nimport gym.spaces\nimport numpy as np\nimport scipy\nimport scipy.signal\nfrom typeguard import typechecked\nimport baconian.common.spaces as mbrl_spaces\n\n\ndef weighted_sample(weights, objects):\n \"\"\"\n Return a random item from objects, with the weighting defined by weights\n (which must sum to 1).\n \"\"\"\n # An array of the weights, cumulatively summed.\n cs = np.cumsum(weights)\n # Find the index of the first weight over a random value.\n idx = sum(cs < np.random.rand())\n return objects[min(idx, len(objects) - 1)]\n\n\ndef weighted_sample_n(prob_matrix, items):\n s = prob_matrix.cumsum(axis=1)\n r = np.random.rand(prob_matrix.shape[0])\n k = (s < r.reshape((-1, 1))).sum(axis=1)\n n_items = len(items)\n return items[np.minimum(k, n_items - 1)]\n\n\n# compute softmax for each row\ndef softmax(x):\n shifted = x - np.max(x, axis=-1, keepdims=True)\n expx = np.exp(shifted)\n return expx / np.sum(expx, axis=-1, keepdims=True)\n\n\n# compute entropy for each row\ndef cat_entropy(x):\n return -np.sum(x * np.log(x), axis=-1)\n\n\n# compute perplexity for each row\ndef cat_perplexity(x):\n return np.exp(cat_entropy(x))\n\n\ndef explained_variance_1d(ypred, y):\n assert y.ndim == 1 and ypred.ndim == 1\n vary = np.var(y)\n if np.isclose(vary, 0):\n if np.var(ypred) > 0:\n return 0\n else:\n return 1\n return 1 - np.var(y - ypred) / (vary + 1e-8)\n\n\ndef to_onehot(ind, dim):\n ret = np.zeros(dim)\n ret[ind] = 1\n return ret\n\n\ndef to_onehot_n(inds, dim):\n ret = np.zeros((len(inds), dim))\n ret[np.arange(len(inds)), inds] = 1\n return ret\n\n\ndef from_onehot(v):\n return np.nonzero(v)[0][0]\n\n\ndef from_onehot_n(v):\n if ((isinstance(v, np.ndarray) and not v.size)\n or (isinstance(v, list) and not v)):\n return []\n return np.nonzero(v)[1]\n\n\ndef discount_cumsum(x, discount):\n # See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering # noqa: E501\n # Here, we have y[t] - discount*y[t+1] = x[t]\n # or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]\n return scipy.signal.lfilter(\n [1], [1, float(-discount)], x[::-1], axis=0)[::-1]\n\n\ndef discount_return(x, discount):\n return np.sum(x * (discount ** np.arange(len(x))))\n\n\ndef rk4(derivs, y0, t, *args, **kwargs):\n \"\"\"\n Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.\n This is a toy implementation which may be useful if you find\n yourself stranded on a system w/o scipy. 
Otherwise use\n :func:`scipy.integrate`.\n\n *y0*\n initial state vector\n\n *t*\n sample times\n\n *derivs*\n returns the derivative of the system and has the\n signature ``dy = derivs(yi, ti)``\n\n *args*\n additional arguments passed to the derivative function\n\n *kwargs*\n additional keyword arguments passed to the derivative function\n\n Example 1 ::\n\n ## 2D system\n\n def derivs6(x,t):\n d1 = x[0] + 2*x[1]\n d2 = -3*x[0] + 4*x[1]\n return (d1, d2)\n dt = 0.0005\n t = arange(0.0, 2.0, dt)\n y0 = (1,2)\n yout = rk4(derivs6, y0, t)\n\n Example 2::\n\n ## 1D system\n alpha = 2\n def derivs(x,t):\n return -alpha*x + exp(-t)\n\n y0 = 1\n yout = rk4(derivs, y0, t)\n\n\n If you have access to scipy, you should probably be using the\n scipy.integrate tools rather than this function.\n \"\"\"\n\n try:\n ny = len(y0)\n except TypeError:\n yout = np.zeros((len(t),), np.float_)\n else:\n yout = np.zeros((len(t), ny), np.float_)\n\n yout[0] = y0\n i = 0\n\n for i in np.arange(len(t) - 1):\n thist = t[i]\n dt = t[i + 1] - thist\n dt2 = dt / 2.0\n y0 = yout[i]\n\n k1 = np.asarray(derivs(y0, thist, *args, **kwargs))\n k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))\n k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))\n k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))\n yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\n return yout\n\n\ndef make_batch(v, original_shape: (list, tuple)):\n if not isinstance(v, np.ndarray):\n v = np.array(v)\n # assert len(v.shape) <= len(original_shape) + 1\n if len(v.shape) == len(original_shape) + 1 and np.equal(np.array(v.shape[1:]),\n np.array(original_shape)).all() is True:\n return v\n else:\n bs = np.prod(list(v.shape)) / np.prod(original_shape)\n assert float(bs).is_integer()\n return np.reshape(v, newshape=[int(bs)] + list(original_shape))\n\n\ndef flat_dim(space):\n if isinstance(space, mbrl_spaces.Box):\n return np.prod(space.low.shape)\n elif isinstance(space, mbrl_spaces.Discrete):\n return space.n\n elif isinstance(space, mbrl_spaces.Tuple):\n return np.sum([flat_dim(x) for x in space.spaces])\n else:\n raise NotImplementedError\n\n# flatten(action_space, action)\ndef flatten(space, obs, one_hot_for_discrete=False):\n if isinstance(space, mbrl_spaces.Box):\n return np.asarray(obs).flatten()\n elif isinstance(space, mbrl_spaces.Discrete):\n if one_hot_for_discrete is True:\n if space.n == 2:\n obs = int(obs)\n return to_onehot(obs, space.n)\n else:\n return int(obs)\n elif isinstance(space, mbrl_spaces.Tuple):\n return np.concatenate(\n [flatten(c, xi) for c, xi in zip(space.spaces, obs)])\n else:\n raise NotImplementedError\n\n\ndef flatten_n(space, obs):\n if isinstance(space, mbrl_spaces.Box):\n obs = np.asarray(obs)\n return obs.reshape((obs.shape[0], -1))\n elif isinstance(space, mbrl_spaces.Discrete):\n return to_onehot_n(np.array(obs, dtype=np.int), space.n)\n elif isinstance(space, mbrl_spaces.Tuple):\n obs_regrouped = [[obs[i] for o in obs] for i in range(len(obs[0]))]\n flat_regrouped = [\n flatten_n(c, oi) for c, oi in zip(space.spaces, obs_regrouped)\n ]\n return np.concatenate(flat_regrouped, axis=-1)\n else:\n raise NotImplementedError\n\n\ndef unflatten(space, obs):\n if isinstance(space, mbrl_spaces.Box):\n return np.asarray(obs).reshape(space.shape)\n elif isinstance(space, mbrl_spaces.Discrete):\n return from_onehot(np.array(obs, dtype=np.int))\n elif isinstance(space, mbrl_spaces.Tuple):\n dims = [flat_dim(c) for c in space.spaces]\n flat_xs = np.split(obs, 
np.cumsum(dims)[:-1])\n return tuple(unflatten(c, xi) for c, xi in zip(space.spaces, flat_xs))\n else:\n raise NotImplementedError\n\n\ndef unflatten_n(space, obs):\n if isinstance(space, mbrl_spaces.Box):\n obs = np.asarray(obs)\n return obs.reshape((obs.shape[0],) + space.shape)\n elif isinstance(space, mbrl_spaces.Discrete):\n return from_onehot_n(np.array(obs, dtype=np.int))\n elif isinstance(space, mbrl_spaces.Tuple):\n dims = [flat_dim(c) for c in space.spaces]\n flat_xs = np.split(obs, np.cumsum(dims)[:-1], axis=-1)\n unflat_xs = [\n unflatten_n(c, xi) for c, xi in zip(space.spaces, flat_xs)\n ]\n unflat_xs_grouped = list(zip(*unflat_xs))\n return unflat_xs_grouped\n else:\n raise NotImplementedError\n", "\"\"\"This is a garage-compatible wrapper for Dict spaces.\"\"\"\nfrom collections import OrderedDict\n\nfrom baconian.common.spaces.base import Space\nfrom baconian.common.spaces import Box\n\nimport numpy as np\nimport types\n\n\nclass Dict(Space):\n \"\"\"\n A dictionary of simpler spaces, e.g. Discrete, Box.\n\n Example usage:\n self.observation_space = spaces.Dict({\"position\": spaces.Discrete(2),\n \"velocity\": spaces.Discrete(3)})\n \"\"\"\n\n def __init__(self, spaces):\n \"\"\"\n Convert and store the incoming spaces into an OrderedDict.\n\n Note: classes inheriting from garage.Dict need to convert each\n space in spaces to a garage.<class>.space.\n \"\"\"\n if isinstance(spaces, dict):\n spaces = OrderedDict(sorted(list(spaces.items())))\n if isinstance(spaces, list):\n spaces = OrderedDict(spaces)\n self.spaces = spaces\n\n def contains(self, x):\n \"\"\"\n Check if x is contained within self.spaces.\n\n Returns:\n Boolean\n\n \"\"\"\n if isinstance(x, dict):\n return bool(all(item in self.spaces.items() for item in x.items()))\n else:\n return False\n\n def to_jsonable(self, sample_n):\n \"\"\"\n Serialize as a dict-representation of vectors.\n\n Returns:\n JSON (dict)\n\n \"\"\"\n return {key: space.to_jsonable([sample[key] for sample in sample_n]) \\\n for key, space in self.spaces.items()}\n\n def from_jsonable(self, sample_n):\n \"\"\"\n Convert information from a JSON format into a list.\n\n Returns:\n ret (list)\n\n \"\"\"\n dict_of_list = {}\n for key, space in self.spaces.items():\n dict_of_list[key] = space.from_jsonable(sample_n[key])\n ret = []\n for i, _ in enumerate(dict_of_list[key]):\n entry = {}\n for key, value in dict_of_list.items():\n entry[key] = value[i]\n ret.append(entry)\n return ret\n\n @property\n def flat_dim(self):\n \"\"\"\n Return a flat dimension of the dict space.\n\n Returns:\n flat_dim (int)\n\n \"\"\"\n raise NotImplementedError\n\n def flatten(self, x):\n \"\"\"\n Return a flattened observation x.\n\n Returns:\n x (flattened)\n\n \"\"\"\n raise NotImplementedError\n\n def unflatten(self, x):\n \"\"\"\n Return an unflattened observation x.\n\n Returns:\n x (unflattened)\n\n \"\"\"\n raise NotImplementedError\n\n def flatten_n(self, xs):\n \"\"\"\n Return flattened observations xs.\n\n Returns:\n xs (flattened)\n\n \"\"\"\n raise NotImplementedError\n\n def unflatten_n(self, xs):\n \"\"\"\n Return unflattened observations xs.\n\n Returns:\n xs (unflattened)\n\n \"\"\"\n raise NotImplementedError\n\n def sample(self):\n \"\"\"\n Return a sample from each space in spaces.\n\n Returns:\n OrderedDict\n\n \"\"\"\n # raise NotImplementedError\n # return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])\n\n ordered = OrderedDict()\n for k, space in self.spaces.items():\n for a in space.low:\n if np.isinf(a):\n a = 
np.nan_to_num(a)\n space.sample = types.MethodType(self._sample_with_nan, space)\n for b in space.high:\n if np.isinf(b):\n b = np.nan_to_num(b)\n space.sample = types.MethodType(self._sample_with_nan, space)\n\n ordered.update([(k, space.sample())])\n return ordered\n\n def new_tensor_variable(self, name, extra_dims):\n \"\"\"\n Return a new tensor variable in the TF graph.\n\n Returns:\n Tensor\n\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _sample_with_nan(space: Space):\n \"\"\"\n\n :param space:\n :return:\n \"\"\"\n from gym.spaces.box import Box as GymBox\n\n if not isinstance(space, GymBox):\n raise TypeError('space is not of type Box')\n high = np.ones_like(space.low)\n low = -1 * np.ones_like(space.high)\n return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),\n a_min=space.low,\n a_max=space.high)\n" ]
[ [ "numpy.log", "numpy.minimum", "numpy.nonzero", "numpy.asarray", "numpy.cumsum", "numpy.concatenate", "numpy.max", "numpy.random.rand", "numpy.prod", "numpy.var", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.sum", "numpy.isclose" ], [ "numpy.random.uniform", "numpy.ones_like", "numpy.isinf", "numpy.nan_to_num" ] ]
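The `discount_cumsum` helper above encodes the recursion y[t] = x[t] + discount * y[t+1] as an IIR filter over the reversed sequence. A self-contained sanity check against the direct recursion (toy values only):

import numpy as np
import scipy.signal

def discount_cumsum(x, discount):
    # same one-liner as in special.py above
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

x = np.array([1.0, 2.0, 3.0])
gamma = 0.9
expected = np.zeros_like(x)
running = 0.0
for t in reversed(range(len(x))):   # direct recursion, back to front
    running = x[t] + gamma * running
    expected[t] = running
assert np.allclose(discount_cumsum(x, gamma), expected)  # [5.23, 4.7, 3.0]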
taskina-alena/selene
[ "3d86c61346909cdae5a3a4d9559e60e0736cf6b0" ]
[ "selene_sdk/train_model.py" ]
[ "\"\"\"\nThis module provides the `TrainModel` class and supporting methods.\n\"\"\"\nimport logging\nimport math\nimport os\nimport shutil\nfrom time import strftime\nfrom time import time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\n\nfrom .utils import initialize_logger\nfrom .utils import load_model_from_state_dict\nfrom .utils import PerformanceMetrics\n\nlogger = logging.getLogger(\"selene\")\n\n\ndef _metrics_logger(name, out_filepath):\n logger = logging.getLogger(\"{0}\".format(name))\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(message)s\")\n file_handle = logging.FileHandler(\n os.path.join(out_filepath, \"{0}.txt\".format(name)))\n file_handle.setFormatter(formatter)\n logger.addHandler(file_handle)\n return logger\n\n\nclass TrainModel(object):\n \"\"\"\n This class ties together the various objects and methods needed to\n train and validate a model.\n\n TrainModel saves a checkpoint model (overwriting it after\n `save_checkpoint_every_n_steps`) as well as a best-performing model\n (overwriting it after `report_stats_every_n_steps` if the latest\n validation performance is better than the previous best-performing\n model) to `output_dir`.\n\n TrainModel also outputs 2 files that can be used to monitor training\n as Selene runs: `selene_sdk.train_model.train.txt` (training loss) and\n `selene_sdk.train_model.validation.txt` (validation loss & average\n ROC AUC). The columns in these files can be used to quickly visualize\n training history (e.g. you can use `matplotlib`, `plt.plot(auc_list)`)\n and see, for example, whether the model is still improving, if there are\n signs of overfitting, etc.\n\n Parameters\n ----------\n model : torch.nn.Module\n The model to train.\n data_sampler : selene_sdk.samplers.Sampler\n The example generator.\n loss_criterion : torch.nn._Loss\n The loss function to optimize.\n optimizer_class : torch.optim.Optimizer\n The optimizer to minimize loss with.\n optimizer_kwargs : dict\n The dictionary of keyword arguments to pass to the optimizer's\n constructor.\n batch_size : int\n Specify the batch size to process examples. Should be a power of 2.\n max_steps : int\n The maximum number of mini-batches to iterate over.\n report_stats_every_n_steps : int\n The frequency with which to report summary statistics. You can\n set this value to be equivalent to a training epoch\n (`n_steps * batch_size`) being the total number of samples\n seen by the model so far. Selene evaluates the model on the validation\n dataset every `report_stats_every_n_steps` and, if the model obtains\n the best performance so far (based on the user-specified loss function),\n Selene saves the model state to a file called `best_model.pth.tar` in\n `output_dir`.\n output_dir : str\n The output directory to save model checkpoints and logs in.\n save_checkpoint_every_n_steps : int or None, optional\n Default is 1000. If None, set to the same value as\n `report_stats_every_n_steps`\n save_new_checkpoints_after_n_steps : int or None, optional\n Default is None. The number of steps after which Selene will\n continually save new checkpoint model weights files\n (`checkpoint-<TIMESTAMP>.pth.tar`) every\n `save_checkpoint_every_n_steps`. 
Before this point,\n the file `checkpoint.pth.tar` is overwritten every\n `save_checkpoint_every_n_steps` to limit the memory requirements.\n n_validation_samples : int or None, optional\n Default is `None`. Specify the number of validation samples in the\n validation set. If `n_validation_samples` is `None` and the data sampler\n used is the `selene_sdk.samplers.IntervalsSampler` or\n `selene_sdk.samplers.RandomSampler`, we will retrieve 32000\n validation samples. If `None` and using\n `selene_sdk.samplers.MultiFileSampler`, we will use all\n available validation samples from the appropriate data file.\n n_test_samples : int or None, optional\n Default is `None`. Specify the number of test samples in the test set.\n If `n_test_samples` is `None` and\n\n - the sampler you specified has no test partition, you should not\n specify `evaluate` as one of the operations in the `ops` list.\n That is, Selene will not automatically evaluate your trained\n model on a test dataset, because the sampler you are using does\n not have any test data.\n - the sampler you use is of type `selene_sdk.samplers.OnlineSampler`\n (and the test partition exists), we will retrieve 640000 test\n samples.\n - the sampler you use is of type\n `selene_sdk.samplers.MultiFileSampler` (and the test partition\n exists), we will use all the test samples available in the\n appropriate data file.\n\n cpu_n_threads : int, optional\n Default is 1. Sets the number of OpenMP threads used for parallelizing\n CPU operations.\n use_cuda : bool, optional\n Default is `False`. Specify whether a CUDA-enabled GPU is available\n for torch to use during training.\n data_parallel : bool, optional\n Default is `False`. Specify whether multiple GPUs are available\n for torch to use during training.\n logging_verbosity : {0, 1, 2}, optional\n Default is 2. Set the logging verbosity level.\n\n * 0 - Only warnings will be logged.\n * 1 - Information and warnings will be logged.\n * 2 - Debug messages, information, and warnings will all be\\\n logged.\n\n checkpoint_resume : str or None, optional\n Default is `None`. If `checkpoint_resume` is not None, it should be the\n path to a model file generated by `torch.save` that can now be read\n using `torch.load`.\n\n Attributes\n ----------\n model : torch.nn.Module\n The model to train.\n sampler : selene_sdk.samplers.Sampler\n The example generator.\n loss_criterion : torch.nn._Loss\n The loss function to optimize.\n optimizer_class : torch.optim.Optimizer\n The optimizer to minimize loss with.\n batch_size : int\n The size of the mini-batch to use during training.\n max_steps : int\n The maximum number of mini-batches to iterate over.\n nth_step_report_stats : int\n The frequency with which to report summary statistics.\n nth_step_save_checkpoint : int\n The frequency with which to save a model checkpoint.\n use_cuda : bool\n If `True`, use a CUDA-enabled GPU. 
If `False`, use the CPU.\n data_parallel : bool\n Whether to use multiple GPUs or not.\n output_dir : str\n The directory to save model checkpoints and logs.\n training_loss : list(float)\n The current training loss.\n metrics : dict\n A dictionary that maps metric names (`str`) to metric functions.\n By default, this contains `\"roc_auc\"`, which maps to\n `sklearn.metrics.roc_auc_score`, and `\"average_precision\"`,\n which maps to `sklearn.metrics.average_precision_score`.\n\n \"\"\"\n\n def __init__(self,\n model,\n data_sampler,\n loss_criterion,\n optimizer_class,\n optimizer_kwargs,\n batch_size,\n max_steps,\n report_stats_every_n_steps,\n output_dir,\n save_checkpoint_every_n_steps=1000,\n save_new_checkpoints_after_n_steps=None,\n report_gt_feature_n_positives=10,\n n_validation_samples=None,\n n_test_samples=None,\n cpu_n_threads=1,\n use_cuda=False,\n data_parallel=False,\n logging_verbosity=2,\n checkpoint_resume=None,\n metrics=dict(roc_auc=roc_auc_score,\n average_precision=average_precision_score)):\n \"\"\"\n Constructs a new `TrainModel` object.\n \"\"\"\n self.model = model\n self.sampler = data_sampler\n self.criterion = loss_criterion\n self.optimizer = optimizer_class(\n self.model.parameters(), **optimizer_kwargs)\n\n self.batch_size = batch_size\n self.max_steps = max_steps\n self.nth_step_report_stats = report_stats_every_n_steps\n self.nth_step_save_checkpoint = None\n if not save_checkpoint_every_n_steps:\n self.nth_step_save_checkpoint = report_stats_every_n_steps\n else:\n self.nth_step_save_checkpoint = save_checkpoint_every_n_steps\n\n self.save_new_checkpoints = save_new_checkpoints_after_n_steps\n\n logger.info(\"Training parameters set: batch size {0}, \"\n \"number of steps per 'epoch': {1}, \"\n \"maximum number of steps: {2}\".format(\n self.batch_size,\n self.nth_step_report_stats,\n self.max_steps))\n\n torch.set_num_threads(cpu_n_threads)\n\n self.use_cuda = use_cuda\n self.data_parallel = data_parallel\n\n if self.data_parallel:\n self.model = nn.DataParallel(model)\n logger.debug(\"Wrapped model in DataParallel\")\n\n if self.use_cuda:\n self.model.cuda()\n self.criterion.cuda()\n logger.debug(\"Set modules to use CUDA\")\n\n os.makedirs(output_dir, exist_ok=True)\n self.output_dir = output_dir\n\n initialize_logger(\n os.path.join(self.output_dir, \"{0}.log\".format(__name__)),\n verbosity=logging_verbosity)\n\n self._create_validation_set(n_samples=n_validation_samples)\n self._validation_metrics = PerformanceMetrics(\n self.sampler.get_feature_from_index,\n report_gt_feature_n_positives=report_gt_feature_n_positives,\n metrics=metrics)\n\n if \"test\" in self.sampler.modes:\n self._test_data = None\n self._n_test_samples = n_test_samples\n self._test_metrics = PerformanceMetrics(\n self.sampler.get_feature_from_index,\n report_gt_feature_n_positives=report_gt_feature_n_positives,\n metrics=metrics)\n\n self._start_step = 0\n self._min_loss = float(\"inf\") # TODO: Should this be set when it is used later? 
Would need to if we want to train model 2x in one run.\n if checkpoint_resume is not None:\n checkpoint = torch.load(\n checkpoint_resume,\n map_location=lambda storage, location: storage)\n if \"state_dict\" not in checkpoint:\n raise ValueError(\"Selene does not support continued \"\n \"training of models that were not originally \"\n \"trained using Selene.\")\n\n self.model = load_model_from_state_dict(\n checkpoint[\"state_dict\"], self.model)\n\n self._start_step = checkpoint[\"step\"]\n if self._start_step >= self.max_steps:\n self.max_steps += self._start_step\n\n self._min_loss = checkpoint[\"min_loss\"]\n self.optimizer.load_state_dict(\n checkpoint[\"optimizer\"])\n if self.use_cuda:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n\n logger.info(\n (\"Resuming from checkpoint: step {0}, min loss {1}\").format(\n self._start_step, self._min_loss))\n\n self._train_logger = _metrics_logger(\n \"{0}.train\".format(__name__), self.output_dir)\n self._validation_logger = _metrics_logger(\n \"{0}.validation\".format(__name__), self.output_dir)\n\n self._train_logger.info(\"loss\")\n self._validation_logger.info(\"\\t\".join([\"loss\"] +\n sorted([x for x in self._validation_metrics.metrics.keys()])))\n\n def _create_validation_set(self, n_samples=None):\n \"\"\"\n Generates the set of validation examples.\n\n Parameters\n ----------\n n_samples : int or None, optional\n Default is `None`. The size of the validation set. If `None`,\n will use all validation examples in the sampler.\n\n \"\"\"\n logger.info(\"Creating validation dataset.\")\n t_i = time()\n self._validation_data, self._all_validation_targets = \\\n self.sampler.get_validation_set(\n self.batch_size, n_samples=n_samples)\n t_f = time()\n logger.info((\"{0} s to load {1} validation examples ({2} validation \"\n \"batches) to evaluate after each training step.\").format(\n t_f - t_i,\n len(self._validation_data) * self.batch_size,\n len(self._validation_data)))\n\n def create_test_set(self):\n \"\"\"\n Loads the set of test samples.\n We do not create the test set in the `TrainModel` object until\n this method is called, so that we avoid having to load it into\n memory until the model has been trained and is ready to be\n evaluated.\n\n \"\"\"\n logger.info(\"Creating test dataset.\")\n t_i = time()\n self._test_data, self._all_test_targets = \\\n self.sampler.get_test_set(\n self.batch_size, n_samples=self._n_test_samples)\n t_f = time()\n logger.info((\"{0} s to load {1} test examples ({2} test batches) \"\n \"to evaluate after all training steps.\").format(\n t_f - t_i,\n len(self._test_data) * self.batch_size,\n len(self._test_data)))\n np.savez_compressed(\n os.path.join(self.output_dir, \"test_targets.npz\"),\n data=self._all_test_targets)\n\n def _get_batch(self):\n \"\"\"\n Fetches a mini-batch of examples\n\n Returns\n -------\n tuple(numpy.ndarray, numpy.ndarray)\n A tuple containing the examples and targets.\n\n \"\"\"\n t_i_sampling = time()\n batch_sequences, batch_targets = self.sampler.sample(\n batch_size=self.batch_size)\n t_f_sampling = time()\n logger.debug(\n (\"[BATCH] Time to sample {0} examples: {1} s.\").format(\n self.batch_size,\n t_f_sampling - t_i_sampling))\n return (batch_sequences, batch_targets)\n\n def train_and_validate(self):\n \"\"\"\n Trains the model and measures validation performance.\n\n \"\"\"\n min_loss = self._min_loss\n scheduler = ReduceLROnPlateau(\n self.optimizer,\n 'min',\n patience=16,\n 
verbose=True,\n factor=0.8)\n\n time_per_step = []\n for step in range(self._start_step, self.max_steps):\n t_i = time()\n train_loss = self.train()\n t_f = time()\n time_per_step.append(t_f - t_i)\n\n if step % self.nth_step_save_checkpoint == 0:\n checkpoint_dict = {\n \"step\": step,\n \"arch\": self.model.__class__.__name__,\n \"state_dict\": self.model.state_dict(),\n \"min_loss\": min_loss,\n \"optimizer\": self.optimizer.state_dict()\n }\n if self.save_new_checkpoints is not None and \\\n self.save_new_checkpoints <= step:\n checkpoint_filename = \"checkpoint-{0}\".format(\n strftime(\"%m%d%H%M%S\"))\n self._save_checkpoint(\n checkpoint_dict, False, filename=checkpoint_filename)\n logger.debug(\"Saving checkpoint `{0}.pth.tar`\".format(\n checkpoint_filename))\n else:\n self._save_checkpoint(\n checkpoint_dict, False)\n\n # TODO: Should we have some way to report training stats without running validation?\n if step and step % self.nth_step_report_stats == 0:\n logger.info((\"[STEP {0}] average number \"\n \"of steps per second: {1:.1f}\").format(\n step, 1. / np.average(time_per_step)))\n time_per_step = []\n valid_scores = self.validate()\n validation_loss = valid_scores[\"loss\"]\n self._train_logger.info(train_loss)\n to_log = [str(validation_loss)]\n for k in sorted(self._validation_metrics.metrics.keys()):\n if k in valid_scores and valid_scores[k]:\n to_log.append(str(valid_scores[k]))\n else:\n to_log.append(\"NA\")\n self._validation_logger.info(\"\\t\".join(to_log))\n scheduler.step(math.ceil(validation_loss * 1000.0) / 1000.0)\n\n if validation_loss < min_loss:\n min_loss = validation_loss\n self._save_checkpoint({\n \"step\": step,\n \"arch\": self.model.__class__.__name__,\n \"state_dict\": self.model.state_dict(),\n \"min_loss\": min_loss,\n \"optimizer\": self.optimizer.state_dict()}, True)\n logger.debug(\"Updating `best_model.pth.tar`\")\n logger.info(\"training loss: {0}\".format(train_loss))\n logger.info(\"validation loss: {0}\".format(validation_loss))\n\n # Logging training and validation on same line requires 2 parsers or more complex parser.\n # Separate logging of train/validate is just a grep for validation/train and then same parser.\n self.sampler.save_dataset_to_file(\"train\", close_filehandle=True)\n\n def train(self):\n \"\"\"\n Trains the model on a batch of data.\n\n Returns\n -------\n float\n The training loss.\n\n \"\"\"\n self.model.train()\n self.sampler.set_mode(\"train\")\n\n inputs, targets = self._get_batch()\n inputs = torch.Tensor(inputs)\n targets = torch.Tensor(targets)\n\n if self.use_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n inputs = Variable(inputs)\n targets = Variable(targets)\n\n predictions = self.model(inputs.transpose(1, 2))\n loss = self.criterion(predictions, targets)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.item()\n\n def _evaluate_on_data(self, data_in_batches):\n \"\"\"\n Makes predictions for some labeled input data.\n\n Parameters\n ----------\n data_in_batches : list(tuple(numpy.ndarray, numpy.ndarray))\n A list of tuples of the data, where the first element is\n the example, and the second element is the label.\n\n Returns\n -------\n tuple(float, list(numpy.ndarray))\n Returns the average loss, and the list of all predictions.\n\n \"\"\"\n self.model.eval()\n\n batch_losses = []\n all_predictions = []\n\n for (inputs, targets) in data_in_batches:\n inputs = torch.Tensor(inputs)\n targets = torch.Tensor(targets)\n\n if self.use_cuda:\n inputs = 
inputs.cuda()\n targets = targets.cuda()\n\n with torch.no_grad():\n inputs = Variable(inputs)\n targets = Variable(targets)\n\n predictions = self.model(inputs.transpose(1, 2))\n loss = self.criterion(predictions, targets)\n\n all_predictions.append(\n predictions.data.cpu().numpy())\n\n batch_losses.append(loss.item())\n all_predictions = np.vstack(all_predictions)\n return np.average(batch_losses), all_predictions\n\n def validate(self):\n \"\"\"\n Measures model validation performance.\n\n Returns\n -------\n dict\n A dictionary, where keys are the names of the loss metrics,\n and the values are the average value for that metric over\n the validation set.\n\n \"\"\"\n average_loss, all_predictions = self._evaluate_on_data(\n self._validation_data)\n average_scores = self._validation_metrics.update(all_predictions,\n self._all_validation_targets)\n for name, score in average_scores.items():\n logger.info(\"validation {0}: {1}\".format(name, score))\n\n average_scores[\"loss\"] = average_loss\n return average_scores\n\n def evaluate(self):\n \"\"\"\n Measures the model test performance.\n\n Returns\n -------\n dict\n A dictionary, where keys are the names of the loss metrics,\n and the values are the average value for that metric over\n the test set.\n\n \"\"\"\n if self._test_data is None:\n self.create_test_set()\n average_loss, all_predictions = self._evaluate_on_data(\n self._test_data)\n\n average_scores = self._test_metrics.update(all_predictions,\n self._all_test_targets)\n np.savez_compressed(\n os.path.join(self.output_dir, \"test_predictions.npz\"),\n data=all_predictions)\n\n for name, score in average_scores.items():\n logger.info(\"test {0}: {1}\".format(name, score))\n\n test_performance = os.path.join(\n self.output_dir, \"test_performance.txt\")\n feature_scores_dict = self._test_metrics.write_feature_scores_to_file(\n test_performance)\n\n average_scores[\"loss\"] = average_loss\n\n self._test_metrics.visualize(\n all_predictions, self._all_test_targets, self.output_dir)\n\n return (average_scores, feature_scores_dict)\n\n def _save_checkpoint(self,\n state,\n is_best,\n filename=\"checkpoint\"):\n \"\"\"\n Saves snapshot of the model state to file. Will save a checkpoint\n with name `<filename>.pth.tar` and, if this is the model's best\n performance so far, will save the state to a `best_model.pth.tar`\n file as well.\n\n Models are saved in the state dictionary format. This is a more\n stable format compared to saving the whole model (which is another\n option supported by PyTorch). Note that we do save a number of\n additional, Selene-specific parameters in the dictionary\n and that the actual `model.state_dict()` is stored in the `state_dict`\n key of the dictionary loaded by `torch.load`.\n\n See: https://pytorch.org/docs/stable/notes/serialization.html for more\n information about how models are saved in PyTorch.\n\n Parameters\n ----------\n state : dict\n Information about the state of the model. Note that this is\n not `model.state_dict()`, but rather, a dictionary containing\n keys that can be used for continued training in Selene\n _in addition_ to a key `state_dict` that contains\n `model.state_dict()`.\n is_best : bool\n Is this the model's best performance so far?\n filename : str, optional\n Default is \"checkpoint\". Specify the checkpoint filename. Will\n append a file extension to the end of the `filename`\n (e.g. 
`checkpoint.pth.tar`).\n\n Returns\n -------\n None\n\n \"\"\"\n logger.debug(\"[TRAIN] {0}: Saving model state to file.\".format(\n state[\"step\"]))\n cp_filepath = os.path.join(\n self.output_dir, filename)\n torch.save(state, \"{0}.pth.tar\".format(cp_filepath))\n if is_best:\n best_filepath = os.path.join(self.output_dir, \"best_model\")\n shutil.copyfile(\"{0}.pth.tar\".format(cp_filepath),\n \"{0}.pth.tar\".format(best_filepath))\n\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.Tensor", "torch.load", "torch.set_num_threads", "torch.no_grad", "torch.nn.DataParallel", "numpy.average", "numpy.vstack", "torch.autograd.Variable" ] ]
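A short sketch of reading a checkpoint written by `_save_checkpoint` above (the file path is illustrative). The point the docstring makes is that Selene wraps `model.state_dict()` in a metadata dictionary, so resuming code must index the `state_dict` key rather than loading the file straight into a model:

import torch

checkpoint = torch.load("outputs/best_model.pth.tar",  # assumed path
                        map_location=lambda storage, location: storage)
print(checkpoint["step"], checkpoint["arch"], checkpoint["min_loss"])
state_dict = checkpoint["state_dict"]   # the actual model weights
print(list(state_dict.keys())[:5])      # first few parameter names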
PontusHultkrantz/statarb
[ "521017c6f099e1bd7ea0f31df918abd83a0c8be7" ]
[ "src/optimal_controls/z_spread_model_parameters.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom src.estimation.ou_parameter_estimation import (\n estimate_ln_coint_params\n)\n\n\nclass ZSpreadModelParameters:\n\n def __init__(self, gamma=None, rho=None, rho_0=None, sigma_0=None, sigma=None,\n mu_0=None, mu=None, beta=None, delta=None, b=None, a=None,\n sigma_1=None, sigma_2=None, sigma_3=None,\n kappa=None, theta=None, symbols=None):\n\n self.m_gamma = gamma\n self.m_rho = rho\n self.m_rho_0 = rho_0\n self.m_sigma_0 = sigma_0\n self.m_sigma = sigma\n self.m_mu_0 = mu_0\n self.m_mu = mu\n self.m_beta = beta\n self.m_delta = delta\n self.m_b = b\n self.m_a = a\n\n self.m_symbols = None\n if symbols is not None:\n self.m_symbols = symbols\n\n if kappa is None:\n self.m_kappa = np.zeros_like(self.m_delta)\n for i in range(0, len(self.m_kappa)):\n self.m_kappa[i] = - self.m_beta[i] * self.m_delta[i]\n else:\n self.m_kappa = kappa\n\n if sigma_1 is None:\n self.m_sigma_1 = np.matmul(\n np.matmul(np.diag(self.m_sigma.flatten()),\n self.m_rho), np.diag(self.m_sigma.flatten()))\n else:\n self.m_sigma_1 = sigma_1\n\n if sigma_2 is None:\n self.m_sigma_2 = np.zeros_like(self.m_sigma_1)\n for i in range(0, self.m_sigma_1.shape[0]):\n for j in range(0, self.m_sigma_1.shape[1]):\n self.m_sigma_2[i, j] = self.m_sigma_0 * self.m_sigma[i] * self.m_rho_0[i] \\\n + self.m_beta[j] * self.m_sigma_1[i, j]\n else:\n self.m_sigma_2 = sigma_2\n\n if sigma_3 is None:\n self.m_sigma_3 = np.zeros_like(self.m_sigma_1)\n for i in range(0, self.m_sigma_1.shape[0]):\n for j in range(0, self.m_sigma_1.shape[1]):\n self.m_sigma_3[i, j] = (\n self.m_sigma_0 ** 2\n + self.m_sigma_0 * self.m_sigma[i]\n * self.m_beta[i] * self.m_rho_0[i]\n + self.m_sigma_0 * self.m_sigma[j]\n * self.m_beta[j] * self.m_rho_0[j]\n + self.m_sigma[i] * self.m_beta[i]\n * self.m_sigma[j] * self.m_beta[j] * self.m_rho[i, j])\n else:\n self.m_sigma_3 = sigma_3\n\n if theta is None:\n self.m_theta = np.zeros_like(self.m_delta)\n for i in range(0, self.m_sigma_1.shape[0]):\n self.m_theta[i] = -(self.m_b[i] + self.m_mu_0 + self.m_beta[i] * self.m_mu[i]\n - 0.5 * (self.m_sigma_0 ** 2 + self.m_beta[i] * self.m_sigma[i] ** 2)) / (\n self.m_beta[i] * self.m_delta[i])\n else:\n self.m_theta = theta\n\n @staticmethod\n def estimate_from_ln_prices(ln_s_0, ln_s_i, gamma=-1, kappa_min=None):\n \"\"\"\n Run parameter estimation from ln-prices.\n\n \"\"\"\n if not isinstance(ln_s_0, pd.core.frame.DataFrame):\n raise ValueError('ln_s_0 has to be <pd.core.frame.DataFrame>.')\n\n if not isinstance(ln_s_i, pd.core.frame.DataFrame):\n raise ValueError('ln_s_i has to be <pd.core.frame.DataFrame>.')\n\n n_assets = ln_s_i.shape[1]\n symbols = list(ln_s_i.columns)\n ln_s_0_ = np.copy(ln_s_0.values)\n ln_s_i_ = np.copy(ln_s_i.values)\n\n # If kappa min is used, run pre kappa estimation in order to select indices\n if kappa_min:\n if kappa_min < 0:\n raise ValueError('kappa_min has to be non-negative.')\n kappa = np.zeros((n_assets, 1))\n for i in range(0, n_assets):\n _, _, kappa[i], _ = estimate_ln_coint_params(ln_s_0_, ln_s_i_[:, i], 1.0 / 250.0)\n selected_columns = np.where(kappa > kappa_min)[0]\n if len(selected_columns) < 1:\n raise ValueError('No selected columns.')\n symbols = list(ln_s_i.columns[selected_columns])\n n_assets = len(selected_columns)\n ln_s_i_ = ln_s_i_[:, selected_columns]\n\n # Estimate beta, kappa and delta\n a = np.zeros((n_assets, 1))\n b = np.zeros((n_assets, 1))\n beta = np.zeros((n_assets, 1))\n delta = np.zeros((n_assets, 1))\n kappa = np.zeros((n_assets, 1))\n for i in range(0, 
n_assets):\n delta[i], beta[i], kappa[i], a[i] = estimate_ln_coint_params(ln_s_0_, ln_s_i_[:, i], 1 / 250)\n\n mu_0 = 250 * np.mean(np.diff(ln_s_0_, 1, axis=0))\n sigma_0 = np.sqrt(250) * np.std(np.diff(ln_s_0_, 1, axis=0))\n\n series_all_ = np.concatenate([ln_s_0_, ln_s_i_], axis=1)\n cor = np.corrcoef(np.diff(series_all_, axis=0), rowvar=False)\n\n # Correlations between assets and the benchmark\n rho_0 = np.zeros((n_assets, 1))\n for i in range(0, n_assets):\n rho_0[i, 0] = cor[i + 1, 0]\n\n # Correlations between assets\n rho = cor[1:, 1:]\n\n # Estimate annual drift rates\n mu = np.zeros((n_assets, 1))\n for i in range(0, n_assets):\n # NOTE: estimate is scaled down by 50 %\n mu[i] = 0.5 * 250 * np.mean(np.diff(ln_s_i_[:, i], 1, axis=0))\n\n # Estimate annual volatilities\n sigma = np.zeros((n_assets, 1))\n for i in range(0, n_assets):\n sigma[i] = np.sqrt(250) * np.std(np.diff(ln_s_i_[:, i], 1, axis=0))\n\n # Estimate sigmas for Riccati equations\n sigma_1 = np.matmul(\n np.matmul(np.diag(sigma.flatten()), rho),\n np.diag(sigma.flatten())\n )\n\n sigma_2 = np.zeros_like(sigma_1)\n for i in range(0, sigma_1.shape[0]):\n for j in range(0, sigma_1.shape[1]):\n sigma_2[i, j] = sigma_0 * sigma[i] * rho_0[i] \\\n + beta[j] * sigma_1[i, j]\n\n sigma_3 = np.zeros_like(sigma_1)\n for i in range(0, sigma_1.shape[0]):\n for j in range(0, sigma_1.shape[1]):\n sigma_3[i, j] = (\n sigma_0 ** 2\n + sigma_0 * sigma[i]\n * beta[i] * rho_0[i]\n + sigma_0 * sigma[j]\n * beta[j] * rho_0[j]\n + sigma[i] * beta[i]\n * sigma[j] * beta[j] * rho[i, j])\n\n theta = np.zeros_like(delta)\n for i in range(0, sigma_1.shape[0]):\n theta[i] = -(b[i] + mu_0 + beta[i] * mu[i]\n - 0.5 * (sigma_0 ** 2 + beta[i] * sigma[i] ** 2)) / (\n beta[i] * delta[i])\n\n return ZSpreadModelParameters(gamma, rho, rho_0, sigma_0, sigma,\n mu_0, mu, beta, delta, b, a, sigma_1, sigma_2, sigma_3,\n kappa, theta, symbols)\n\n" ]
[ [ "numpy.sqrt", "numpy.concatenate", "numpy.copy", "numpy.zeros_like", "numpy.diff", "numpy.zeros", "numpy.where" ] ]
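In `estimate_from_ln_prices` above, `sigma_1 = diag(sigma) @ rho @ diag(sigma)` is simply the covariance matrix with entries sigma_i * sigma_j * rho_ij. A toy check with illustrative numbers:

import numpy as np

sigma = np.array([[0.2], [0.3]])           # annual volatilities (made up)
rho = np.array([[1.0, 0.5], [0.5, 1.0]])   # correlation matrix

sigma_1 = np.matmul(np.matmul(np.diag(sigma.flatten()), rho),
                    np.diag(sigma.flatten()))
# element-wise equivalent: sigma_i * sigma_j * rho_ij
assert np.allclose(sigma_1, (sigma @ sigma.T) * rho)
print(sigma_1)  # [[0.04 0.03], [0.03 0.09]]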
ndai093/UtilityBasedRegression
[ "1af262ab5b2b0645eda247ca63ef0b2f24fe18dd" ]
[ "packaging/src/ImbalancedUtilityBasedSampler/utility_based_random_under_sampler.py" ]
[ "from PhiRelevance.PhiUtils import phiControl,phi\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass UtilityBasedRandomUnderSampler:\n \"\"\"\n Class UtilityBasedRandomUnderSampler takes arguments as follows:\n data - Pandas data frame with target value as last column; if read from .csv, recommend using 'index_col=0'\n method - \"auto\"(\"extremes\") as default,\"range\"\n extrType - \"high\", \"both\" as default, \"low\"\n thr_rel - user defined relevance threshold between 0 and 1; all the target values with relevance below\n the threshold are candidates to be undersampled\n controlPts - list of control points formatted as [y1, phi(y1), phi'(y1), y2, phi(y2), phi'(y2)], where\n y1: target value; phi(y1): relevance value of y1; phi'(y1): derivative of phi(y1), etc.\n c_perc - undersampling percentage to be applied in each bump with uninteresting values, \n possible types are defined below,\n \"balance\" - will try to distribute the examples evenly across the existing bumps \n \"extreme\" - invert existing frequency of interesting/uninteresting set\n <percentage> - A list of percentage values with either one value applied to all bumps of the undersampling set\n or multiple percentage values mapping to each bump of the undersampling set\n\n \"\"\"\n def __init__(self, data, method='auto', extrType='both', thr_rel=1.0, controlPts=[], c_perc=\"balance\"):\n \n self.data = data\n \n self.method = 'extremes' if method in ['extremes', 'auto'] else 'range'\n \n if self.method == 'extremes':\n if extrType in ['high','low','both']:\n self.extrType = extrType\n else:\n self.extrType = 'both'\n else:\n self.extrType =''\n\n self.thr_rel = thr_rel\n \n if method == 'extremes':\n self.controlPts = []\n else:\n self.controlPts = controlPts\n \n if str == type(c_perc):\n self.c_perc = c_perc\n elif list == type(c_perc):\n self.c_perc = c_perc\n \n self.coef = 1.5\n \n def getMethod(self):\n return self.method\n\n def getData(self):\n return self.data\n\n def getExtrType(self):\n return self.extrType\n\n def getThrRel(self):\n return self.thr_rel\n\n def getControlPtr(self):\n return self.controlPts\n\n def getCPerc(self):\n return self.c_perc\n\n def resample(self):\n\n yPhi, ydPhi, yddPhi = self.__calc_rel_values()\n\n data1 = self.__preprocess_data(yPhi)\n #interesting set\n interesting_set = self.get_interesting_set(data1)\n #uninteresting set\n bumps_undersampling, bumps_interesting = self.__calc_bumps(data1)\n\n if self.c_perc == 'balance':\n resampled = self.__process_balance(bumps_undersampling, interesting_set)\n elif self.c_perc == 'extreme':\n resampled = self.__process_extreme(bumps_undersampling, bumps_interesting, interesting_set)\n elif isinstance(self.c_perc, list):\n resampled = self.__process_percentage(bumps_undersampling, interesting_set)\n\n #clean up resampled set and return\n self.__postprocess_data(resampled)\n return resampled\n\n def __postprocess_data(self, resampled):\n self.data.drop('yPhi',axis=1,inplace=True )\n resampled.drop('yPhi',axis=1,inplace=True )\n resampled.sort_index(inplace=True)\n return resampled\n\n def __preprocess_data(self, yPhi):\n #append column 'yPhi'\n data1 = self.data\n data1['yPhi'] = yPhi\n data1 = self.data.sort_values(by=['Tgt'])\n return data1\n \n def get_interesting_set(self, data):\n interesting_set = data[data.yPhi >= self.thr_rel]\n return interesting_set\n \n def get_undersampling_set(self, data):\n undersampling_set = data[data.yPhi < self.thr_rel]\n return 
undersampling_set \n \n def __calc_rel_values(self):\n #retrieve target(last column) from DataFrame\n y = self.data.iloc[:,-1]\n\n #generate control ptrs \n if self.method == 'extremes':\n controlPts, npts = phiControl(y, extrType=self.extrType)\n else:\n controlPts, npts = phiControl(y, 'range', extrType=\"\", controlPts=self.controlPts)\n\n #calculate relevance value\n yPhi, ydPhi, yddPhi = phi(y, controlPts, npts, self.method)\n return yPhi, ydPhi, yddPhi\n\n def __process_balance(self, bumps_undersampling, interesting_set):\n resample_size = round(len(interesting_set) / len(bumps_undersampling))\n #print('process_balance(): resample_size per bump='+str(resample_size))\n resampled_sets = []\n for s in bumps_undersampling:\n resampled_sets.append(s.sample(n=resample_size))\n #includes interesting set\n resampled_sets.append(interesting_set)\n result = pd.concat(resampled_sets)\n return result\n\n def __process_extreme(self, bumps_undersampling, bumps_interesting, interesting_set):\n \n #print('process_extreme(): size of bumps_undersampling='+str(len(bumps_undersampling)))\n #print('process_extreme(): size of bumps_interesting='+str(len(bumps_interesting)))\n #print('process_extreme(): size of interesting_set='+str(len(interesting_set)))\n resampled_sets = []\n #calculate average cnt\n len_interesting_set = len(interesting_set)\n len_total = len(self.data)\n #print('process_extreme(): size of total_set='+str(len_total))\n average_cnt_interesting_set = len_interesting_set/len(bumps_interesting)\n #print('process_extreme(): average_cnt_interesting_set='+str(average_cnt_interesting_set))\n resample_size = (average_cnt_interesting_set**2.0)/(len_total-len_interesting_set)\n #print('process_extreme(): resample_size='+str(resample_size))\n resample_size_per_bump = round(resample_size / len(bumps_undersampling))\n #print('process_extreme(): resample_size_per_bump='+str(resample_size_per_bump))\n\n for s in bumps_undersampling:\n resampled_sets.append(s.sample(n = resample_size_per_bump))\n #includes interesting set \n resampled_sets.append(interesting_set)\n result = pd.concat(resampled_sets)\n return result\n\n def __process_percentage(self, bumps_undersampling, interesting_set):\n #make sure all percentage values are float values and <= 1.0\n for c in self.c_perc:\n if (not isinstance(c, float)) or (c>1.0):\n print('c_perc must be a list of float numbers <= 1.0')\n return []\n #make sure c_perc values match bumps\n resampled_sets = []\n if (len(bumps_undersampling) != len(self.c_perc)) and (len(self.c_perc) != 1):\n print('c_perc value list must have either one value or values equal to number of bumps')\n return []\n elif len(self.c_perc) == 1: \n undersample_perc = self.c_perc[0]\n #print('len(self.c_perc) == 1')\n #print('process_percentage(): undersample_perc='+str(undersample_perc))\n for s in bumps_undersampling:\n #print('process_percentage(): bump size='+str(len(s)))\n resample_size = round(len(s)*undersample_perc)\n #print('process_percentage(): resample_size='+str(resample_size))\n resampled_sets.append(s.sample(n = resample_size))\n #adding interesting set\n resampled_sets.append(interesting_set)\n result = pd.concat(resampled_sets)\n else:\n for i in range(len(bumps_undersampling)):\n #print('len(self.c_perc) > 1 loop i='+str(i))\n undersample_perc = self.c_perc[i]\n #print('process_percentage(): undersample_perc='+str(undersample_perc))\n resample_size = round(len(bumps_undersampling[i])*undersample_perc)\n #print('process_percentage(): resample_size='+str(resample_size))\n 
resampled_sets.append(bumps_undersampling[i].sample(n = resample_size))\n #adding interesting set\n resampled_sets.append(interesting_set)\n result = pd.concat(resampled_sets)\n return result\n\n def __calc_bumps(self, df):\n\n thr_rel = self.thr_rel\n less_than_thr_rel = True if df.loc[0,'yPhi'] < thr_rel else False\n bumps_undersampling = []\n bumps_interesting = []\n bumps_undersampling_df = pd.DataFrame(columns = df.columns) \n bumps_interesting_df = pd.DataFrame(columns = df.columns)\n\n for idx, row in df.iterrows():\n if less_than_thr_rel and (row['yPhi'] < thr_rel):\n bumps_undersampling_df = bumps_undersampling_df.append(row)\n elif less_than_thr_rel and row['yPhi'] >= thr_rel:\n bumps_undersampling.append(bumps_undersampling_df)\n bumps_undersampling_df = pd.DataFrame(columns = df.columns)\n bumps_interesting_df = bumps_interesting_df.append(row)\n less_than_thr_rel = False\n elif (not less_than_thr_rel) and (row['yPhi'] >= thr_rel):\n bumps_interesting_df = bumps_interesting_df.append(row)\n elif (not less_than_thr_rel) and (row['yPhi'] < thr_rel):\n bumps_interesting.append(bumps_interesting_df)\n bumps_interesting_df = pd.DataFrame(columns = df.columns)\n bumps_undersampling_df = bumps_undersampling_df.append(row)\n less_than_thr_rel = True\n\n if less_than_thr_rel and (df.iloc[-1,:]['yPhi'] < thr_rel):\n bumps_undersampling.append(bumps_undersampling_df)\n elif not less_than_thr_rel and (df.iloc[-1,:]['yPhi'] >= thr_rel):\n bumps_interesting.append(bumps_interesting_df)\n\n return bumps_undersampling, bumps_interesting\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
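A hedged usage sketch for the sampler above. It assumes the `PhiRelevance` package and the class itself are importable, and relies on `__preprocess_data` hard-coding the target column name 'Tgt'; the toy data are illustrative:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
# skewed target so the 'extremes' relevance puts mass in the upper tail
df = pd.DataFrame({'x1': rng.normal(size=500),
                   'Tgt': rng.lognormal(size=500)})

sampler = UtilityBasedRandomUnderSampler(df, method='auto', extrType='both',
                                         thr_rel=0.8, c_perc='balance')
resampled = sampler.resample()
print(len(df), '->', len(resampled))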
vishalbelsare/AutoTS-1
[ "9b08d42c6f80ee92b7838ede5a6fa45e239c8ca8" ]
[ "test.py" ]
[ "\"\"\"Informal testing script.\"\"\"\nfrom time import sleep\nimport timeit\nimport platform\nimport pandas as pd\nfrom autots.datasets import ( # noqa\n load_daily,\n load_hourly,\n load_monthly,\n load_yearly,\n load_weekly,\n load_weekdays,\n load_zeroes,\n load_linear,\n load_sine,\n)\nfrom autots import AutoTS, create_regressor, model_forecast # noqa\nimport matplotlib.pyplot as plt\n\n# raise ValueError(\"aaargh!\")\nuse_template = False\nforce_univariate = False # long = False\nback_forecast = False\ngraph = True\n\n# this is the template file imported:\nexample_filename = \"example_export.csv\" # .csv/.json\nforecast_length = 8\nlong = False\n# df = load_linear(long=long, shape=(200, 500), introduce_nan=0.2)\ndf = load_daily(long=long)\nn_jobs = \"auto\"\nverbose = 2\nvalidation_method = \"similarity\"\nfrequency = \"infer\"\ndrop_most_recent = 0\nif use_template:\n generations = 5\n num_validations = 0\nelse:\n generations = 2\n num_validations = 3\n\nif force_univariate:\n df = df.iloc[:, 0]\n\ntransformer_list = (\n \"superfast\" # [\"bkfilter\", \"STLFilter\", \"HPFilter\", 'StandardScaler']\n)\ntransformer_max_depth = 1\nmodels_mode = \"default\" # \"regressor\"\nmodel_list = \"fast\"\n# model_list = \"regressor\" # fast_parallel, all\n# model_list = [\"SeasonalNaive\", 'AverageValueNaive']\n\nmetric_weighting = {\n 'smape_weighting': 5,\n 'mae_weighting': 2,\n 'rmse_weighting': 2,\n 'made_weighting': 1,\n 'containment_weighting': 0,\n 'runtime_weighting': 0.05,\n 'spl_weighting': 2,\n 'contour_weighting': 1,\n}\n\nmodel = AutoTS(\n forecast_length=forecast_length,\n frequency=frequency,\n prediction_interval=0.9,\n ensemble=[\"horizontal-max\", \"dist\", \"simple\"], # \"subsample\"\n constraint=None,\n max_generations=generations,\n num_validations=num_validations,\n validation_method=validation_method,\n model_list=model_list,\n transformer_list=transformer_list,\n transformer_max_depth=transformer_max_depth,\n initial_template=\"Random\",\n metric_weighting=metric_weighting,\n models_to_validate=0.35,\n max_per_model_class=None,\n model_interrupt=\"end_generation\",\n n_jobs=n_jobs,\n drop_most_recent=drop_most_recent,\n introduce_na=True,\n # prefill_na=0,\n # subset=5,\n verbose=verbose,\n models_mode=models_mode,\n)\n\n\nregr_train, regr_fcst = create_regressor(\n df,\n forecast_length=forecast_length,\n frequency=frequency,\n drop_most_recent=drop_most_recent,\n scale=True,\n summarize=\"auto\",\n backfill=\"bfill\",\n fill_na=\"pchip\",\n holiday_countries=[\"US\"],\n datepart_method=\"recurring\",\n)\n\n\n# model = model.import_results('test.pickle')\nif use_template:\n model = model.import_template(\n example_filename, method=\"only\", enforce_model_list=True\n )\n\nstart_time_for = timeit.default_timer()\nmodel = model.fit(\n df,\n future_regressor=regr_train,\n weights=\"mean\",\n # result_file='test.pickle',\n validation_indexes=[\n pd.date_range(\"2021-01-01\", \"2022-05-02\"),\n pd.date_range(\"2021-01-01\", \"2022-02-02\"),\n pd.date_range(\"2021-01-01\", \"2022-03-03\"),\n ],\n date_col=\"datetime\" if long else None,\n value_col=\"value\" if long else None,\n id_col=\"series_id\" if long else None,\n)\n\nelapsed_for = timeit.default_timer() - start_time_for\n\nprediction = model.predict(future_regressor=regr_fcst, verbose=1)\n# point forecasts dataframe\nforecasts_df = prediction.forecast\n# accuracy of all tried model results (not including cross validation)\ninitial_results = model.results()\n# validation results\nvalidation_results = 
model.results(\"validation\")\n\ninitial_results[\"TransformationRuntime\"] = initial_results[\n \"TransformationRuntime\"\n].dt.total_seconds()\ninitial_results[\"FitRuntime\"] = initial_results[\"FitRuntime\"].dt.total_seconds()\ninitial_results[\"PredictRuntime\"] = initial_results[\"PredictRuntime\"].dt.total_seconds()\ninitial_results[\"TotalRuntime\"] = initial_results[\"TotalRuntime\"].dt.total_seconds()\n\nsleep(5)\nprint(model)\nprint(model.validation_test_indexes)\nprint(f\"Model failure rate is {model.failure_rate() * 100:.1f}%\")\nprint(\"Slowest models:\")\nprint(\n initial_results[initial_results[\"Ensemble\"] < 1]\n .groupby(\"Model\")\n .agg({\"TotalRuntime\": [\"mean\", \"max\"]})\n .idxmax()\n)\n\ninitial_results.to_csv(\"general_template_\" + str(platform.node()) + \".csv\")\n\nif graph:\n prediction.plot(\n model.df_wide_numeric,\n series=model.df_wide_numeric.columns[2],\n remove_zeroes=False,\n start_date=\"2018-09-26\",\n )\n plt.show()\n model.plot_generation_loss()\n\n if model.best_model[\"Ensemble\"].iloc[0] == 2:\n plt.show()\n model.plot_horizontal_transformers(method=\"fillna\")\n plt.show()\n model.plot_horizontal_transformers()\n plt.show()\n model.plot_horizontal()\n plt.show()\n if \"mosaic\" in model.best_model[\"ModelParameters\"].iloc[0].lower():\n mosaic_df = model.mosaic_to_df()\n print(mosaic_df[mosaic_df.columns[0:5]].head(5))\n\n plt.show()\n if back_forecast:\n model.plot_backforecast(n_splits=\"auto\", start_date=\"2019-01-01\")\n\ndf_wide_numeric = model.df_wide_numeric\n\ndf = df_wide_numeric.tail(100).fillna(0).astype(float)\n\nprint(\"test run complete\")\n\n\n\"\"\"\ndf_forecast = model_forecast(\n model_name=\"SectionalMotif\",\n model_param_dict={},\n model_transform_dict={\n 'fillna': 'mean',\n 'transformations': {'0': 'ClipOutliers'},\n 'transformation_params': {'0': {\"method\": \"clip\", \"std_threshold\": 3, \"fillna\": None}}\n },\n df_train=df,\n forecast_length=5,\n frequency='infer',\n prediction_interval=0.9,\n no_negatives=False,\n # future_regressor_train=future_regressor_train2d,\n # future_regressor_forecast=future_regressor_forecast2d,\n random_seed=321,\n verbose=1,\n n_jobs=\"auto\",\n)\ndf_forecast.forecast.head(5)\n\"\"\"\n\n\"\"\"\n# Import/Export\nmodel.export_template(example_filename, models='all',\n n=15, max_per_model_class=3)\ndel(model)\nmodel = model.import_template(example_filename, method='only')\nprint(\"Overwrite template is: {}\".format(str(model.initial_template)))\n\n# default save location of files is apparently root\nsystemd-run --unit=background_cmd_service --remain-after-exit /home/colin/miniconda3/envs/openblas/bin/python /home/colin/AutoTS/test.py\nsystemd-run --unit=background_cmd_service --remain-after-exit /home/colin/miniconda3/envs/openblas/bin/python /home/colin/AutoTS/local_example.py\njournalctl -r -n 10 -u background_cmd_service\njournalctl -f -u background_cmd_service\njournalctl -b -u background_cmd_service\n\nsystemctl stop background_cmd_service\nsystemctl reset-failed\nsystemctl kill background_cmd_service\n\nscp colin@192.168.1.122:/home/colin/AutoTS/general_template_colin-1135.csv ./Documents/AutoTS\nscp colin@192.168.1.122:/general_template_colin-1135.csv ./Documents/AutoTS\n\n\nEdgey Cases:\n Single Time Series\n Forecast Length of 1\n Very short training data\n Lots of NaN\n\n\nPACKAGE RELEASE\n# update version in setup.py, /docs/conf.py, /autots/_init__.py\n\nset PYTHONPATH=%PYTHONPATH%;C:/Users/Colin/Documents/AutoTS\npython -m unittest discover ./tests\n\npython 
./autots/evaluator/benchmark.py\n\ncd <project dir>\nblack ./autots -l 88 -S\n\nhttps://github.com/sphinx-doc/sphinx/issues/3382\n# pip install sphinx==2.4.4\n# m2r does not yet work on sphinx 3.0\n# pip install m2r\ncd <project dir>\n# delete docs/source and /build (not tutorial or intro.rst)\nsphinx-apidoc -f -o docs/source autots\ncd ./docs\nmake html\n\nhttps://winedarksea.github.io/AutoTS/build/index.html\n\"\"\"\n\"\"\"\nhttps://packaging.python.org/tutorials/packaging-projects/\n\npython -m pip install --user --upgrade setuptools wheel\ncd /to project directory\npython setup.py sdist bdist_wheel\ntwine upload dist/*\n\nMerge dev to master on GitHub and create release (include .tar.gz)\n\"\"\"\n\n# Help correlate errors with parameters\n\"\"\"\ntest = initial_results[initial_results['TransformationParameters'].str.contains('FastICA')]\n\ncols = ['Model', 'ModelParameters', 'TransformationParameters', 'Exceptions']\nif (~initial_results['Exceptions'].isna()).sum() > 0:\n test_corr = error_correlations(\n initial_results[cols], result='corr'\n ) # result='poly corr'\n\"\"\"\n" ]
[ [ "matplotlib.pyplot.show", "pandas.date_range" ] ]
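Distilled from the informal test script above, a minimal end-to-end AutoTS run with tiny search settings so it finishes quickly (these parameter values are illustrative, not the script's own):

from autots import AutoTS
from autots.datasets import load_daily

df = load_daily(long=False)
model = AutoTS(forecast_length=8, frequency='infer',
               model_list='superfast', max_generations=1,
               num_validations=1, verbose=0)
model = model.fit(df)
prediction = model.predict()
print(prediction.forecast.head())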
JamesJeffryes/kb_ke_apps
[ "a4b568787a50c33e284ca697e24ab9162e75e3c7" ]
[ "lib/kb_ke_apps/Utils/KnowledgeEngineAppsUtil.py" ]
[ "import time\nimport json\nimport os\nimport errno\nimport uuid\nimport shutil\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport itertools\n\nfrom kb_ke_util.kb_ke_utilClient import kb_ke_util\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil\nfrom Workspace.WorkspaceClient import Workspace as Workspace\nfrom KBaseReport.KBaseReportClient import KBaseReport\nfrom SetAPI.SetAPIServiceClient import SetAPI\nfrom GenericsAPI.GenericsAPIClient import GenericsAPI\n\n\ndef log(message, prefix_newline=False):\n print(('\\n' if prefix_newline else '') + str(time.time()) + ': ' + message)\n\n\nclass KnowledgeEngineAppsUtil:\n\n METRIC = [\"braycurtis\", \"canberra\", \"chebyshev\", \"cityblock\", \"correlation\", \"cosine\",\n \"dice\", \"euclidean\", \"hamming\", \"jaccard\", \"kulsinski\", \"matching\",\n \"rogerstanimoto\", \"russellrao\", \"sokalmichener\", \"sokalsneath\", \"sqeuclidean\",\n \"yule\"]\n\n METHOD = [\"single\", \"complete\", \"average\", \"weighted\", \"centroid\", \"median\", \"ward\"]\n\n CRITERION = [\"inconsistent\", \"distance\", \"maxclust\"]\n\n def _mkdir_p(self, path):\n \"\"\"\n _mkdir_p: make directory for given path\n \"\"\"\n if not path:\n return\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n def _validate_run_pca_params(self, params):\n \"\"\"\n _validate_run_pca_params:\n validates params passed to run_pca method\n \"\"\"\n\n log('start validating run_pca params')\n\n # check for required parameters\n for p in ['cluster_set_ref', 'workspace_name', 'pca_matrix_name']:\n if p not in params:\n raise ValueError('\"{}\" parameter is required, but missing'.format(p))\n\n def _validate_run_kmeans_cluster_params(self, params):\n \"\"\"\n _validate_run_kmeans_cluster_params:\n validates params passed to run_kmeans_cluster method\n \"\"\"\n\n log('start validating run_kmeans_cluster params')\n\n # check for required parameters\n for p in ['matrix_ref', 'workspace_name', 'cluster_set_name',\n 'k_num']:\n if p not in params:\n raise ValueError('\"{}\" parameter is required, but missing'.format(p))\n\n # check metric validation\n metric = params.get('dist_metric')\n if metric and metric not in self.METRIC:\n error_msg = 'INPUT ERROR:\\nInput metric function [{}] is not valid.\\n'.format(metric)\n error_msg += 'Available metrics: {}'.format(self.METRIC)\n raise ValueError(error_msg)\n\n def _validate_run_hierarchical_cluster_params(self, params):\n \"\"\"\n _validate_run_hierarchical_cluster_params:\n validates params passed to run_hierarchical_cluster method\n \"\"\"\n\n log('start validating run_hierarchical_cluster params')\n\n # check for required parameters\n for p in ['matrix_ref', 'workspace_name', 'cluster_set_name',\n 'dist_cutoff_rate']:\n if p not in params:\n raise ValueError('\"{}\" parameter is required, but missing'.format(p))\n\n # check metric validation\n metric = params.get('dist_metric')\n if metric and metric not in self.METRIC:\n error_msg = 'INPUT ERROR:\\nInput metric function [{}] is not valid.\\n'.format(metric)\n error_msg += 'Available metrics: {}'.format(self.METRIC)\n raise ValueError(error_msg)\n\n # check method validation\n method = params.get('linkage_method')\n if method and method not in self.METHOD:\n error_msg = 'INPUT ERROR:\\nInput linkage algorithm [{}] is not valid.\\n'.format(\n method)\n error_msg += 'Available methods: {}'.format(self.METHOD)\n raise ValueError(error_msg)\n\n # check criterion 
validation\n criterion = params.get('fcluster_criterion')\n if criterion and criterion not in self.CRITERION:\n error_msg = 'INPUT ERROR:\\nInput criterion [{}] is not valid.\\n'.format(criterion)\n error_msg += 'Available criteria: {}'.format(self.CRITERION)\n raise ValueError(error_msg)\n\n def _gen_clusters(self, clusters, conditionset_mapping):\n clusters_list = list()\n\n for cluster in clusters.values():\n labeled_cluster = {}\n labeled_cluster.update({'id_to_data_position': cluster})\n if conditionset_mapping:\n id_to_condition = {k: v for k, v in conditionset_mapping.items() if k in cluster.keys()}\n labeled_cluster.update({'id_to_condition': id_to_condition})\n\n clusters_list.append(labeled_cluster)\n\n return clusters_list\n\n def _gen_hierarchical_clusters(self, clusters, conditionset_mapping, data_matrix):\n clusters_list = list()\n\n df = pd.read_json(data_matrix)\n index = df.index.tolist()\n\n for cluster in clusters.values():\n labeled_cluster = {}\n id_to_data_position = {}\n for item in cluster:\n id_to_data_position.update({item: index.index(item)})\n\n labeled_cluster.update({'id_to_data_position': id_to_data_position})\n if conditionset_mapping:\n id_to_condition = {k: v for k, v in conditionset_mapping.items() if k in cluster}\n labeled_cluster.update({'id_to_condition': id_to_condition})\n\n clusters_list.append(labeled_cluster)\n\n return clusters_list\n\n def _build_hierarchical_cluster_set(self, clusters, cluster_set_name, genome_ref, matrix_ref,\n conditionset_mapping, conditionset_ref, workspace_name,\n clustering_parameters, data_matrix):\n\n \"\"\"\n _build_hierarchical_cluster_set: build KBaseExperiments.ClusterSet object\n \"\"\"\n\n log('start saving KBaseExperiments.ClusterSet object')\n\n if isinstance(workspace_name, int) or workspace_name.isdigit():\n workspace_id = workspace_name\n else:\n workspace_id = self.dfu.ws_name_to_id(workspace_name)\n\n clusters_list = self._gen_hierarchical_clusters(clusters, conditionset_mapping,\n data_matrix)\n\n cluster_set_data = {'clusters': clusters_list,\n 'clustering_parameters': clustering_parameters,\n 'original_data': matrix_ref,\n 'condition_set_ref': conditionset_ref,\n 'genome_ref': genome_ref}\n\n cluster_set_data = {k: v for k, v in cluster_set_data.items() if v}\n\n object_type = 'KBaseExperiments.ClusterSet'\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{'type': object_type,\n 'data': cluster_set_data,\n 'name': cluster_set_name}]}\n\n dfu_oi = self.dfu.save_objects(save_object_params)[0]\n cluster_set_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])\n\n return cluster_set_ref\n\n def _build_kmeans_cluster_set(self, clusters, cluster_set_name, genome_ref, matrix_ref,\n conditionset_mapping, conditionset_ref, workspace_name,\n clustering_parameters):\n \"\"\"\n _build_kmeans_cluster_set: build KBaseExperiments.ClusterSet object\n \"\"\"\n\n log('start saving KBaseExperiments.ClusterSet object')\n\n if isinstance(workspace_name, int) or workspace_name.isdigit():\n workspace_id = workspace_name\n else:\n workspace_id = self.dfu.ws_name_to_id(workspace_name)\n\n clusters_list = self._gen_clusters(clusters, conditionset_mapping)\n\n cluster_set_data = {'clusters': clusters_list,\n 'clustering_parameters': clustering_parameters,\n 'original_data': matrix_ref,\n 'condition_set_ref': conditionset_ref,\n 'genome_ref': genome_ref}\n\n cluster_set_data = {k: v for k, v in cluster_set_data.items() if v}\n\n object_type = 'KBaseExperiments.ClusterSet'\n save_object_params = {\n 'id': 
workspace_id,\n 'objects': [{'type': object_type,\n 'data': cluster_set_data,\n 'name': cluster_set_name}]}\n\n dfu_oi = self.dfu.save_objects(save_object_params)[0]\n cluster_set_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])\n\n return cluster_set_ref\n\n def _generate_visualization_content(self, output_directory,\n row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path):\n\n \"\"\"\n _generate_visualization_content: generate visualization html content\n \"\"\"\n\n visualization_content = ''\n\n if row_dendrogram_path:\n row_dendrogram_name = 'row_dendrogram.png'\n row_dendrogram_display_name = 'row dendrogram'\n\n shutil.copy2(row_dendrogram_path,\n os.path.join(output_directory, row_dendrogram_name))\n\n visualization_content += '<div class=\"gallery\">'\n visualization_content += '<a target=\"_blank\" href=\"{}\">'.format(\n row_dendrogram_name)\n visualization_content += '<img src=\"{}\" '.format(row_dendrogram_name)\n visualization_content += 'alt=\"{}\" width=\"600\" height=\"400\">'.format(\n row_dendrogram_display_name)\n visualization_content += '</a><div class=\"desc\">{}</div></div>'.format(\n row_dendrogram_display_name)\n\n if row_dendrogram_truncate_path:\n row_den_truncate_name = 'row_dendrogram_last12.png'\n row_den_truncate_display_name = 'row dendrogram truncated (last 12 merges)'\n\n shutil.copy2(row_dendrogram_truncate_path,\n os.path.join(output_directory, row_den_truncate_name))\n\n visualization_content += '<div class=\"gallery\">'\n visualization_content += '<a target=\"_blank\" href=\"{}\">'.format(\n row_den_truncate_name)\n visualization_content += '<img src=\"{}\" '.format(row_den_truncate_name)\n visualization_content += 'alt=\"{}\" width=\"600\" height=\"400\">'.format(\n row_den_truncate_display_name)\n visualization_content += '</a><div class=\"desc\">{}</div></div>'.format(\n row_den_truncate_display_name)\n\n if col_dendrogram_path:\n col_dendrogram_name = 'column_dendrogram.png'\n col_dendrogram_display_name = 'column dendrogram'\n\n shutil.copy2(col_dendrogram_path,\n os.path.join(output_directory, col_dendrogram_name))\n\n visualization_content += '<div class=\"gallery\">'\n visualization_content += '<a target=\"_blank\" href=\"{}\">'.format(\n col_dendrogram_name)\n visualization_content += '<img src=\"{}\" '.format(col_dendrogram_name)\n visualization_content += 'alt=\"{}\" width=\"600\" height=\"400\">'.format(\n col_dendrogram_display_name)\n visualization_content += '</a><div class=\"desc\">{}</div></div>'.format(\n col_dendrogram_display_name)\n\n if col_dendrogram_truncate_path:\n col_den_truncate_name = 'column_dendrogram_last12.png'\n col_den_truncate_display_name = 'column dendrogram truncated (last 12 merges)'\n\n shutil.copy2(col_dendrogram_truncate_path,\n os.path.join(output_directory, col_den_truncate_name))\n\n visualization_content += '<div class=\"gallery\">'\n visualization_content += '<a target=\"_blank\" href=\"{}\">'.format(\n col_den_truncate_name)\n visualization_content += '<img src=\"{}\" '.format(col_den_truncate_name)\n visualization_content += 'alt=\"{}\" width=\"600\" height=\"400\">'.format(\n col_den_truncate_display_name)\n visualization_content += '</a><div class=\"desc\">{}</div></div>'.format(\n col_den_truncate_display_name)\n\n if not visualization_content:\n visualization_content = '<p>Dendrogram is too large to be printed.</p>'\n\n return visualization_content\n\n def _generate_hierarchical_html_report(self, cluster_set_refs,\n 
row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path):\n \"\"\"\n _generate_hierarchical_html_report: generate html summary report for hierarchical\n clustering app\n \"\"\"\n\n log('start generating html report')\n html_report = list()\n\n output_directory = os.path.join(self.scratch, str(uuid.uuid4()))\n self._mkdir_p(output_directory)\n result_file_path = os.path.join(output_directory, 'hier_report.html')\n\n visualization_content = self._generate_visualization_content(\n output_directory,\n row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path)\n\n with open(result_file_path, 'w') as result_file:\n with open(os.path.join(os.path.dirname(__file__), 'hier_report_template.html'),\n 'r') as report_template_file:\n report_template = report_template_file.read()\n report_template = report_template.replace('<p>Visualization_Content</p>',\n visualization_content)\n result_file.write(report_template)\n\n report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,\n 'pack': 'zip'})['shock_id']\n\n html_report.append({'shock_id': report_shock_id,\n 'name': os.path.basename(result_file_path),\n 'label': os.path.basename(result_file_path),\n 'description': 'HTML summary report for ExpressionMatrix Cluster App'\n })\n return html_report\n\n def _generate_hierarchical_cluster_report(self, cluster_set_refs, workspace_name,\n row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path):\n \"\"\"\n _generate_hierarchical_cluster_report: generate summary report\n \"\"\"\n\n log('creating report')\n\n output_html_files = self._generate_hierarchical_html_report(\n cluster_set_refs,\n row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path)\n\n objects_created = []\n for cluster_set_ref in cluster_set_refs:\n objects_created.append({'ref': cluster_set_ref,\n 'description': 'Hierarchical ClusterSet'})\n\n report_params = {'message': '',\n 'workspace_name': workspace_name,\n 'objects_created': objects_created,\n 'html_links': output_html_files,\n 'direct_html_link_index': 0,\n 'html_window_height': 333,\n 'report_object_name': 'kb_hier_cluster_report_' + str(uuid.uuid4())}\n\n kbase_report_client = KBaseReport(self.callback_url)\n output = kbase_report_client.create_extended_report(report_params)\n\n report_output = {'report_name': output['name'], 'report_ref': output['ref']}\n\n return report_output\n\n def _generate_kmeans_cluster_report(self, cluster_set_refs, workspace_name):\n \"\"\"\n _generate_kmeans_cluster_report: generate summary report\n \"\"\"\n objects_created = []\n for cluster_set_ref in cluster_set_refs:\n objects_created.append({'ref': cluster_set_ref,\n 'description': 'Kmeans ClusterSet'})\n report_params = {'message': '',\n 'objects_created': objects_created,\n 'workspace_name': workspace_name,\n 'report_object_name': 'run_kmeans_cluster_' + str(uuid.uuid4())}\n\n kbase_report_client = KBaseReport(self.callback_url, token=self.token)\n output = kbase_report_client.create_extended_report(report_params)\n\n report_output = {'report_name': output['name'], 'report_ref': output['ref']}\n\n return report_output\n\n def _generate_pca_html_files(self, pca_plots, n_components):\n\n log('start generating html report')\n html_report = list()\n\n output_directory = os.path.join(self.scratch, str(uuid.uuid4()))\n self._mkdir_p(output_directory)\n result_file_path = 
os.path.join(output_directory, 'pca_report.html')\n\n visualization_content = ''\n\n for pca_plot in pca_plots:\n pca_plot_name = os.path.basename(pca_plot)\n pca_plot_display_name = '{} Component PCA'.format(n_components)\n\n shutil.copy2(pca_plot,\n os.path.join(output_directory, pca_plot_name))\n\n visualization_content += '<div class=\"gallery\">'\n visualization_content += '<a target=\"_blank\" href=\"{}\">'.format(pca_plot_name)\n visualization_content += '<img src=\"{}\" '.format(pca_plot_name)\n visualization_content += 'alt=\"{}\" width=\"600\" height=\"600\">'.format(\n pca_plot_display_name)\n visualization_content += '</a><div class=\"desc\">{}</div></div>'.format(\n pca_plot_display_name)\n\n with open(result_file_path, 'w') as result_file:\n with open(os.path.join(os.path.dirname(__file__), 'pca_report_template.html'),\n 'r') as report_template_file:\n report_template = report_template_file.read()\n report_template = report_template.replace('<p>Visualization_Content</p>',\n visualization_content)\n result_file.write(report_template)\n\n report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,\n 'pack': 'zip'})['shock_id']\n\n html_report.append({'shock_id': report_shock_id,\n 'name': os.path.basename(result_file_path),\n 'label': os.path.basename(result_file_path),\n 'description': 'HTML summary report for ExpressionMatrix Cluster App'\n })\n return html_report\n\n def _generate_pca_plot(self, pca_matrix_data):\n \"\"\"\n _generate_pca_plot: generate a plot for PCA data\n \"\"\"\n pca_plots = []\n output_directory = os.path.join(self.scratch, str(uuid.uuid4()))\n self._mkdir_p(output_directory)\n\n df = pd.DataFrame(pca_matrix_data.get('values'),\n index=pca_matrix_data.get('row_ids'),\n columns=pca_matrix_data.get('col_ids'))\n\n n_components = range(1, df.columns.size)\n all_pairs = list(itertools.combinations(n_components, 2))\n\n for pair in all_pairs:\n first_component = pair[0]\n second_component = pair[1]\n pca_plot = os.path.join(output_directory, 'pca_{}_{}.png'.format(first_component,\n second_component))\n\n plt.switch_backend('agg')\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel('Principal Component {}'.format(first_component), fontsize=15)\n ax.set_ylabel('Principal Component {}'.format(second_component), fontsize=15)\n ax.set_title('{} component PCA'.format(len(n_components)), fontsize=20)\n\n clusters = list(set(['cluster_{}'.format(x) for x in df['cluster'].tolist()]))\n colors = ['red', 'green', 'blue', 'orange', 'yellow', 'pink', 'lightcyan', 'cyan']\n if len(clusters) > len(colors):\n np.random.seed(19680801)\n N = len(clusters)\n colors = []\n for i in range(N):\n colors.append(np.random.rand(3,))\n\n for cluster, color in zip(clusters, colors):\n indicesToKeep = df['cluster'] == int(cluster.split('_')[-1])\n ax.scatter(df.loc[indicesToKeep, 'principal_component_{}'.format(first_component)],\n df.loc[indicesToKeep, 'principal_component_{}'.format(second_component)],\n c=color,\n s=50)\n ax.legend(clusters, loc='best')\n ax.grid()\n\n plt.savefig(pca_plot)\n\n pca_plots.append(pca_plot)\n\n return pca_plots, len(n_components)\n\n def _generate_pca_report(self, pca_ref, pca_matrix_data, workspace_name):\n \"\"\"\n _generate_kmeans_cluster_report: generate summary report\n \"\"\"\n objects_created = []\n objects_created.append({'ref': pca_ref,\n 'description': 'PCA Matrix'})\n\n pca_plots, n_components = self._generate_pca_plot(pca_matrix_data)\n output_html_files = self._generate_pca_html_files(pca_plots, 
n_components)\n report_params = {'message': '',\n 'objects_created': objects_created,\n 'workspace_name': workspace_name,\n 'html_links': output_html_files,\n 'direct_html_link_index': 0,\n 'report_object_name': 'run_pca_' + str(uuid.uuid4())}\n\n kbase_report_client = KBaseReport(self.callback_url, token=self.token)\n output = kbase_report_client.create_extended_report(report_params)\n\n report_output = {'report_name': output['name'], 'report_ref': output['ref']}\n\n return report_output\n\n def _save_2D_matrix(self, df, clusters, workspace_name, pca_matrix_name):\n \"\"\"\n _save_2D_matrix: save dataframe as KBaseFeatureValues.FloatMatrix2D object\n \"\"\"\n\n log('start saving KBaseFeatureValues.FloatMatrix2D object')\n\n if isinstance(workspace_name, int) or workspace_name.isdigit():\n workspace_id = workspace_name\n else:\n workspace_id = self.dfu.ws_name_to_id(workspace_name)\n\n row_ids = df.index.tolist()\n col_ids = df.columns.tolist()\n col_ids.append('cluster')\n values = df.values.tolist()\n\n idx = 0\n for cluster in clusters:\n cluster_items = cluster.get('id_to_condition').keys()\n\n for cluster_item in cluster_items:\n pos = row_ids.index(cluster_item)\n values[pos].append(idx)\n\n idx += 1\n\n pca_matrix_data = {'row_ids': row_ids,\n 'col_ids': col_ids,\n 'values': values}\n\n object_type = 'KBaseFeatureValues.FloatMatrix2D'\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{'type': object_type,\n 'data': pca_matrix_data,\n 'name': pca_matrix_name}]}\n\n dfu_oi = self.dfu.save_objects(save_object_params)[0]\n float_matrix_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])\n\n return float_matrix_ref, pca_matrix_data\n\n def _build_flat_cluster(self, data_matrix, dist_cutoff_rate,\n dist_metric=None, linkage_method=None, fcluster_criterion=None):\n \"\"\"\n _build_cluster: build flat clusters and dendrogram for data_matrix\n \"\"\"\n\n # calculate distance matrix\n pdist_params = {'data_matrix': data_matrix,\n 'metric': dist_metric}\n pdist_ret = self.ke_util.run_pdist(pdist_params)\n\n dist_matrix = pdist_ret['dist_matrix']\n labels = pdist_ret['labels']\n\n # performs hierarchical/agglomerative clustering\n linkage_params = {'dist_matrix': dist_matrix,\n 'method': linkage_method}\n linkage_ret = self.ke_util.run_linkage(linkage_params)\n\n linkage_matrix = linkage_ret['linkage_matrix']\n\n newick = self.ke_util.linkage_2_newick({'linkage_matrix': linkage_matrix,\n 'labels': labels})['newick']\n\n height = max([item[2] for item in linkage_matrix])\n dist_threshold = height * dist_cutoff_rate\n log('Height: {} Setting dist_threshold: {}'.format(height, dist_threshold))\n merges = len(linkage_matrix)\n\n # generate flat clusters\n fcluster_params = {'linkage_matrix': linkage_matrix,\n 'dist_threshold': dist_threshold,\n 'labels': labels,\n 'criterion': fcluster_criterion}\n fcluster_ret = self.ke_util.run_fcluster(fcluster_params)\n\n flat_cluster = fcluster_ret['flat_cluster']\n\n # generate dendrogram\n try:\n dendrogram_params = {'linkage_matrix': linkage_matrix,\n 'dist_threshold': dist_threshold,\n 'labels': labels}\n\n dendrogram_ret = self.ke_util.run_dendrogram(dendrogram_params)\n\n dendrogram_path = dendrogram_ret['result_plots'][0]\n except:\n dendrogram_path = None\n\n # generate truncated (last 12 merges) dendrogram\n if merges > 256:\n dendrogram_truncate_params = {'linkage_matrix': linkage_matrix,\n 'dist_threshold': dist_threshold,\n 'labels': labels,\n 'last_merges': 12}\n dendrogram_truncate_ret = 
self.ke_util.run_dendrogram(dendrogram_truncate_params)\n\n dendrogram_truncate_path = dendrogram_truncate_ret['result_plots'][0]\n else:\n dendrogram_truncate_path = None\n\n return flat_cluster, labels, newick, dendrogram_path, dendrogram_truncate_path\n\n def _build_kmeans_cluster(self, data_matrix, k_num, dist_metric=None):\n \"\"\"\n _build_kmeans_cluster: Build Kmeans cluster\n \"\"\"\n\n # calculate distance matrix\n pdist_params = {'data_matrix': data_matrix,\n 'metric': dist_metric}\n pdist_ret = self.ke_util.run_pdist(pdist_params)\n\n dist_matrix = pdist_ret['dist_matrix']\n labels = pdist_ret['labels']\n\n # run kmeans algorithm\n kmeans_params = {'dist_matrix': dist_matrix,\n 'k_num': k_num}\n kmeans_ret = self.ke_util.run_kmeans2(kmeans_params)\n\n centroid = kmeans_ret.get('kmeans_ret')\n idx = kmeans_ret.get('idx')\n\n df = pd.read_json(data_matrix)\n rows = df.index.tolist()\n\n clusters = {}\n for list_index, value in enumerate(idx):\n cluster = clusters.get(value)\n if not cluster:\n clusters.update({value: {rows[list_index]: list_index}})\n else:\n cluster.update({rows[list_index]: list_index})\n\n return clusters\n\n def __init__(self, config):\n self.ws_url = config[\"workspace-url\"]\n self.callback_url = config['SDK_CALLBACK_URL']\n self.token = config['KB_AUTH_TOKEN']\n self.shock_url = config['shock-url']\n self.srv_wiz_url = config['srv-wiz-url']\n self.scratch = config['scratch']\n self.dfu = DataFileUtil(self.callback_url)\n self.ke_util = kb_ke_util(self.callback_url, service_ver=\"dev\")\n self.gen_api = GenericsAPI(self.callback_url, service_ver=\"dev\")\n\n self.ws = Workspace(self.ws_url, token=self.token)\n self.set_client = SetAPI(self.srv_wiz_url)\n\n def run_pca(self, params):\n \"\"\"\n run_pca: generates PCA matrix for KBaseExperiments.ClusterSet data object\n\n cluster_set_ref: KBaseExperiments.ClusterSet object references\n workspace_name: the name of the workspace\n pca_matrix_name: name of PCA (KBaseFeatureValues.FloatMatrix2D) object\n n_components - number of components (default 2)\n\n pca_ref: PCA object reference (as KBaseFeatureValues.FloatMatrix2D data type)\n report_name: report name generated by KBaseReport\n report_ref: report reference generated by KBaseReport\n \"\"\"\n\n self._validate_run_pca_params(params)\n\n cluster_set_ref = params.get('cluster_set_ref')\n workspace_name = params.get('workspace_name')\n pca_matrix_name = params.get('pca_matrix_name')\n n_components = int(params.get('n_components', 2))\n\n cluster_set_source = self.dfu.get_objects(\n {\"object_refs\": [cluster_set_ref]})['data'][0]\n\n cluster_set_info = cluster_set_source.get('info')\n cluster_set_name = cluster_set_info[1]\n cluster_set_data = cluster_set_source.get('data')\n clusters = cluster_set_data.get('clusters')\n\n matrix_ref = cluster_set_data.get('original_data')\n\n data_matrix = self.gen_api.fetch_data({'obj_ref': matrix_ref}).get('data_matrix')\n\n if '_column' in cluster_set_name:\n data_matrix = pd.read_json(data_matrix).T.to_json() # transpose matrix\n\n # run pca algorithm\n pca_params = {'data_matrix': data_matrix,\n 'n_components': n_components}\n PCA_matrix = self.ke_util.run_PCA(pca_params).get('PCA_matrix')\n\n df = pd.read_json(PCA_matrix)\n df.fillna(0, inplace=True)\n\n pca_ref, pca_matrix_data = self._save_2D_matrix(df, clusters,\n workspace_name, pca_matrix_name)\n\n returnVal = {'pca_ref': pca_ref}\n\n report_output = self._generate_pca_report(pca_ref, pca_matrix_data, workspace_name)\n\n returnVal.update(report_output)\n return 
returnVal\n\n def run_kmeans_cluster(self, params):\n \"\"\"\n run_kmeans_cluster: generates Kmeans clusters for Matrix data object\n\n matrix_ref: Matrix object reference\n workspace_name: the name of the workspace\n cluster_set_name: KBaseExperiments.ClusterSet object name\n k_num: number of clusters to form\n\n Optional arguments:\n dist_metric: The distance metric to use. Default set to 'euclidean'.\n The distance function can be\n [\"braycurtis\", \"canberra\", \"chebyshev\", \"cityblock\", \"correlation\", \"cosine\",\n \"dice\", \"euclidean\", \"hamming\", \"jaccard\", \"kulsinski\", \"matching\",\n \"rogerstanimoto\", \"russellrao\", \"sokalmichener\", \"sokalsneath\", \"sqeuclidean\",\n \"yule\"]\n Details refer to:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html\n\n return:\n cluster_set_refs: KBaseExperiments.ClusterSet object references\n report_name: report name generated by KBaseReport\n report_ref: report reference generated by KBaseReport\n \"\"\"\n\n self._validate_run_kmeans_cluster_params(params)\n\n matrix_ref = params.get('matrix_ref')\n workspace_name = params.get('workspace_name')\n cluster_set_name = params.get('cluster_set_name')\n k_num = params.get('k_num')\n dist_metric = params.get('dist_metric')\n\n matrix_object = self.ws.get_objects2({'objects': [{'ref': matrix_ref}]})['data'][0]\n matrix_data = matrix_object['data']\n\n data_matrix = self.gen_api.fetch_data({'obj_ref': matrix_ref}).get('data_matrix')\n transpose_data_matrix = pd.read_json(data_matrix).T.to_json()\n\n row_kmeans_clusters = self._build_kmeans_cluster(data_matrix, k_num,\n dist_metric=dist_metric)\n\n col_kmeans_clusters = self._build_kmeans_cluster(transpose_data_matrix, k_num,\n dist_metric=dist_metric)\n\n genome_ref = matrix_data.get('genome_ref')\n clustering_parameters = {'k_num': str(k_num),\n 'dist_metric': str(dist_metric)}\n\n cluster_set_refs = []\n\n row_cluster_set_name = cluster_set_name + '_row'\n row_cluster_set = self._build_kmeans_cluster_set(\n row_kmeans_clusters,\n row_cluster_set_name,\n genome_ref,\n matrix_ref,\n matrix_data.get('row_mapping'),\n matrix_data.get('row_conditionset_ref'),\n workspace_name,\n clustering_parameters)\n cluster_set_refs.append(row_cluster_set)\n\n col_cluster_set_name = cluster_set_name + '_column'\n col_cluster_set = self._build_kmeans_cluster_set(\n col_kmeans_clusters,\n col_cluster_set_name,\n genome_ref,\n matrix_ref,\n matrix_data.get('col_mapping'),\n matrix_data.get('col_conditionset_ref'),\n workspace_name,\n clustering_parameters)\n cluster_set_refs.append(col_cluster_set)\n\n returnVal = {'cluster_set_refs': cluster_set_refs}\n\n report_output = self._generate_kmeans_cluster_report(cluster_set_refs, workspace_name)\n\n returnVal.update(report_output)\n\n return returnVal\n\n def run_hierarchical_cluster(self, params):\n \"\"\"\n run_hierarchical_cluster: generates hierarchical clusters for Matrix data object\n\n matrix_ref: Matrix object reference\n workspace_name: the name of the workspace\n cluster_set_name: KBaseExperiments.ClusterSet object name\n dist_cutoff_rate: the threshold to apply when forming flat clusters\n\n Optional arguments:\n dist_metric: The distance metric to use. 
Default set to 'euclidean'.\n The distance function can be\n [\"braycurtis\", \"canberra\", \"chebyshev\", \"cityblock\", \"correlation\", \"cosine\",\n \"dice\", \"euclidean\", \"hamming\", \"jaccard\", \"kulsinski\", \"matching\",\n \"rogerstanimoto\", \"russellrao\", \"sokalmichener\", \"sokalsneath\",\n \"sqeuclidean\", \"yule\"]\n Details refer to:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html\n\n linkage_method: The linkage algorithm to use. Default set to 'ward'.\n The method can be\n [\"single\", \"complete\", \"average\", \"weighted\", \"centroid\", \"median\", \"ward\"]\n Details refer to:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html\n\n fcluster_criterion: The criterion to use in forming flat clusters.\n Default set to 'inconsistent'.\n The criterion can be\n [\"inconsistent\", \"distance\", \"maxclust\"]\n Details refer to:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html\n\n return:\n cluster_set_refs: KBaseExperiments.ClusterSet object references\n report_name: report name generated by KBaseReport\n report_ref: report reference generated by KBaseReport\n \"\"\"\n log('--->\\nrunning run_hierarchical_cluster\\n' +\n 'params:\\n{}'.format(json.dumps(params, indent=1)))\n\n self._validate_run_hierarchical_cluster_params(params)\n\n matrix_ref = params.get('matrix_ref')\n workspace_name = params.get('workspace_name')\n cluster_set_name = params.get('cluster_set_name')\n dist_cutoff_rate = float(params.get('dist_cutoff_rate'))\n dist_metric = params.get('dist_metric')\n linkage_method = params.get('linkage_method')\n fcluster_criterion = params.get('fcluster_criterion')\n\n matrix_object = self.ws.get_objects2({'objects': [{'ref':\n matrix_ref}]})['data'][0]\n matrix_data = matrix_object['data']\n\n data_matrix = self.gen_api.fetch_data({'obj_ref': matrix_ref}).get('data_matrix')\n transpose_data_matrix = pd.read_json(data_matrix).T.to_json()\n\n (row_flat_cluster,\n row_labels,\n row_newick,\n row_dendrogram_path,\n row_dendrogram_truncate_path) = self._build_flat_cluster(\n data_matrix,\n dist_cutoff_rate,\n dist_metric=dist_metric,\n linkage_method=linkage_method,\n fcluster_criterion=fcluster_criterion)\n\n (col_flat_cluster,\n col_labels,\n col_newick,\n col_dendrogram_path,\n col_dendrogram_truncate_path) = self._build_flat_cluster(\n transpose_data_matrix,\n dist_cutoff_rate,\n dist_metric=dist_metric,\n linkage_method=linkage_method,\n fcluster_criterion=fcluster_criterion)\n\n genome_ref = matrix_data.get('genome_ref')\n\n clustering_parameters = {'dist_cutoff_rate': str(dist_cutoff_rate),\n 'dist_metric': dist_metric,\n 'linkage_method': linkage_method,\n 'fcluster_criterion': fcluster_criterion}\n\n cluster_set_refs = []\n\n row_cluster_set_name = cluster_set_name + '_row'\n row_cluster_set = self._build_hierarchical_cluster_set(\n row_flat_cluster,\n row_cluster_set_name,\n genome_ref,\n matrix_ref,\n matrix_data.get('row_mapping'),\n matrix_data.get('row_conditionset_ref'),\n workspace_name,\n clustering_parameters,\n data_matrix)\n cluster_set_refs.append(row_cluster_set)\n\n col_cluster_set_name = cluster_set_name + '_column'\n col_cluster_set = self._build_hierarchical_cluster_set(\n col_flat_cluster,\n col_cluster_set_name,\n genome_ref,\n matrix_ref,\n matrix_data.get('col_mapping'),\n matrix_data.get('col_conditionset_ref'),\n workspace_name,\n clustering_parameters,\n transpose_data_matrix)\n 
cluster_set_refs.append(col_cluster_set)\n\n returnVal = {'cluster_set_refs': cluster_set_refs}\n\n report_output = self._generate_hierarchical_cluster_report(cluster_set_refs,\n workspace_name,\n row_dendrogram_path,\n row_dendrogram_truncate_path,\n col_dendrogram_path,\n col_dendrogram_truncate_path)\n returnVal.update(report_output)\n\n return returnVal\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.savefig", "numpy.random.rand", "pandas.read_json", "matplotlib.pyplot.figure" ] ]
techunison-software/data-science-trials
[ "46c9e7bfcda8270573e49a7be8cee3b9c445c2cf" ]
[ "Aswath/visualization.py" ]
[ "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport inspect, os.path\n# filePath=os.path.dirname(os.path.abspath(__file__))+'\\\\DataSets\\\\train.csv'\n# print(filePath)\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\npath = os.path.dirname(os.path.abspath(filename))+\"/datasets/titanic/output.csv\"\n\n\ndata=pd.read_csv(path)\nprint(\"Subset of the Data from the Output\")\nprint(data.head())\nfig, ax = plt.subplots(1,1,figsize=(13,5))\nsns.heatmap(data.isnull(), cbar=False, cmap=\"YlGnBu_r\")\n#plt.show()\ntotal = data.isnull().sum().sort_values(ascending = False)\npercent = round(data.isnull().sum().sort_values(ascending = False)/len(data)*100, 2)\ntemp = pd.concat([total, percent], axis = 1,keys= ['Total', 'Percent'])\n# ax [1] = sns.barplot(data=data.isnull())\nplt.xticks(rotation=90)\nf,ax=plt.subplots(1,5,figsize=(13,5))\ndata['Survived'].value_counts().plot.pie(explode=[0,0.05],autopct='%1.1f%%',ax=ax[1],shadow=True)\nax[0].set_title('Survived')\nax[0].set_ylabel('')\nsns.countplot('Survived',data=data,ax=ax[0])\nax[1].set_title('Survived')\n# plt.show()\n#f,ax=plt.subplots(1,3,figsize=(18,5))\ndata[['Sex','Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[2])\nax[2].set_title('Fraction of Survival \\n with respect to Sex')\nsns.countplot('Sex',hue='Survived',data=data,ax=ax[3])\nax[3].set_title('Survived vs Dead counts \\n with respect to Sex')\nsns.barplot(x=\"Sex\", y=\"Survived\", data=data,ax=ax[4])\nax[4].set_title('Survival by Gender')\nf.show()\nplt.show()\nf,ax=plt.subplots(1,3,figsize=(18,5))\ndata['Pclass'].value_counts().plot.bar(color=['#BC8F8F','#F4A460','#DAA520'],ax=ax[0])\nax[0].set_title('Number Of Passengers \\n with respect to Pclass')\nax[0].set_ylabel('Count')\nsns.countplot('Pclass',hue='Survived',data=data,ax=ax[1])\nax[1].set_title('Survived vs Dead counts \\n with respect to Pclass')\nsns.barplot(x=\"Pclass\", y=\"Survived\", data=data,ax=ax[2])\nax[2].set_title('Survival by Pclass')\nf.show()\nplt.show()\nf,ax=plt.subplots(1,1,figsize=(25,6))\nsns.barplot(data['Age'],data['Survived'], ci=None, ax=ax)\nax.set_title('Survived vs Dead counts with respect to Pclass')\nplt.xticks(rotation=90)\nf.show()\nplt.show()\nf,ax=plt.subplots(1,2,figsize=(18,6))\ndata.boxplot(column=['Age'],ax=ax[0], grid=False)\nax[0].set_title('Age Box Plot')\nax[0].set_ylabel('Age Years')\nsns.boxplot(y='Age', data=data, ax=ax[0])\nsns.boxplot(x='Survived', y='Age', data=data, ax=ax[1])\nax[1].set_title('Age Box Plot')\nax[1].set_ylabel('Age Years')\nf.show()\nplt.show()\nf,ax=plt.subplots(1,2,figsize=(12,5))\nsns.countplot('Embarked',data=data,ax=ax[0])\nax[0].set_title('No. 
Of Passengers Boarded')\nsns.countplot('Embarked',hue='Survived',data=data,ax=ax[1])\nax[1].set_title('Embarked vs Survived')\nplt.subplots_adjust(wspace=0.2,hspace=0.5)\nf.show()\nplt.show()\nf,ax=plt.subplots(1,2,figsize=(12,5))\nsns.countplot('Embarked',hue='Sex',data=data,ax=ax[0])\nax[0].set_title('Male-Female Split for Embarked')\nsns.countplot('Embarked',hue='Pclass',data=data,ax=ax[1])\nax[1].set_title('Embarked vs Pclass')\nplt.subplots_adjust(wspace=0.2,hspace=0.5)\nf.show()\nf,ax=plt.subplots(2,2,figsize=(15,10))\nsns.countplot('SibSp',hue='Survived',data=data,ax=ax[0,0])\nax[0,0].set_title('SibSp vs Survived')\nsns.barplot('SibSp','Survived',data=data,ax=ax[0,1])\nax[0,1].set_title('SibSp vs Survived')\nsns.countplot('Parch',hue='Survived',data=data,ax=ax[1,0])\nax[1,0].set_title('Parch vs Survived')\nsns.barplot('Parch','Survived',data=data,ax=ax[1,1])\nax[1,1].set_title('Parch vs Survived')\nplt.subplots_adjust(wspace=0.2,hspace=0.5)\nplt.show()\nf,ax=plt.subplots(1,1,figsize=(20,5))\nsns.distplot(data.Fare,ax=ax)\nax.set_title('Distribution of Fares')\nplt.show()" ]
[ [ "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show" ] ]
VirtualRoyalty/Text-level-GCN
[ "4351d4595f47cf8e7574d0f22e22dc5bbc384ffa" ]
[ "preprocess/general.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport networkx as nx\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom preprocess.graph import doc2graph\n\n\ndef prepare2gcn(doc,\n max_nodes,\n window_size,\n term2id,\n is_directed=True,\n is_weighted_edges=False,\n infranodus_weights=False,\n MASTER_NODE=False,\n pmi_matrix=None):\n\n # doc = doc[-max_nodes:] #doc[:max_nodes]\n # assert len(set(doc)) <= max_nodes\n\n doc = [token for token in doc if token in term2id]\n G = doc2graph(doc,\n max_nodes=max_nodes,\n window_size=window_size,\n term2id=term2id,\n pmi_matrix=pmi_matrix,\n is_directed=is_directed,\n is_weighted_edges=is_weighted_edges,\n infranodus_weights=False)\n A = nx.adjacency_matrix(G).todense()\n padded = np.zeros((max_nodes, max_nodes), dtype='float32')\n padded[:A.shape[0], :A.shape[1]] = A\n\n embs = []\n if MASTER_NODE:\n embs.append(term2id['MASTER_NODE'])\n padded = np.concatenate((np.ones(padded.shape[1]), b), axis=0)\n for i, token in enumerate(list(G.nodes())):\n vec = term2id[token]\n embs.append(vec)\n for i in range(max_nodes-len(G.nodes())):\n embs.append(term2id['PAD'])\n embs = np.array(embs, dtype='int32')\n\n return padded, embs\n\n\ndef get_dataset_from_df(df,\n max_nodes,\n term2id,\n window_size=3,\n token_col='tokens',\n label_col='label',\n pmi_matrix=None,\n is_directed=True,\n is_weighted_edges=False,\n infranodus_weights=False):\n\n X_adj, X_emb, Y = list(), list(), list()\n for i in tqdm_notebook(range(len(df))):\n tokens = df[token_col].iloc[i]\n target = df[label_col].iloc[i]\n if len(tokens) > 1:\n A, embs = prepare2gcn(tokens,\n max_nodes=max_nodes,\n window_size=window_size,\n term2id=term2id,\n pmi_matrix=pmi_matrix,\n is_directed=is_directed,\n is_weighted_edges=is_weighted_edges,\n infranodus_weights=False)\n if pmi_matrix is not None:\n np.fill_diagonal(A, 1)\n X_adj.append(A.astype('float32'))\n X_emb.append(embs.astype('int32'))\n Y.append(target)\n\n X_adj = np.array(X_adj, dtype='float32')\n X_emb = np.array(X_emb, dtype='int32')\n # X_emb = np.expand_dims(X_emb, axis=-1)\n\n Y = np.array(Y)\n Y = tf.one_hot(Y, df[label_col].nunique(), dtype='float32')\n return X_adj, X_emb, Y\n\n\nfrom spektral.data import BatchLoader, Dataset, Graph, PackedBatchLoader\n\nclass CustomDataset(Dataset):\n\n def __init__(self, emb, adj, y, **kwargs):\n self.emb = emb\n self.adj = adj\n self.y = y\n\n super().__init__(**kwargs)\n\n def read(self):\n return [Graph(x=emb.reshape(-1, 1), a=adj, y=y) for emb, adj, y in zip(self.emb, self.adj, self.y)]\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.fill_diagonal", "numpy.ones" ] ]
opent03/gym-games
[ "47cdbe522d5845676eadab1ec1a21f9c23eb5b95" ]
[ "gym_exploration/envs/sparse_mountain_car.py" ]
[ "import math\nimport numpy as np\nfrom gym.envs.classic_control.mountain_car import MountainCarEnv\n\n\nclass SparseMountainCarEnv(MountainCarEnv):\n ''' Modified based on Mountain Car.\n The only difference is the reward function: \n the agent gets 0 reward every step until it reaches the goal with 1 reward.\n '''\n def __init__(self, goal_velocity=0):\n super().__init__(goal_velocity=goal_velocity)\n \n def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n\n position, velocity = self.state\n velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)\n velocity = np.clip(velocity, -self.max_speed, self.max_speed)\n position += velocity\n position = np.clip(position, self.min_position, self.max_position)\n if (position == self.min_position and velocity < 0):\n velocity = 0\n\n done = bool(\n position >= self.goal_position and velocity >= self.goal_velocity\n )\n # Set a sparse reward signal\n if done:\n reward = 1.0\n else:\n reward = 0.0\n\n self.state = (position, velocity)\n return np.array(self.state), reward, done, {}\n\n\n\nif __name__ == '__main__':\n env = SparseMountainCarEnv()\n env.seed(0)\n print('Action space:', env.action_space)\n print('Obsevation space:', env.observation_space)\n print('Obsevation space high:', env.observation_space.high)\n print('Obsevation space low:', env.observation_space.low)\n\n for i in range(1):\n ob = env.reset()\n for _ in range(10):\n action = env.action_space.sample()\n ob, reward, done, _ = env.step(action)\n print('Observation:', ob)\n print('Reward:', reward)\n print('Done:', done)\n if done:\n break\n env.close()" ]
[ [ "numpy.array", "numpy.clip" ] ]
charx7/MachineLearning
[ "527602a40092f1458b6b99f2de48c10d323e8ea4" ]
[ "FeatureSelection/Label_Onehot_Encoding_module.py" ]
[ "#Implement for OneHotEncoder\n\n# author : Haibin\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\ndef LabelEncoder_OneHotEncoder(Whole_Dataframe):\n\n df =Whole_Dataframe\n Cleaned_df = df[['source', 'user_id', 'truncated', 'in_reply_to_status_id', 'in_reply_to_user_id',\n 'retweeted_status_id', 'geo', 'place', 'contributors', 'retweet_count',\n 'reply_count', 'favorite_count', 'favorited', 'retweeted', 'possibly_sensitive', 'num_hashtags',\n 'num_urls', 'num_mentions', 'created_at', 'timestamp', 'crawled_at', 'updated']]\n\n del Cleaned_df['source']\n del Cleaned_df['user_id']\n del Cleaned_df['created_at']\n del Cleaned_df['timestamp']\n del Cleaned_df['crawled_at']\n del Cleaned_df['updated']\n\n num_cols = ['in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_id',\n 'retweet_count', 'reply_count', 'favorite_count', 'num_hashtags', 'num_urls', 'num_mentions']\n\n cate_cols = Cleaned_df.columns.drop(num_cols)\n\n Cleaned_df[cate_cols]=Cleaned_df[cate_cols].astype('object')\n\n # Convert numerical data\n Cleaned_df[num_cols] = Cleaned_df[num_cols].apply(pd.to_numeric, errors='coerce')\n\n print(Cleaned_df.info())\n # Select categorical columns using object feature\n obj_df = Cleaned_df.select_dtypes(include=['object']).copy()\n\n # LabelEncoder\n L_encoder = LabelEncoder()\n\n # Applying LabelEncoder on each of the categorical columns:\n obj_df = obj_df.apply(lambda col: L_encoder.fit_transform(col.astype(str)))\n\n # OneHotEncoder\n One_encoder = OneHotEncoder(categories='auto')\n\n def One_encoder_function(input_obj_df_column):\n\n indicator_for_cat = input_obj_df_column.name\n\n One_encoded_cat_array = One_encoder.fit_transform(input_obj_df_column.values.reshape(-1, 1)).toarray()\n\n Current_cat_dataframe = pd.DataFrame(One_encoded_cat_array,\n columns=[str(indicator_for_cat) + '_' + str(int(n)) for n in\n range(One_encoded_cat_array.shape[1])])\n\n return Current_cat_dataframe\n\n truncated_One_encoded_dataframe = One_encoder_function(obj_df['truncated'])\n\n geo_One_encoded_dataframe = One_encoder_function(obj_df['geo'])\n\n place_One_encoded_dataframe = One_encoder_function(obj_df['place'])\n\n contributors_One_encoded_dataframe = One_encoder_function(obj_df['contributors'])\n\n favorited_One_encoded_dataframe = One_encoder_function(obj_df['favorited'])\n\n retweeted_One_encoded_dataframe = One_encoder_function(obj_df['retweeted'])\n\n possibly_sensitive_One_encoded_dataframe = One_encoder_function(obj_df['possibly_sensitive'])\n\n Current_cats_dataframe = pd.concat(\n [ truncated_One_encoded_dataframe, geo_One_encoded_dataframe, place_One_encoded_dataframe,\n contributors_One_encoded_dataframe, favorited_One_encoded_dataframe, retweeted_One_encoded_dataframe,\n possibly_sensitive_One_encoded_dataframe], axis=1)\n\n print('\\n The index for Nan value before imputing: \\n')\n print(np.where(np.isnan(Cleaned_df[num_cols])))\n\n # define numerical imputer\n\n num_imputer = SimpleImputer(missing_values=np.nan, strategy='median')\n\n # Imputing on numerical data\n Cleaned_df[num_cols] = num_imputer.fit_transform(Cleaned_df[num_cols])\n\n print('\\n The index for Nan value after imputing: \\n')\n print(np.where(np.isnan(Cleaned_df[num_cols])))\n\n Encoded_Cleaned_df_Concatenated = pd.concat([Cleaned_df[num_cols], Current_cats_dataframe], axis=1)\n\n LabelandOneHot_Encoded_feature = 
Encoded_Cleaned_df_Concatenated\n\n return LabelandOneHot_Encoded_feature\n" ]
[ [ "pandas.concat", "numpy.isnan", "sklearn.preprocessing.OneHotEncoder", "sklearn.impute.SimpleImputer", "sklearn.preprocessing.LabelEncoder" ] ]
NuxDD/pyrate
[ "01e61027813d4eaf674dd5ce5db6898ea5ca22aa" ]
[ "doc/SM/PythonOuput/SM.py" ]
[ "#########################################################\n## This file was automatically generated by PyR@TE 3 ##\n### ###\n## ##\n# Model : SM #\n# Author : Lohan Sartore #\n# Date : 08.06.2020 #\n#########################################################\n\nimport time\nimport numpy as np\nfrom sympy import flatten\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\n\nfrom RGEs import (beta_g1, beta_g2, beta_g3,\n beta_yt, beta_yb, beta_ytau,\n beta_lambda_,\n beta_mu,\n beta_v)\n\nclass Coupling():\n couplings = {}\n\n def __init__(self, name, couplingType, latex=None, shape = (), fromMat=None, cplx=False, init=0, pos=None):\n self.name = name\n self.type = couplingType\n\n if latex is not None:\n self.latex = latex\n else:\n self.latex = self.name\n\n self.shape = shape\n self.is_matrix = ( shape != () )\n self.nb = self.shape[0]*self.shape[1] if self.is_matrix else 1\n self.cplx = cplx\n\n self.initialValue = init if shape == () else np.zeros(shape)\n\n if fromMat is not None:\n self.pos = pos\n self.latex = '{' + fromMat.latex + '}' + self.name.replace(fromMat.name, '')\n return\n\n if couplingType not in self.couplings:\n self.couplings[couplingType] = []\n\n self.pos = sum([c.nb for cList in self.couplings.values() for c in cList])\n self.couplings[couplingType].append(self)\n\n def as_explicit(self, toList=False):\n if not self.is_matrix:\n return self\n\n nameFunc = lambda x: self.name+'_{' + str(1 + x // self.shape[1]) + str(1 + x % self.shape[1]) + '}'\n initFunc = lambda x: list(self.initialValue)[x // self.shape[1]][x % self.shape[1]]\n arrayFunc = np.vectorize(lambda x: Coupling(nameFunc(x), self.type, fromMat=self, init=initFunc(x), pos=self.pos+x))\n array = arrayFunc(np.reshape(range(self.nb), self.shape))\n\n if not toList:\n return array\n\n return [*array.flat]\n\n\nclass RGEsolver():\n \"\"\" This class contains the RGEs of the model, as well as pre-defined functions\n used to solve and plot them.\n\n The three following arguments may be provided:\n - initialScale:\n The energy scale at which the initial values are given\n - tmin, tmax :\n The lower and upper energy scales between which the running couplings are computed and plotted\n\n The initialScale can be different from tmin and tmax, the only requirement being that the initial value of the\n couplings are all given at the same scale.\"\"\"\n\n translation = {'GaugeCouplings': 'Gauge Couplings',\n 'Yukawas': 'Yukawa Couplings',\n 'QuarticTerms': 'Quartic Couplings',\n 'TrilinearTerms' : 'Trilinear Couplings',\n 'ScalarMasses': 'Scalar Mass Couplings',\n 'FermionMasses': 'Fermion Mass Couplings',\n 'Vevs': 'Vacuum-expectation Values'}\n\n def __init__(self, name, initialScale = 0, tmin = 0, tmax = 20):\n if initialScale < tmin or initialScale > tmax:\n exit(f\"The initial running scale must lie in the interval [tmin={tmin}, tmax={tmax}]\")\n\n self.name = name\n Coupling.couplings = {}\n\n self.initialScale = initialScale\n self.tmin = tmin\n self.tmax = tmax\n\n self.kappa = lambda n: 1/(4*np.pi)**(2*n)\n self.kappaString = '1/(4*np.pi)**(2*n)'\n\n self.tList = []\n self.solutions = {}\n self.loops = {'GaugeCouplings' : 2, \n 'Yukawas' : 2, \n 'QuarticTerms' : 2, \n 'ScalarMasses' : 2, \n 'Vevs' : 2}\n\n # Gauge Couplings\n self.g1 = Coupling('g1', 'GaugeCouplings', latex='g_1')\n self.g2 = Coupling('g2', 'GaugeCouplings', latex='g_2')\n self.g3 = Coupling('g3', 'GaugeCouplings', latex='g_3')\n\n # Yukawa Couplings\n self.yt = Coupling('yt', 'Yukawas', latex='y_t')\n self.yb = Coupling('yb', 
'Yukawas', latex='y_b')\n self.ytau = Coupling('ytau', 'Yukawas', latex='y_\\\\tau')\n\n # Quartic Couplings\n self.lambda_ = Coupling('lambda_', 'QuarticTerms', latex='\\\\lambda')\n\n # Scalar Mass Couplings\n self.mu = Coupling('mu', 'ScalarMasses', latex='\\\\mu')\n\n # Vacuum-expectation Values\n # For vevs the gauge must be fixed. Let's use for instance the Landau gauge :\n self.xiGauge = 0\n\n self.v = Coupling('v', 'Vevs')\n\n self.couplings = Coupling.couplings\n self.matrixCouplings = {c.name: np.vectorize(lambda x: x.name)(c.as_explicit())\n for cList in self.couplings.values()\n for c in cList if c.is_matrix}\n\n\n def extractCouplings(self, couplingsArray, couplingType):\n ret = []\n for c in self.couplings[couplingType]:\n if not c.is_matrix:\n ret.append(couplingsArray[c.pos])\n else:\n ret.append(np.matrix(np.reshape([couplingsArray[p] for p in range(c.pos, c.pos+c.nb)], c.shape)))\n return ret\n\n\n def fixGauge(self, xi):\n self.xiGauge = xi\n\n\n def betaFunction(self, t, couplingsArray):\n \"\"\" This function generates the numerical values of the model RGEs. It is called by the\n solver to provide the derivative of the couplings with respect to the energy scale.\"\"\"\n\n g1, g2, g3 = self.extractCouplings(couplingsArray, 'GaugeCouplings')\n yt, yb, ytau = self.extractCouplings(couplingsArray, 'Yukawas')\n lambda_, = self.extractCouplings(couplingsArray, 'QuarticTerms')\n mu, = self.extractCouplings(couplingsArray, 'ScalarMasses')\n v, = self.extractCouplings(couplingsArray, 'Vevs')\n\n bg1, bg2, bg3 = 3*[0]\n byt, byb, bytau = 3*[0]\n blambda_ = 0\n bmu = 0\n bv = 0\n\n if self.loops['GaugeCouplings'] >= 1:\n bg1 += beta_g1(1, g1,g2,g3,yt,yb,ytau)*self.kappa(1)*np.log(10)\n bg2 += beta_g2(1, g2,g1,g3,yt,yb,ytau)*self.kappa(1)*np.log(10)\n bg3 += beta_g3(1, g3,g1,g2,yt,yb)*self.kappa(1)*np.log(10)\n if self.loops['GaugeCouplings'] >= 2:\n bg1 += beta_g1(2, g1,g2,g3,yt,yb,ytau)*self.kappa(2)*np.log(10)\n bg2 += beta_g2(2, g2,g1,g3,yt,yb,ytau)*self.kappa(2)*np.log(10)\n bg3 += beta_g3(2, g3,g1,g2,yt,yb)*self.kappa(2)*np.log(10)\n\n if self.loops['Yukawas'] >= 1:\n byt += beta_yt(1, g1,g2,g3,yt,yb,ytau,lambda_)*self.kappa(1)*np.log(10)\n byb += beta_yb(1, g1,g2,g3,yt,yb,ytau,lambda_)*self.kappa(1)*np.log(10)\n bytau += beta_ytau(1, g1,g2,yt,yb,ytau,g3,lambda_)*self.kappa(1)*np.log(10)\n if self.loops['Yukawas'] >= 2:\n byt += beta_yt(2, g1,g2,g3,yt,yb,ytau,lambda_)*self.kappa(2)*np.log(10)\n byb += beta_yb(2, g1,g2,g3,yt,yb,ytau,lambda_)*self.kappa(2)*np.log(10)\n bytau += beta_ytau(2, g1,g2,yt,yb,ytau,g3,lambda_)*self.kappa(2)*np.log(10)\n\n if self.loops['QuarticTerms'] >= 1:\n blambda_ += beta_lambda_(1, g1,g2,yt,yb,ytau,lambda_,g3)*self.kappa(1)*np.log(10)\n if self.loops['QuarticTerms'] >= 2:\n blambda_ += beta_lambda_(2, g1,g2,yt,yb,ytau,lambda_,g3)*self.kappa(2)*np.log(10)\n\n if self.loops['ScalarMasses'] >= 1:\n bmu += beta_mu(1, g1,g2,yt,yb,ytau,lambda_,mu,g3)*self.kappa(1)*np.log(10)\n if self.loops['ScalarMasses'] >= 2:\n bmu += beta_mu(2, g1,g2,yt,yb,ytau,lambda_,mu,g3)*self.kappa(2)*np.log(10)\n\n if self.loops['Vevs'] >= 1:\n bv += beta_v(1, g1,g2,yt,yb,ytau,v,self.xiGauge,g3,lambda_)*self.kappa(1)*np.log(10)\n if self.loops['Vevs'] >= 2:\n bv += beta_v(2, g1,g2,yt,yb,ytau,v,self.xiGauge,g3,lambda_)*self.kappa(2)*np.log(10)\n\n return [bg1, bg2, bg3, byt, byb, bytau, blambda_, bmu, bv]\n\n\n def printInitialConditions(self, returnString=False):\n \"\"\" This function displays the current running scheme and the initial values of the couplings.\n\n Its output 
may be copy-pasted 'as-is' by user to modify these parameters before solving the RGEs.\"\"\"\n\n # Display the running scheme\n\n outputString = \"\\n# Running scheme :\\n\\n\"\n\n s = f\"{self.name}.loops = \"\n outputString += s + str(self.loops).replace(', ', ',\\n ' + ' '*len(s)) + '\\n'\n\n # Display the initial values of the couplings\n for cType, cList in self.couplings.items():\n outputString += f\"\\n# {self.translation[cType]}\\n\\n\"\n for c in cList:\n s = f\"{self.name}.{c.name}.initialValue = \"\n if not c.is_matrix:\n s += str(c.initialValue)\n else:\n sVal = '['\n sVal += (',\\n ' + len(s)*' ').join([ str(el).replace(' ', ', ') for el in c.initialValue])\n sVal += ']\\n'\n s += sVal\n outputString += s + '\\n'\n\n if returnString:\n return outputString\n\n print(outputString)\n\n\n ##################\n # Solve function #\n ##################\n\n def solve(self, step=.1, Npoints=None):\n \"\"\" This function performs the actual solving of the system of RGEs, using scipy.ode.\n\n Either the step of the numerical integration may be provided by the user with 'step=[value]',\n OR the number of integration points with 'Npoints=[integer value]'.\"\"\"\n\n self.allCouplings = flatten([c.as_explicit(toList=True) for cList in self.couplings.values() for c in cList])\n\n time0 = time.time()\n y0 = flatten([(c.initialValue if not c.is_matrix else [*c.initialValue.flat]) for c in self.allCouplings])\n\n tmin = self.tmin\n tmax = self.tmax\n t0 = self.initialScale\n\n if Npoints is None:\n dt = step\n else:\n dt = (tmax-tmin)/(Npoints-1)\n\n solutions = {}\n for c in self.allCouplings:\n solutions[c.name] = []\n tList = []\n\n solver = ode(self.betaFunction).set_integrator('zvode', method='bdf')\n solver.set_initial_value(y0, t0)\n\n # Solve upwards\n while solver.successful() and solver.t < tmax + dt/2:\n tList.append(solver.t)\n for i, c in enumerate(self.allCouplings):\n y = solver.y[i]\n if abs(y.imag) > 1e-10 and not c.cplx:\n c.cplx = True\n elif y.imag == 0:\n y = y.real\n\n solutions[c.name].append(y)\n\n solver.integrate(solver.t+dt)\n\n if t0 > tmin:\n # If t0 > tmin, complete the solving going downwards\n solutions2 = {}\n for c in self.allCouplings:\n solutions2[c.name] = []\n tList2 = []\n\n solver.set_initial_value(y0, t0)\n # Solve downwards\n while solver.successful() and solver.t > tmin - dt/2:\n solver.integrate(solver.t-dt)\n\n tList2.append(solver.t)\n for i, c in enumerate(self.allCouplings):\n y = solver.y[i]\n if abs(y.imag) > 1e-10 and not c.cplx:\n c.cplx = True\n elif y.imag == 0:\n y = y.real\n\n solutions2[c.name].append(y)\n\n\n # Combine the two regions\n tList = tList2[::-1] + tList\n for c in self.allCouplings:\n solutions[c.name] = solutions2[c.name][::-1] + solutions[c.name]\n\n self.tList, self.solutions = np.array(tList), {k:np.array(v) for k,v in solutions.items()}\n\n for k,v in self.matrixCouplings.items():\n self.solutions[k] = np.zeros(v.shape).tolist()\n for i, l in enumerate(self.solutions[k]):\n for j in range(len(l)):\n self.solutions[k][i][j] = self.solutions[v[i,j]].tolist()\n self.solutions[k] = np.array(self.solutions[k]).transpose([2,0,1])\n\n print(f\"System of RGEs solved in {time.time()-time0:.3f} seconds.\")\n\n\n #################\n # Plot function #\n #################\n\n subPos = {1: [111], 2: [121, 122], 3:[221, 222, 212],\n 4: [221, 222, 223, 224], 5:[231, 232, 233, 223, 224],\n 6: [231, 232, 233, 234, 235, 236],\n 7: [241, 242, 243, 244, 231, 232, 233]}\n\n def plot(self, figSize=(600, 600), subPlots=True, which={}, 
whichNot={}, printLoopLevel=True):\n \"\"\" Plot the running couplings.\n\n Several options may be given to this function:\n - figSize=(x,y):\n The figure dimensions in pixels.\n - subPlots=True/False :\n If True, plot all the various couplings in the same window. If False,\n produces one figure by coupling type.\n - which=... :\n The user may want to plot only one or several (types of) couplings. Usage:\n\n >>> which='GaugeCouplings'\n\n >>> which=('GaugeCouplings', 'QuarticTerms')\n\n >>> which={'GaugeCouplings': 'all', 'Yukawas': ['yt', 'yb']}\n\n >>> which={'GaugeCouplings': ['g1', 'g2], 'Yukawas': 'Yu_{33}'}\n - whichNot=... :\n Which types of coupling types are NOT to be plotted. Same usage as which.\n Note that 'which' and 'whichNot' cannot be used simultaneously.\n - printLoopLevel=True/False :\n The loop-levels of the computation are displayed in the title of the plots.\n \"\"\"\n\n if self.solutions == {}:\n print(\"The system of RGEs must be solved before plotting the results.\")\n return\n\n allCouplingsByType = {cType:[] for cType in self.couplings}\n\n for c in self.allCouplings:\n if not all([el == 0 for el in self.solutions[c.name]]):\n allCouplingsByType[c.type].append(c)\n\n if which != {} and whichNot != {}:\n print(\"Error in 'plot' function: Arguments 'which' and 'whichNot' cannot be used simultaneously.\")\n return\n\n ########################################\n # Identify the couplings to be plotted #\n ########################################\n\n if type(which) == str:\n which = {which: 'all'}\n elif type(which) == tuple:\n which = {el: 'all' for el in which}\n if type(whichNot) == str:\n which = {which: 'all'}\n elif type(whichNot) == tuple:\n whichNot = {el: 'all' for el in whichNot}\n\n for cType, cList in list(allCouplingsByType.items()):\n couplingsToDelete = []\n toDelete = False\n if cList == []:\n toDelete = True\n if which != {}:\n if cType not in which:\n toDelete = True\n elif which[cType] != 'all':\n if type(which[cType]) == str:\n which[cType] = [which[cType]]\n tmpList = []\n for el in which[cType]:\n if el not in self.matrixCouplings:\n tmpList.append(el)\n else:\n tmpList += [*self.matrixCouplings[el].flat]\n couplingsToDelete = [c for c in cList if c.name not in tmpList]\n if whichNot != {}:\n if cType in whichNot:\n if whichNot[cType] == 'all':\n toDelete = True\n else:\n if type(whichNot[cType]) == str:\n whichNot[cType] = [whichNot[cType]]\n tmpList = []\n for el in whichNot[cType]:\n if el not in self.matrixCouplings:\n tmpList.append(el)\n else:\n tmpList += [*self.matrixCouplings[el].flat]\n couplingsToDelete = [c for c in cList if c.name in tmpList]\n\n if toDelete:\n del allCouplingsByType[cType]\n\n if couplingsToDelete != []:\n for c in couplingsToDelete:\n if c in allCouplingsByType[cType]:\n allCouplingsByType[cType].remove(c)\n\n\n ###################\n # Actual plotting #\n ###################\n\n if subPlots:\n plt.figure(figsize=(figSize[0]/80., figSize[0]/80.), dpi=80)\n\n for i, (cType, cList) in enumerate(allCouplingsByType.items()):\n title = self.translation[cType]\n if printLoopLevel:\n title = f\"{self.loops[cType]}-loop \" + title\n if not subPlots:\n plt.figure(figsize=(figSize[0]/80., figSize[0]/80.), dpi=80)\n plt.suptitle(title)\n else:\n plt.subplot(self.subPos[len(allCouplingsByType)][i])\n plt.title(title)\n\n cNames = []\n for c in cList:\n if not c.cplx:\n plt.plot(self.tList, self.solutions[c.name])\n cNames.append('$' + c.latex + '$')\n else:\n plt.plot(self.tList, np.real(self.solutions[c.name]))\n 
plt.plot(self.tList, np.imag(self.solutions[c.name]))\n cNames.append('$\\\\Re(' + c.latex + ')$')\n cNames.append('$\\\\Im(' + c.latex + ')$')\n\n plt.legend(cNames)\n plt.xlabel(r't',fontsize=17-len(allCouplingsByType))\n\n\n #########################\n # Save / load functions #\n #########################\n\n def save(self, fileName):\n try:\n import pickle\n except:\n print(\"Error: unable to load the 'pickle' module.\")\n return\n\n storeKappa = self.kappa\n self.kappa = None\n\n try:\n if '.' not in fileName:\n fileName += '.save'\n print(f\"Saving the RGE object in file '{fileName}'...\", end='')\n file = open(fileName, 'wb')\n pickle.dump(self, file)\n except BaseException as e:\n print(\"\\nAn error occurred while saving the rge object :\")\n print(e)\n return\n else:\n file.close()\n print(\" Done.\")\n\n self.kappa = storeKappa\n\n def load(fileName):\n import os\n try:\n import pickle\n except:\n print(\"Error: unable to load the 'pickle' module.\")\n return\n\n if not os.path.exists(fileName):\n print(f\"Error: The file '{fileName}' doesn't exist.\")\n return None\n\n try:\n print(f\"Loading the RGE object from file '{fileName}'...\", end='')\n file = open(fileName, 'rb')\n rge = pickle.load(file)\n except BaseException as e:\n print(\"\\nAn error occurred while loading the rge object :\")\n print(e)\n else:\n print(\" Done.\")\n finally:\n file.close()\n\n rge.kappa = eval('lambda n:' + rge.kappaString)\n return rge\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "numpy.imag", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "numpy.real", "numpy.vectorize", "matplotlib.pyplot.suptitle", "numpy.array", "numpy.zeros", "scipy.integrate.ode", "matplotlib.pyplot.figure" ] ]
alexrockhill/autoreject
[ "20e5b346bf85eb63a1db6b5134b4f8227d3694ed" ]
[ "examples/plot_visualize_bad_epochs.py" ]
[ "\"\"\"\n===============================\nVisualize bad sensors per trial\n===============================\n\nThis example demonstrates how to use :mod:`autoreject` to\nvisualize the bad sensors in each trial\n\"\"\"\n\n# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>\n# Denis A. Engemann <denis.engemann@gmail.com>\n# License: BSD (3-clause)\n\n# sphinx_gallery_thumbnail_number = 2\n\n# %%\n# First, we download the data from OpenfMRI. We will download the tarfile,\n# extract the necessary files and delete the tar from the disk\n\nimport os\nimport tarfile\n\nimport autoreject\nfrom autoreject.utils import fetch_file\n\nsubject_id = 16 # OpenfMRI format of subject numbering\n\nsrc_url = ('http://openfmri.s3.amazonaws.com/tarballs/'\n 'ds117_R0.1.1_sub016_raw.tgz')\nsubject = \"sub%03d\" % subject_id\n\nprint(\"processing subject: %s\" % subject)\nbase_path = os.path.join(\n os.path.dirname(autoreject.__file__), '..', 'examples')\ntarget = os.path.join(base_path, 'ds117_R0.1.1_sub016_raw.tgz')\nif not os.path.exists(os.path.join(base_path, 'ds117')):\n if not os.path.exists(target):\n fetch_file(src_url, target)\n tf = tarfile.open(target)\n print('Extracting files. This may take a while ...')\n tf.extractall(path=base_path, members=tf.getmembers()[-25:-9:3])\n os.remove(target)\n\n# %%\n# We will create epochs with data starting 200 ms before trigger onset\n# and continuing up to 800 ms after that. The data contains visual stimuli for\n# famous faces, unfamiliar faces, as well as scrambled faces.\n\ntmin, tmax = -0.2, 0.8\nevents_id = {'famous/first': 5, 'famous/immediate': 6, 'famous/long': 7}\n\n# %%\n# Let us now load all the epochs into memory and concatenate them\n\nimport mne # noqa\n\nepochs = list()\nfor run in range(3, 7):\n run_fname = os.path.join(base_path, 'ds117', 'sub%03d' % subject_id, 'MEG',\n 'run_%02d_raw.fif' % run)\n raw = mne.io.read_raw_fif(run_fname, preload=True)\n raw.pick_types(eeg=True, meg=False, stim=True) # less memory + computation\n raw.filter(1., 40., l_trans_bandwidth=0.5, n_jobs=1, verbose='INFO')\n\n raw.set_channel_types({'EEG061': 'eog', 'EEG062': 'eog',\n 'EEG063': 'ecg', 'EEG064': 'misc'})\n raw.rename_channels({'EEG061': 'EOG061', 'EEG062': 'EOG062',\n 'EEG063': 'ECG063', 'EEG064': 'MISC'})\n\n events = mne.find_events(raw, stim_channel='STI101',\n consecutive='increasing',\n min_duration=0.003, verbose=True)\n # Read epochs\n mne.io.set_eeg_reference(raw)\n\n epoch = mne.Epochs(raw, events, events_id, tmin, tmax, proj=True,\n baseline=None,\n preload=False, reject=None, decim=4)\n epochs.append(epoch)\n\n # Same `dev_head_t` for all runs so that we can concatenate them.\n epoch.info['dev_head_t'] = epochs[0].info['dev_head_t']\n\n\nepochs = mne.epochs.concatenate_epochs(epochs)\n# %%\n# Now, we apply autoreject\n\nfrom autoreject import AutoReject, compute_thresholds # noqa\n\nthis_epoch = epochs['famous']\nexclude = [] # XXX\npicks = mne.pick_types(epochs.info, meg=False, eeg=True, stim=False,\n eog=False, exclude=exclude)\n\n# %%\n# Note that :class:`autoreject.AutoReject` by design supports multiple\n# channels. If no picks are passed separate solutions will be computed for each\n# channel type and internally combines. This then readily supports cleaning\n# unseen epochs from the different channel types used during fit.\n# Here we only use a subset of channels to save time.\n\n# %%\n# Also note that once the parameters are learned, any data can be repaired\n# that contains channels that were used during fit. 
This also means that time\n# may be saved by fitting :class:`autoreject.AutoReject` on a\n# representative subsample of the data.\n\nar = AutoReject(picks=picks, random_state=42, n_jobs=1, verbose='tqdm')\n\nepochs_ar, reject_log = ar.fit_transform(this_epoch, return_log=True)\n\n# %%\n# We can visualize the cross validation curve over two variables\n\nimport numpy as np # noqa\nimport matplotlib.pyplot as plt # noqa\nimport matplotlib.patches as patches # noqa\nfrom autoreject import set_matplotlib_defaults # noqa\n\nset_matplotlib_defaults(plt, style='seaborn-white')\nloss = ar.loss_['eeg'].mean(axis=-1) # losses are stored by channel type.\n\nplt.matshow(loss.T * 1e6, cmap=plt.get_cmap('viridis'))\nplt.xticks(range(len(ar.consensus)), ['%.1f' % c for c in ar.consensus])\nplt.yticks(range(len(ar.n_interpolate)), ar.n_interpolate)\n\n# Draw rectangle at location of best parameters\nax = plt.gca()\nidx, jdx = np.unravel_index(loss.argmin(), loss.shape)\nrect = patches.Rectangle((idx - 0.5, jdx - 0.5), 1, 1, linewidth=2,\n edgecolor='r', facecolor='none')\nax.add_patch(rect)\nax.xaxis.set_ticks_position('bottom')\nplt.xlabel(r'Consensus percentage $\\kappa$')\nplt.ylabel(r'Max sensors interpolated $\\rho$')\nplt.title('Mean cross validation error (x 1e6)')\nplt.colorbar()\nplt.show()\n\n# %%\n# ... and visualize the bad epochs and sensors. Bad sensors which have been\n# interpolated are in blue. Bad sensors which are not interpolated are in red.\n# Bad trials are also in red.\n\nscalings = dict(eeg=40e-6)\nreject_log.plot_epochs(this_epoch, scalings=scalings)\n\n# %%\n# ... and the epochs after cleaning with autoreject\n\nepochs_ar.plot(scalings=scalings)\n\n# %%\n# The epochs dropped by autoreject are also stored in epochs.drop_log\n\nepochs_ar.plot_drop_log()\n\n# %%\n# Finally, the evoked before and after autoreject, for sanity check. We use\n# the ``spatial_colors`` argument from MNE as it allows us to see that\n# the eyeblinks have not yet been cleaned but the bad channels have been\n# repaired.\n\nylim = dict(eeg=(-15, 15))\nepochs.average().plot(ylim=ylim, spatial_colors=True)\nepochs_ar.average().plot(ylim=ylim, spatial_colors=True)\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.patches.Rectangle", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
anantgupta129/Fine-DE-TR-Object-Detection-Panoptic-Segmentation-with-Transformers
[ "6720f8e7e2530a67fd7ae31d2fa9aef7c914a7bb" ]
[ "autolabellingstuff.py" ]
[ "# !pip install git+https://github.com/cocodataset/panopticapi.git\n# !pip install imantics\n# !python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'\n\nimport io\nimport itertools\nimport json\nimport os\nimport warnings\n\nimport cv2\nimport numpy as np\nimport torch\nimport torchvision.transforms as T\nfrom imantics import Mask\nfrom panopticapi.utils import rgb2id\nfrom PIL import Image\nfrom pycocotools import mask as coco_mask\nfrom seaborn import color_palette\n\n# from detectron2.detectron2.config import get_cfg\n# from detectron2.detectron2.utils.visualizer import Visualizer\n# from detectron2.detectron2.data import MetadataCatalog\n# from IPython.display import set_matplotlib_formats\n# %config InlineBackend.figure_format = 'retina'\n# set_matplotlib_formats('retina')\ntorch.set_grad_enabled(False)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\npalette = itertools.cycle(color_palette())\n\n# standard PyTorch mean-std input image normalization\ntransform = T.Compose([\n T.Resize(600),\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Selected Device: \", device)\n\ntorch.hub._validate_not_a_forked_repo=lambda a,b,c: True\nmodel, postprocessor = torch.hub.load(\n 'facebookresearch/detr', 'detr_resnet101_panoptic', pretrained=True, return_postprocessor=True, num_classes=250\n)\nmodel.to(device)\nmodel.eval()\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n # this function is from facebookresearch_detr_master/datasets/coco.py\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\ndef detect_masks(im):\n im_w, im_h = im.size\n img = transform(im).unsqueeze(0)\n out = model(img.to(device))\n\n # scores = out[\"pred_logits\"].softmax(-1)[..., :-1].max(-1)[0]\n # threshold the confidence\n # keep = scores > 0.85\n result = postprocessor(out, torch.as_tensor(img.shape[-2:]).unsqueeze(0))[0]\n \n # The segmentation is stored in a special-format png\n panoptic_seg = Image.open(io.BytesIO(result['png_string']))\n panoptic_seg = np.array(panoptic_seg, dtype=np.uint8).copy()\n # We retrieve the ids corresponding to each mask\n panoptic_seg_id = rgb2id(panoptic_seg)\n # Finally we color each mask individually\n masks = []\n for id in range(panoptic_seg_id.max() + 1):\n panoptic_seg = np.zeros(list(panoptic_seg_id.shape) + [3])\n panoptic_seg[panoptic_seg_id == id] = np.asarray(next(palette)) * 255\n panoptic_seg = np.array(panoptic_seg, dtype=np.uint8)\n panoptic_seg = cv2.cvtColor(panoptic_seg, cv2.COLOR_BGR2GRAY)\n panoptic_seg = cv2.resize(panoptic_seg, (im_w,im_h))\n masks.append(panoptic_seg)\n\n return masks, result\n\n\ndef remove_overlap(mask, lab_mask):\n row, col = mask.shape\n for i in range(row):\n for j in range(col):\n if mask[i,j]!=0 and lab_mask[i,j]!=0:\n mask[i,j]=0\n return mask\n\n\ndef get_bbox_segmask(mask,h,w):\n polygons = Mask(mask).polygons()\n segment = []\n for i in polygons.segmentation:\n if len(i)>20:\n segment.append(i)\n if not segment:\n return [], []\n\n mask = convert_coco_poly_to_mask([segment], h, w).squeeze()\n\n bboxes = []\n 
contours,_ = cv2.findContours(mask.numpy(), 1, 2)\n for cnt in contours:\n # M = cv2.moments(cnt)\n x,y,w,h = cv2.boundingRect(cnt)\n bboxes.append((x,y,w,h))\n \n # combining all the bboxes to form a big bbox\n if len(bboxes)>0:\n x_min, y_min, w_max, h_max = np.inf, np.inf, 0, 0\n for x,y,w,h in bboxes:\n if x_min>x:\n x_min = x\n if y_min>y:\n y_min = y\n if w_max<x+w:\n w_max = x+w\n if h_max<y+h:\n h_max = y+h\n \n bbox = [x_min, y_min, w_max-x_min, h_max-y_min]\n else:\n # no contours were found, so there is no bbox to return\n return [], []\n return bbox, segment\n\n\nif __name__=='__main__':\n\n folders = os.listdir(\"data\")\n folders.remove(\"coco_val2017\")\n im_id = 0\n anno_id = 0\n category_id = 0\n \n for category in folders:\n if os.path.isfile(f\"data/{category}/updated_coco.json\"):\n print(f\"Skipping {category} as file already updated\")\n category_id += 1\n continue\n print(f\"Reading From {category}\")\n data = json.load(open(f\"data/{category}/coco.json\")) \n k = 0\n images_info = data['images']\n\n res_file = {\n \"info\":{\n \"description\": \"Construction Material Panoptic Segmentation & Object Detection Data\", \n \"url\": \"\", \n \"version\": \"1.0\", \n \"year\": 2021, \n \"contributor\": \"https://theschoolof.ai/\", \n \"date_created\": \"AUG 2021\"\n },\n \"licenses\": [{\"name\": \"\", \"id\": 0, \"url\": \"\"}],\n \"images\": [],\n \"annotations\": [],\n \"categories\": [\n {\n \"supercategory\": \"construction material\", \n \"isthing\": 1, \n \"id\": category_id, \n \"name\": category\n }\n ]\n }\n for i in range(len(images_info)):\n image = images_info[i]\n im_name = image['file_name']\n image_id = image['id']\n h,w = image['height'], image['width']\n\n im_path = os.path.join(f\"data/{category}/images\", im_name)\n if not os.path.isfile(im_path):\n continue \n \n im = Image.open(im_path).convert('RGB')\n \n if im.size!=(w,h):\n continue \n if k==len(data['annotations']): # if this is true, there are more images than annotations\n break\n\n masks, result = detect_masks(im)\n print(f\"Image {im_id}\", end='\\r')\n lab_mask_corr = []\n flag = False\n while True:\n if k<len(data['annotations']) and data['annotations'][k]['image_id']==image_id:\n flag = True\n lab_mask_corr.extend(data['annotations'][k]['segmentation'])\n res_file[\"annotations\"].append(\n {\n \"id\": anno_id,\n \"image_id\": im_id,\n \"category_id\": category_id,\n \"segmentation\": data['annotations'][k]['segmentation'],\n \"area\": data['annotations'][k]['area'],\n \"bbox\": data['annotations'][k]['bbox'],\n \"iscrowd\": 0,\n \"attributes\": data['annotations'][k]['attributes']\n }\n )\n k += 1\n anno_id += 1\n else:\n break\n # check lab_mask_corr, since some images have no annotations\n if flag:\n res_file[\"images\"].append(\n {\n \"id\":im_id,\n \"file_name\": im_path,\n \"height\":h,\n \"width\":w,\n \"license\": 0\n }\n )\n if k<len(data['annotations']) and result[\"segments_info\"] and lab_mask_corr: \n\n lab_mask = convert_coco_poly_to_mask([lab_mask_corr], h,w).squeeze().numpy()\n for i in range(len(masks)):\n mask = masks[i]\n\n mask = remove_overlap(mask, lab_mask)\n bbox, cor_seg = get_bbox_segmask(mask, h,w)\n if not bbox:\n continue\n if result[\"segments_info\"][i]['isthing']:\n # here we are relabelling our data; these category_id values will be assigned when we combine it with coco_val\n res_file[\"annotations\"].append(\n {\n \"id\": anno_id,\n \"image_id\": im_id,\n \"category_id\": \"assing_later:miscellaneous\", \n \"segmentation\": cor_seg,\n \"area\": bbox[2]*bbox[3],\n \"bbox\": bbox,\n \"iscrowd\": 0,\n 
\"attributes\": data['annotations'][k]['attributes']\n }\n )\n anno_id += 1\n else:\n res_file[\"annotations\"].append(\n {\n \"id\": anno_id,\n \"image_id\": im_id,\n \"category_id\": \"assing_later:{}\".format(result[\"segments_info\"][i][\"category_id\"]), \n \"segmentation\": cor_seg,\n \"area\": bbox[2]*bbox[3],\n \"bbox\": bbox,\n \"iscrowd\": 0,\n \"attributes\": data['annotations'][k]['attributes']\n }\n )\n anno_id += 1\n \n im_id += 1\n print(f\"finished {category}: {category_id}\")\n category_id += 1\n\n with open(f\"data/{category}/updated_coco.json\", \"w\") as f:\n f.write(json.dumps(res_file))\n\n\n\n" ]
[ [ "torch.zeros", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.stack", "numpy.array", "torch.hub.load", "torch.as_tensor" ] ]
Super-Dainiu/DATA130007.01-Community-Detection-Link-Prediction-and-Node-Classification-on-Ego-Facebook-and-Cites
[ "1b5077342756ba6dc587a2af49abd2451319e5df" ]
[ "link-prediction/GIC/gic-dgl/train.py" ]
[ "\"Implementation is based on https://github.com/dmlc/dgl/tree/master/examples/pytorch/dgi\"\n\nimport argparse, time\nimport numpy as np\nimport networkx as nx\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\nfrom dgl.data import register_data_args, load_data, Coauthor, AmazonCoBuy\nfrom dgl.transform import add_self_loop, remove_self_loop\n\nfrom gic import GIC, Classifier\nimport scipy.sparse as sp\nfrom collections import Counter\nimport random\nfrom sklearn.preprocessing import OneHotEncoder\nfrom statistics import mean,stdev\n\nfrom utils import get_train_val_test_split, sample_per_class, remove_underrepresented_classes, count_parameters, _sample_mask\n\ndef evaluate(model, features, labels, mask):\n model.eval()\n with torch.no_grad():\n logits = model(features)\n logits = logits[mask]\n labels = labels[mask]\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n \ndef main(args):\n \n torch.manual_seed(1234)\n \n if args.dataset == 'cora' or args.dataset == 'citeseer' or args.dataset == 'pubmed':\n data = load_data(args)\n features = torch.FloatTensor(data.features)\n \n \n \n labels = torch.LongTensor(data.labels)\n in_feats = features.shape[1]\n g = data.graph\n if args.dataset == 'cora':\n g.remove_edges_from(nx.selfloop_edges(g))\n g.add_edges_from(zip(g.nodes(), g.nodes()))\n g = DGLGraph(g)\n attr_matrix = data.features\n labels = data.labels\n \n else:\n if args.dataset == 'physics':\n data = Coauthor('physics')\n if args.dataset == 'cs':\n data = Coauthor('cs')\n if args.dataset == 'computers':\n data = AmazonCoBuy('computers')\n if args.dataset == 'photo':\n data = AmazonCoBuy('photo')\n \n g = data\n g = data[0]\n attr_matrix = g.ndata['feat']\n labels = g.ndata['label']\n \n features = torch.FloatTensor(g.ndata['feat'])\n \n \n ### LCC of the graph\n n_components=1\n sparse_graph = g.adjacency_matrix_scipy(return_edge_ids=False)\n _, component_indices = sp.csgraph.connected_components(sparse_graph)\n component_sizes = np.bincount(component_indices)\n components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending\n nodes_to_keep = [\n idx for (idx, component) in enumerate(component_indices) if component in components_to_keep\n ]\n\n\n adj_matrix = sparse_graph[nodes_to_keep][:, nodes_to_keep]\n num_nodes = len(nodes_to_keep)\n g = adj_matrix\n g = DGLGraph(g)\n g = remove_self_loop(g)\n g = add_self_loop(g)\n g = DGLGraph(g)\n \n \n g.ndata['feat'] = attr_matrix[nodes_to_keep]\n features = torch.FloatTensor(g.ndata['feat'].float())\n if args.dataset == 'cora' or args.dataset == 'pubmed':\n features = features / (features.norm(dim=1)+ 1e-8)[:, None]\n g.ndata['label'] = labels[nodes_to_keep]\n labels = torch.LongTensor(g.ndata['label'])\n \n \n\n in_feats = features.shape[1]\n \n unique_l = np.unique(labels, return_counts=False)\n n_classes = len(unique_l)\n n_nodes = g.number_of_nodes()\n n_edges = g.number_of_edges()\n \n \n print('Number of nodes',n_nodes,'Number of edges', n_edges)\n \n \n \n enc = OneHotEncoder()\n enc.fit(labels.reshape(-1,1))\n ylabels = enc.transform(labels.reshape(-1,1)).toarray()\n \n\n for beta in [args.beta]:\n for K in [args.num_clusters]:\n for alpha in [args.alpha]:\n accs = []\n t_st = time.time()\n \n sets = \"imbalanced\"\n \n for k in range(2): #number of differnet trainings\n #print(k)\n \n random_state = np.random.RandomState()\n if sets==\"imbalanced\":\n train_idx, 
val_idx, test_idx = get_train_val_test_split(random_state,\n ylabels,\n train_examples_per_class=None, val_examples_per_class=None,\n test_examples_per_class=None,\n train_size=20*n_classes, val_size=30*n_classes, test_size=None)\n elif sets==\"balanced\":\n train_idx, val_idx, test_idx = get_train_val_test_split(random_state,\n ylabels,\n train_examples_per_class=20, val_examples_per_class=30,\n test_examples_per_class=None,\n train_size=None, val_size=None, test_size=None)\n else:\n (\"No such set configuration (imbalanced/balanced)\")\n\n\n n_nodes = len(nodes_to_keep)\n train_mask = np.zeros(n_nodes)\n train_mask[train_idx] = 1\n val_mask = np.zeros(n_nodes)\n val_mask[val_idx] = 1\n test_mask = np.zeros(n_nodes)\n test_mask[test_idx] = 1\n train_mask = torch.BoolTensor(train_mask)\n val_mask = torch.BoolTensor(val_mask)\n test_mask = torch.BoolTensor(test_mask)\n \n \"\"\"\n Planetoid Split for CORA, CiteSeer, PubMed\n train_mask = torch.BoolTensor(data.train_mask)\n val_mask = torch.BoolTensor(data.val_mask)\n test_mask = torch.BoolTensor(data.test_mask)\n train_mask2 = torch.BoolTensor(data.train_mask)\n val_mask2 = torch.BoolTensor(data.val_mask)\n test_mask2 = torch.BoolTensor(data.test_mask)\n \"\"\"\n \n if args.gpu < 0:\n cuda = False\n\n else:\n cuda = True\n torch.cuda.set_device(args.gpu)\n features = features.cuda()\n labels = labels.cuda()\n train_mask = train_mask.cuda()\n val_mask = val_mask.cuda()\n test_mask = test_mask.cuda()\n \n \n gic = GIC(g,\n in_feats,\n args.n_hidden,\n args.n_layers,\n nn.PReLU(args.n_hidden),\n args.dropout,\n K,\n beta, \n alpha\n )\n\n if cuda:\n gic.cuda()\n\n gic_optimizer = torch.optim.Adam(gic.parameters(),\n lr=args.gic_lr,\n weight_decay=args.weight_decay)\n\n # train GIC\n cnt_wait = 0\n best = 1e9\n best_t = 0\n dur = []\n\n \n\n for epoch in range(args.n_gic_epochs):\n gic.train()\n if epoch >= 3:\n t0 = time.time()\n\n gic_optimizer.zero_grad()\n loss = gic(features)\n #print(loss)\n loss.backward()\n gic_optimizer.step()\n\n if loss < best:\n best = loss\n best_t = epoch\n cnt_wait = 0\n torch.save(gic.state_dict(), 'best_gic.pkl')\n else:\n cnt_wait += 1\n\n if cnt_wait == args.patience:\n #print('Early stopping!')\n break\n\n if epoch >= 3:\n dur.append(time.time() - t0)\n\n #print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | \"\n #\"ETputs(KTEPS) {:.2f}\".format(epoch, np.mean(dur), loss.item(),\n #n_edges / np.mean(dur) / 1000))\n \n # train classifier\n #print('Loading {}th epoch'.format(best_t))\n gic.load_state_dict(torch.load('best_gic.pkl'))\n embeds = gic.encoder(features, corrupt=False)\n embeds = embeds / (embeds+ 1e-8).norm(dim=1)[:, None]\n embeds = embeds.detach()\n \n \n \n # create classifier model \n classifier = Classifier(args.n_hidden, n_classes)\n if cuda:\n classifier.cuda()\n\n classifier_optimizer = torch.optim.Adam(classifier.parameters(),\n lr=args.classifier_lr,\n weight_decay=args.weight_decay)\n\n\n dur = []\n best_a = 0\n cnt_wait = 0\n for epoch in range(args.n_classifier_epochs):\n classifier.train()\n if epoch >= 3:\n t0 = time.time()\n\n classifier_optimizer.zero_grad()\n preds = classifier(embeds)\n loss = F.nll_loss(preds[train_mask], labels[train_mask])\n loss.backward()\n classifier_optimizer.step()\n\n if epoch >= 3:\n dur.append(time.time() - t0)\n\n acc = evaluate(classifier, embeds, labels, val_mask) #+ evaluate(classifier, embeds, labels, train_mask)\n\n if acc > best_a and epoch > 100:\n best_a = acc\n best_t = epoch\n\n torch.save(classifier.state_dict(), 'best_class.pkl')\n\n 
#print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | \"\n #\"ETputs(KTEPS) {:.2f}\".format(epoch, np.mean(dur), loss.item(),\n #acc, n_edges / np.mean(dur) / 1000))\n\n \n acc = evaluate(classifier, embeds, labels, test_mask)\n accs.append(acc)\n\n \n\n \n \n print('=================== ',' alpha', alpha, ' beta ', beta, 'K', K)\n print(args.dataset, ' Acc (mean)', mean(accs),' (std)',stdev(accs))\n print('=================== time', int((time.time() - t_st)/60))\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='GIC')\n register_data_args(parser)\n parser.add_argument(\"--dropout\", type=float, default=0,\n help=\"dropout probability\")\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"gpu\")\n parser.add_argument(\"--gic-lr\", type=float, default=1e-3,\n help=\"dgi learning rate\")\n parser.add_argument(\"--classifier-lr\", type=float, default=1e-2,\n help=\"classifier learning rate\")\n parser.add_argument(\"--n-gic-epochs\", type=int, default=2000,\n help=\"number of training epochs\")\n parser.add_argument(\"--n-classifier-epochs\", type=int, default=1000,\n help=\"number of training epochs\")\n parser.add_argument(\"--n-hidden\", type=int, default=64,\n help=\"number of hidden gcn units\")\n parser.add_argument(\"--n-layers\", type=int, default=1,\n help=\"number of hidden gcn layers\")\n parser.add_argument(\"--weight-decay\", type=float, default=0.,\n help=\"Weight for L2 loss\")\n parser.add_argument(\"--patience\", type=int, default=50,\n help=\"early stop patience condition\")\n parser.add_argument(\"--self-loop\", action='store_true',\n help=\"graph self-loop (default=False)\")\n parser.add_argument('--b', dest='beta', type=int, default=100, help='')\n parser.add_argument('--c', dest='num_clusters', type=int, default=128, help='')\n parser.add_argument('--a', dest='alpha', type=float, default=0.5, help='')\n \n parser.set_defaults(self_loop=False)\n args = parser.parse_args()\n #print(args)\n \n main(args)\n" ]
[ [ "torch.BoolTensor", "torch.LongTensor", "torch.max", "torch.cuda.set_device", "numpy.unique", "torch.load", "torch.manual_seed", "torch.nn.PReLU", "sklearn.preprocessing.OneHotEncoder", "torch.sum", "torch.nn.functional.nll_loss", "torch.no_grad", "numpy.bincount", "torch.FloatTensor", "numpy.argsort", "numpy.random.RandomState", "numpy.zeros", "scipy.sparse.csgraph.connected_components" ] ]
anonymouscjc/Computational-Cost-Aware-Control-Using-Hierarchical-Reinforcement-Learning
[ "e554bbc267328b1d3c6d815189a62b81b1ba0edf" ]
[ "stable_baselines/deepq/dqn.py" ]
[ "from functools import partial\n\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport os\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\nfrom stable_baselines.common.vec_env import VecEnv\nfrom stable_baselines.common.schedules import LinearSchedule\nfrom stable_baselines.deepq.build_graph import build_train\nfrom stable_baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\nfrom stable_baselines.deepq.policies import DQNPolicy\nfrom stable_baselines.a2c.utils import total_episode_reward_logger\n\nimport functools\nfrom stable_baselines.her.utils import HERGoalEnvWrapper\nfrom stable_baselines.her.replay_buffer import HindsightExperienceReplayWrapper, KEY_TO_GOAL_STRATEGY\nfrom stable_baselines.common.math_util import unscale_action, scale_action\nfrom stable_baselines.ppo2.ppo2 import safe_mean, get_schedule_fn\nimport statistics\n\nclass DQN(OffPolicyRLModel):\n \"\"\"\n The DQN model class.\n DQN paper: https://arxiv.org/abs/1312.5602\n Dueling DQN: https://arxiv.org/abs/1511.06581\n Double-Q Learning: https://arxiv.org/abs/1509.06461\n Prioritized Experience Replay: https://arxiv.org/abs/1511.05952\n\n :param policy: (DQNPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) discount factor\n :param learning_rate: (float) learning rate for adam optimizer\n :param buffer_size: (int) size of the replay buffer\n :param exploration_fraction: (float) fraction of entire training period over which the exploration rate is\n annealed\n :param exploration_final_eps: (float) final value of random action probability\n :param exploration_initial_eps: (float) initial value of random action probability\n :param train_freq: (int) update the model every `train_freq` steps. set to None to disable printing\n :param batch_size: (int) size of a batched sampled from replay buffer for training\n :param double_q: (bool) Whether to enable Double-Q learning or not.\n :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts\n :param target_network_update_freq: (int) update the target network every `target_network_update_freq` steps.\n :param prioritized_replay: (bool) if True prioritized replay buffer will be used.\n :param prioritized_replay_alpha: (float)alpha parameter for prioritized replay buffer.\n It determines how much prioritization is used, with alpha=0 corresponding to the uniform case.\n :param prioritized_replay_beta0: (float) initial value of beta for prioritized replay buffer\n :param prioritized_replay_beta_iters: (int) number of iterations over which beta will be annealed from initial\n value to 1.0. 
If set to None equals to max_timesteps.\n :param prioritized_replay_eps: (float) epsilon to add to the TD errors when updating priorities.\n :param param_noise: (bool) Whether or not to apply noise to the parameters of the policy.\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n def _create_replay_wrapper(self, env):\n \"\"\"\n Wrap the environment in a HERGoalEnvWrapper\n if needed and create the replay buffer wrapper.\n \"\"\"\n if not isinstance(env, HERGoalEnvWrapper):\n env = HERGoalEnvWrapper(env)\n\n self.env = env\n self.n_sampled_goal = 4\n self.goal_selection_strategy = 'future'\n # NOTE: we cannot do that check directly with VecEnv\n # maybe we can try calling `compute_reward()` ?\n # assert isinstance(self.env, gym.GoalEnv), \"HER only supports gym.GoalEnv\"\n\n self.replay_wrapper = functools.partial(HindsightExperienceReplayWrapper,\n n_sampled_goal=self.n_sampled_goal,\n goal_selection_strategy=self.goal_selection_strategy,\n wrapped_env=self.env)\n\n def __init__(self, policy, env, gamma=0.99, learning_rate=5e-4, buffer_size=50000, exploration_fraction=0.1,\n exploration_final_eps=0.02, exploration_initial_eps=1.0, train_freq=1, batch_size=32, double_q=True,\n learning_starts=1000, target_network_update_freq=500, prioritized_replay=False,\n prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None,\n prioritized_replay_eps=1e-6, param_noise=False,\n n_cpu_tf_sess=None, verbose=0, tensorboard_log=None,\n _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None, n_actions=2):\n\n # TODO: replay_buffer refactoring\n super(DQN, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=DQNPolicy,\n requires_vec_env=False, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n # for HER algorithm\n if self.env is not None and 'Fetch' in self.env.__str__():\n self._create_replay_wrapper(self.env)\n self.observation_space = self.env.observation_space\n\n self.param_noise = param_noise\n self.learning_starts = learning_starts\n self.train_freq = train_freq\n self.prioritized_replay = prioritized_replay\n self.prioritized_replay_eps = prioritized_replay_eps\n self.batch_size = batch_size\n self.target_network_update_freq = target_network_update_freq\n self.prioritized_replay_alpha = prioritized_replay_alpha\n self.prioritized_replay_beta0 = prioritized_replay_beta0\n self.prioritized_replay_beta_iters = prioritized_replay_beta_iters\n self.exploration_final_eps = exploration_final_eps\n self.exploration_initial_eps = exploration_initial_eps\n self.exploration_fraction = exploration_fraction\n self.buffer_size = buffer_size\n self.learning_rate = learning_rate\n self.gamma = gamma\n self.tensorboard_log = tensorboard_log\n 
self.full_tensorboard_log = full_tensorboard_log\n self.double_q = double_q\n\n self.graph = None\n self.sess = None\n self._train_step = None\n self.step_model = None\n self.update_target = None\n self.act = None\n self.proba_step = None\n self.replay_buffer = None\n self.beta_schedule = None\n self.exploration = None\n self.params = None\n self.summary = None\n\n self.n_actions = n_actions\n self.macro_len = 5\n self.macro_count = 0 # when macro_count % macro_len == 0, resample macro action\n\n if _init_setup_model:\n self.setup_model()\n\n def _get_pretrain_placeholders(self):\n policy = self.step_model\n return policy.obs_ph, tf.placeholder(tf.int32, [None]), policy.q_values\n\n def setup_model(self):\n\n with SetVerbosity(self.verbose):\n # decision net: produce categorical distribution\n self.action_space = gym.spaces.discrete.Discrete(self.n_actions)\n\n assert not isinstance(self.action_space, gym.spaces.Box), \\\n \"Error: DQN cannot output a gym.spaces.Box action space.\"\n\n # If the policy is wrap in functool.partial (e.g. to disable dueling)\n # unwrap it to check the class type\n if isinstance(self.policy, partial):\n test_policy = self.policy.func\n else:\n test_policy = self.policy\n assert issubclass(test_policy, DQNPolicy), \"Error: the input policy for the DQN model must be \" \\\n \"an instance of DQNPolicy.\"\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.set_random_seed(self.seed)\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n\n self.act, self._train_step, self.update_target, self.step_model = build_train(\n q_func=partial(self.policy, **self.policy_kwargs),\n ob_space=self.observation_space,\n ac_space=self.action_space,\n optimizer=optimizer,\n gamma=self.gamma,\n grad_norm_clipping=10,\n param_noise=self.param_noise,\n sess=self.sess,\n full_tensorboard_log=self.full_tensorboard_log,\n double_q=self.double_q\n )\n self.proba_step = self.step_model.proba_step\n self.params = tf_util.get_trainable_vars(\"deepq\")\n\n # Initialize the parameters and copy them to the target network.\n tf_util.initialize(self.sess)\n self.update_target(sess=self.sess)\n\n self.summary = tf.summary.merge_all()\n\n def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name=\"DQN\",\n reset_num_timesteps=True, replay_wrapper=None, distinct_replay_buffer=False):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n for i, m in enumerate(self.sub_models):\n m.learning_rate = get_schedule_fn(m.learning_rate)\n if len(self.replay_wrappers) != 0:\n m.replay_buffer = self.replay_wrappers[i](m.replay_buffer)\n m._setup_learn()\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn()\n\n # Create the replay buffer\n if self.prioritized_replay:\n self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha)\n if self.prioritized_replay_beta_iters is None:\n prioritized_replay_beta_iters = total_timesteps\n else:\n prioritized_replay_beta_iters = self.prioritized_replay_beta_iters\n self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters,\n initial_p=self.prioritized_replay_beta0,\n final_p=1.0)\n else:\n self.replay_buffer = ReplayBuffer(self.buffer_size)\n self.beta_schedule = None\n\n if replay_wrapper is not None:\n assert not self.prioritized_replay, \"Prioritized replay buffer is not supported by 
HER\"\n self.replay_buffer = replay_wrapper(self.replay_buffer)\n\n # Create the schedule for exploration starting from 1.\n self.exploration = LinearSchedule(schedule_timesteps=int(self.exploration_fraction * total_timesteps),\n initial_p=self.exploration_initial_eps,\n final_p=self.exploration_final_eps)\n\n episode_rewards = [0.0]\n episode_successes = []\n obs = self.env.reset()\n reset = True\n macro_count = 0\n macro_len = self.macro_len\n macro_choices = []\n n_updates = 0\n\n for step in range(total_timesteps):\n if callback is not None:\n # Only stop training if return value is False, not when it is None. This is for backwards\n # compatibility with callbacks that have no return statement.\n if callback(locals(), globals()) is False:\n break\n\n # Take action and update exploration to the newest value\n kwargs = {}\n if not self.param_noise:\n update_eps = self.exploration.value(self.num_timesteps)\n update_param_noise_threshold = 0.\n else:\n update_eps = 0.\n # Compute the threshold such that the KL divergence between perturbed and non-perturbed\n # policy is comparable to eps-greedy exploration with eps = exploration.value(t).\n # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017\n # for detailed explanation.\n update_param_noise_threshold = \\\n -np.log(1. - self.exploration.value(self.num_timesteps) +\n self.exploration.value(self.num_timesteps) / float(self.env.action_space.n))\n kwargs['reset'] = reset\n kwargs['update_param_noise_threshold'] = update_param_noise_threshold\n kwargs['update_param_noise_scale'] = True\n with self.sess.as_default():\n if reset or macro_count % macro_len == 0:\n macro_action = self.act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]\n # macro_action = 1\n macro_obs = obs\n reward_in_one_macro = 0\n macro_count += 1\n macro_choices.append(macro_action)\n\n # use sub_model to decide action\n # env_action = self.sub_models[macro_action]\n current_sub = self.sub_models[macro_action]\n if self.num_timesteps < self.learning_starts or np.random.rand() < current_sub.random_exploration:\n # actions sampled from action space are from range specific to the environment\n # but algorithm operates on tanh-squashed actions therefore simple scaling is used\n unscaled_action = self.env.action_space.sample()\n action = scale_action(self.env.action_space, unscaled_action)\n else:\n action = current_sub.policy_tf.step(obs[None], deterministic=False).flatten()\n # Add noise to the action (improve exploration,\n # not needed in general)\n if current_sub.action_noise is not None:\n action = np.clip(action + current_sub.action_noise(), -1, 1)\n # inferred actions need to be transformed to environment action_space before stepping\n unscaled_action = unscale_action(self.env.action_space, action)\n assert action.shape == self.env.action_space.shape\n\n reset = False\n new_obs, rew, done, info = self.env.step(unscaled_action)\n episode_rewards[-1] += rew\n # rew -= self.args.policy_cost_coef * self.args.sub_policy_costs[macro_action]\n reward_in_one_macro += rew - self.args.policy_cost_coef * self.args.sub_policy_costs[macro_action]\n # Store transition in the replay buffer.\n if macro_count % macro_len == 0 or done:\n self.replay_buffer.add(macro_obs, macro_action, reward_in_one_macro, new_obs, float(done))\n for i, m in enumerate(self.sub_models):\n if distinct_replay_buffer:\n if i == macro_action:\n m.replay_buffer.add(obs, action, rew, new_obs, float(done))\n else:\n m.replay_buffer.add(obs, action, rew, new_obs, float(done))\n 
obs = new_obs\n\n if writer is not None:\n ep_rew = np.array([rew]).reshape((1, -1))\n ep_done = np.array([done]).reshape((1, -1))\n total_episode_reward_logger(self.episode_reward, ep_rew, ep_done, writer,\n self.num_timesteps)\n\n # print(\"step: %d, done: %d\" % (self.num_timesteps, done))\n if done:\n maybe_is_success = info.get('is_success')\n if maybe_is_success is not None:\n episode_successes.append(float(maybe_is_success))\n if not isinstance(self.env, VecEnv):\n obs = self.env.reset()\n episode_rewards.append(0.0)\n reset = True\n macro_action = None\n macro_count = 0\n prev_macro_choices = macro_choices\n macro_choices = []\n\n\n\n # Do not train if the warmup phase is not over\n # or if there are not enough samples in the replay buffer\n can_sample = self.replay_buffer.can_sample(self.batch_size)\n if can_sample and self.num_timesteps > self.learning_starts \\\n and self.num_timesteps % self.train_freq == 0:\n # Minimize the error in Bellman's equation on a batch sampled from replay buffer.\n # pytype:disable=bad-unpacking\n if self.prioritized_replay:\n assert self.beta_schedule is not None, \\\n \"BUG: should be LinearSchedule when self.prioritized_replay True\"\n experience = self.replay_buffer.sample(self.batch_size,\n beta=self.beta_schedule.value(self.num_timesteps))\n (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience\n else:\n obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.batch_size)\n weights, batch_idxes = np.ones_like(rewards), None\n # pytype:enable=bad-unpacking\n\n if writer is not None:\n # run loss backprop with summary, but once every 100 steps save the metadata\n # (memory, compute time, ...)\n if (1 + self.num_timesteps) % 100 == 0:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,\n dones, weights, sess=self.sess, options=run_options,\n run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % self.num_timesteps)\n else:\n summary, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1,\n dones, weights, sess=self.sess)\n writer.add_summary(summary, self.num_timesteps)\n else:\n _, td_errors = self._train_step(obses_t, actions, rewards, obses_tp1, obses_tp1, dones, weights,\n sess=self.sess)\n\n if self.prioritized_replay:\n new_priorities = np.abs(td_errors) + self.prioritized_replay_eps\n assert isinstance(self.replay_buffer, PrioritizedReplayBuffer)\n self.replay_buffer.update_priorities(batch_idxes, new_priorities)\n\n if can_sample and self.num_timesteps > self.learning_starts and \\\n self.num_timesteps % self.target_network_update_freq == 0:\n # Update target network periodically.\n self.update_target(sess=self.sess)\n\n\n if step % self.sub_models[0].train_freq == 0:\n mb_infos_vals = []\n for m in self.sub_models:\n # Update policy, critics and target networks\n for grad_step in range(m.gradient_steps):\n # Break if the warmup phase is not over\n # or if there are not enough samples in the replay buffer\n if not m.replay_buffer.can_sample(m.batch_size) \\\n or self.num_timesteps < m.learning_starts:\n break\n n_updates += 1\n # Compute current learning_rate\n frac = 1.0 - step / total_timesteps\n current_lr = m.learning_rate(frac)\n # Update policy and critics (q functions)\n mb_infos_vals.append(m._train_step(step, writer, current_lr))\n # Update target network\n if (step + grad_step) % 
m.target_update_interval == 0:\n # Update target network\n m.sess.run(m.target_update_op)\n\n if len(episode_rewards[-101:-1]) == 0:\n mean_100ep_reward = -np.inf\n else:\n mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)\n\n num_episodes = len(episode_rewards)\n # print(done, log_interval, len(episode_rewards), self.num_timesteps)\n if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:\n logger.record_tabular(\"steps\", self.num_timesteps)\n prev_macro_choices = np.array(prev_macro_choices)\n macro_choices_ratio = ['%.2f' % ((prev_macro_choices[prev_macro_choices == i]).size / prev_macro_choices.size) for i in range(self.n_actions)]\n logger.record_tabular(\"macro choices\", macro_choices_ratio)\n logger.record_tabular(\"episodes\", num_episodes)\n if len(episode_successes) > 0:\n logger.logkv(\"success rate\", np.mean(episode_successes[-100:]))\n logger.record_tabular(\"mean 100 episode reward\", mean_100ep_reward)\n logger.record_tabular(\"% time spent exploring\",\n int(100 * self.exploration.value(self.num_timesteps)))\n logger.logkv(\"n_updates_of_sub\", n_updates)\n logger.dump_tabular()\n print(\"macro choices\", prev_macro_choices)\n\n self.num_timesteps += 1\n\n return self\n\n def eval(self, total_episodes, callback=None, log_interval=100, tb_log_name=\"DQN\",\n reset_num_timesteps=True, replay_wrapper=None):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n episode_rewards = [0.0]\n episode_successes = []\n obs = self.env.reset()\n reset = True\n macro_count = 0\n macro_len = self.macro_len\n macro_choices = []\n n_updates = 0\n macro_action = None\n\n # for step in range(total_timesteps):\n while True:\n with self.sess.as_default():\n if reset or macro_count % macro_len == 0:\n # macro_action = self.act(np.array(obs)[None], update_eps=0, **{})[0]\n macro_actions, _, _ = self.step_model.step(np.array(obs)[None], deterministic=False)\n macro_action = macro_actions[0]\n # macro_action = 1\n macro_obs = obs\n reward_in_one_macro = 0\n macro_count += 1\n macro_choices.append(macro_action)\n\n current_sub = self.sub_models[macro_action]\n action = current_sub.policy_tf.step(obs[None], deterministic=True).flatten()\n # Add noise to the action (improve exploration,\n # not needed in general)\n if current_sub.action_noise is not None:\n action = np.clip(action + current_sub.action_noise(), -1, 1)\n # inferred actions need to be transformed to environment action_space before stepping\n unscaled_action = unscale_action(self.env.action_space, action)\n assert action.shape == self.env.action_space.shape\n\n reset = False\n new_obs, rew, done, info = self.env.step(unscaled_action)\n episode_rewards[-1] += rew\n rew -= self.args.policy_cost_coef * self.args.sub_policy_costs[macro_action]\n reward_in_one_macro += rew\n obs = new_obs\n\n\n # print(\"step: %d, done: %d\" % (self.num_timesteps, done))\n if done:\n maybe_is_success = info.get('is_success')\n if maybe_is_success is not None:\n episode_successes.append(float(maybe_is_success))\n if not isinstance(self.env, VecEnv):\n obs = self.env.reset()\n episode_rewards.append(0.0)\n reset = True\n macro_action = None\n macro_count = 0\n print(\"=\" * 70)\n print(\"macro_choices:\", macro_choices)\n print(\"return:\", episode_rewards[-2])\n print(\"=\" * 70)\n prev_macro_choices = macro_choices\n macro_choices = []\n if 
len(episode_rewards) - 1 == total_episodes:\n break\n\n if len(episode_rewards[-101:-1]) == 0:\n mean_100ep_reward = -np.inf\n else:\n mean_100ep_reward = round(float(np.mean(episode_rewards[-101:-1])), 1)\n\n num_episodes = len(episode_rewards)\n # print(done, log_interval, len(episode_rewards), self.num_timesteps)\n if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0:\n logger.record_tabular(\"steps\", self.num_timesteps)\n logger.record_tabular(\"macro choices\", np.mean(prev_macro_choices))\n logger.record_tabular(\"episodes\", num_episodes)\n if len(episode_successes) > 0:\n logger.logkv(\"success rate\", np.mean(episode_successes[-100:]))\n logger.record_tabular(\"mean 100 episode reward\", mean_100ep_reward)\n logger.logkv(\"n_updates_of_sub\", n_updates)\n logger.dump_tabular()\n print(\"macro choices\", prev_macro_choices)\n\n self.num_timesteps += 1\n\n return self\n def predict(self, observation, state=None, mask=None, deterministic=True, args=None):\n observation = np.array(observation)\n vectorized_env = self._is_vectorized_observation(observation, self.observation_space)\n\n observation = observation.reshape((-1,) + self.observation_space.shape)\n with self.sess.as_default():\n if self.macro_count % self.macro_len == 0:\n macro_actions, _, _ = self.step_model.step(observation, deterministic=deterministic)\n # macro_actions = self.act(observation, update_eps=0)\n self.macro_act = macro_actions[0] # not supporting vectorized_env\n if args.eval_certain_sub != None:\n self.macro_act = args.eval_certain_sub\n self.macro_count += 1\n\n # Sample from sub_policy\n current_sub = self.sub_models[self.macro_act]\n action = current_sub.policy_tf.step(observation, deterministic=deterministic)\n action = action.reshape((-1,) + self.env.action_space.shape) # reshape to the correct action shape\n # inferred actions need to be transformed to environment action_space before stepping\n unscaled_action = unscale_action(self.env.action_space, action)\n\n unscaled_action = unscaled_action[0] # not supporting vectorized_env\n\n return self.macro_act, unscaled_action, None\n\n def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):\n observation = np.array(observation)\n vectorized_env = self._is_vectorized_observation(observation, self.observation_space)\n\n observation = observation.reshape((-1,) + self.observation_space.shape)\n actions_proba = self.proba_step(observation, state, mask)\n\n if actions is not None: # comparing the action distribution, to given actions\n actions = np.array([actions])\n assert isinstance(self.action_space, gym.spaces.Discrete)\n actions = actions.reshape((-1,))\n assert observation.shape[0] == actions.shape[0], \"Error: batch sizes differ for actions and observations.\"\n actions_proba = actions_proba[np.arange(actions.shape[0]), actions]\n # normalize action proba shape\n actions_proba = actions_proba.reshape((-1, 1))\n if logp:\n actions_proba = np.log(actions_proba)\n\n if not vectorized_env:\n if state is not None:\n raise ValueError(\"Error: The environment must be vectorized when using recurrent policies.\")\n actions_proba = actions_proba[0]\n\n return actions_proba\n\n def get_parameter_list(self):\n return self.params\n\n def save(self, save_path, cloudpickle=False):\n # params\n data = {\n \"double_q\": self.double_q,\n \"param_noise\": self.param_noise,\n \"learning_starts\": self.learning_starts,\n \"train_freq\": self.train_freq,\n \"prioritized_replay\": 
self.prioritized_replay,\n \"prioritized_replay_eps\": self.prioritized_replay_eps,\n \"batch_size\": self.batch_size,\n \"target_network_update_freq\": self.target_network_update_freq,\n \"prioritized_replay_alpha\": self.prioritized_replay_alpha,\n \"prioritized_replay_beta0\": self.prioritized_replay_beta0,\n \"prioritized_replay_beta_iters\": self.prioritized_replay_beta_iters,\n \"exploration_final_eps\": self.exploration_final_eps,\n \"exploration_fraction\": self.exploration_fraction,\n \"learning_rate\": self.learning_rate,\n \"gamma\": self.gamma,\n \"verbose\": self.verbose,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"policy\": self.policy,\n \"n_envs\": self.n_envs,\n \"n_cpu_tf_sess\": self.n_cpu_tf_sess,\n \"seed\": self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)\n" ]
[ [ "tensorflow.Graph", "numpy.log", "numpy.ones_like", "numpy.abs", "numpy.arange", "tensorflow.RunOptions", "tensorflow.RunMetadata", "tensorflow.placeholder", "tensorflow.summary.merge_all", "numpy.random.rand", "tensorflow.train.AdamOptimizer", "numpy.mean", "numpy.array" ] ]
indra622/FBK-fairseq
[ "4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5" ]
[ "fairseq/modules/dynamic_convolution.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom fairseq import utils\r\nfrom fairseq.incremental_decoding_utils import with_incremental_state\r\nfrom fairseq.modules.fairseq_dropout import FairseqDropout\r\n\r\nfrom .unfold import unfold1d\r\n\r\n\r\ndef DynamicConv(\r\n input_size,\r\n kernel_size=1,\r\n padding_l=None,\r\n num_heads=1,\r\n weight_dropout=0.0,\r\n weight_softmax=False,\r\n renorm_padding=False,\r\n bias=False,\r\n conv_bias=False,\r\n query_size=None,\r\n in_proj=False,\r\n):\r\n if torch.cuda.is_available():\r\n try:\r\n from fairseq.modules.dynamicconv_layer import DynamicconvLayer\r\n\r\n return DynamicconvLayer(\r\n input_size,\r\n kernel_size=kernel_size,\r\n padding_l=padding_l,\r\n num_heads=num_heads,\r\n weight_dropout=weight_dropout,\r\n weight_softmax=weight_softmax,\r\n renorm_padding=renorm_padding,\r\n bias=bias,\r\n conv_bias=conv_bias,\r\n query_size=query_size,\r\n )\r\n except ImportError as e:\r\n print(e)\r\n return DynamicConv1dTBC(\r\n input_size,\r\n kernel_size=kernel_size,\r\n padding_l=padding_l,\r\n num_heads=num_heads,\r\n weight_dropout=weight_dropout,\r\n weight_softmax=weight_softmax,\r\n renorm_padding=renorm_padding,\r\n bias=bias,\r\n conv_bias=conv_bias,\r\n query_size=query_size,\r\n )\r\n\r\n\r\ndef Linear(in_features, out_features, bias=True):\r\n m = nn.Linear(in_features, out_features, bias)\r\n nn.init.xavier_uniform_(m.weight)\r\n if bias:\r\n nn.init.constant_(m.bias, 0.0)\r\n return m\r\n\r\n\r\n@with_incremental_state\r\nclass DynamicConv1dTBC(nn.Module):\r\n \"\"\"Dynamic lightweight convolution taking T x B x C inputs\r\n Args:\r\n input_size: # of channels of the input\r\n kernel_size: convolution channels\r\n padding_l: padding to the left when using \"same\" padding\r\n num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)\r\n weight_dropout: the drop rate of the DropConnect to drop the weight\r\n weight_softmax: normalize the weight with softmax before the convolution\r\n renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)\r\n bias: use bias\r\n conv_bias: bias of the convolution\r\n query_size: specified when feeding a different input as the query\r\n in_proj: project the input and generate the filter together\r\n\r\n Shape:\r\n Input: TxBxC, i.e. (timesteps, batch_size, input_size)\r\n Output: TxBxC, i.e. 
(timesteps, batch_size, input_size)\r\n\r\n Attributes:\r\n weight: the learnable weights of the module of shape\r\n `(num_heads, 1, kernel_size)`\r\n bias: the learnable bias of the module of shape `(input_size)`\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n input_size,\r\n kernel_size=1,\r\n padding_l=None,\r\n num_heads=1,\r\n weight_dropout=0.0,\r\n weight_softmax=False,\r\n renorm_padding=False,\r\n bias=False,\r\n conv_bias=False,\r\n query_size=None,\r\n in_proj=False,\r\n ):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.query_size = input_size if query_size is None else query_size\r\n self.kernel_size = kernel_size\r\n self.padding_l = padding_l\r\n self.num_heads = num_heads\r\n self.weight_dropout_module = FairseqDropout(\r\n weight_dropout, module_name=self.__class__.__name__\r\n )\r\n self.weight_softmax = weight_softmax\r\n self.renorm_padding = renorm_padding\r\n\r\n if in_proj:\r\n self.weight_linear = Linear(\r\n self.input_size, self.input_size + num_heads * kernel_size * 1\r\n )\r\n else:\r\n self.weight_linear = Linear(\r\n self.query_size, num_heads * kernel_size * 1, bias=bias\r\n )\r\n if conv_bias:\r\n self.conv_bias = nn.Parameter(torch.Tensor(input_size))\r\n else:\r\n self.conv_bias = None\r\n self.reset_parameters()\r\n\r\n @property\r\n def in_proj(self):\r\n return (\r\n self.weight_linear.out_features\r\n == self.input_size + self.num_heads * self.kernel_size\r\n )\r\n\r\n def reset_parameters(self):\r\n self.weight_linear.reset_parameters()\r\n if self.conv_bias is not None:\r\n nn.init.constant_(self.conv_bias, 0.0)\r\n\r\n def forward(self, x, incremental_state=None, query=None, unfold=None):\r\n \"\"\"Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C\r\n args:\r\n x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)\r\n incremental_state: A dict to keep the state\r\n unfold: unfold the input or not. 
If not, we use the matrix trick instead\r\n query: use the specified query to predict the conv filters\r\n \"\"\"\r\n unfold = (\r\n x.size(0) > 512 if unfold is None else unfold\r\n ) # use unfold mode as default for long sequence to save memory\r\n unfold = unfold or (incremental_state is not None)\r\n assert query is None or not self.in_proj\r\n\r\n if query is None:\r\n query = x\r\n if unfold:\r\n output = self._forward_unfolded(x, incremental_state, query)\r\n else:\r\n output = self._forward_expanded(x, incremental_state, query)\r\n\r\n if self.conv_bias is not None:\r\n output = output + self.conv_bias.view(1, 1, -1)\r\n return output\r\n\r\n def _forward_unfolded(self, x, incremental_state, query):\r\n \"\"\"The conventional implementation of convolutions.\r\n Unfolding the input by having a window shifting to the right.\"\"\"\r\n T, B, C = x.size()\r\n K, H = self.kernel_size, self.num_heads\r\n R = C // H\r\n assert R * H == C == self.input_size\r\n\r\n if self.in_proj:\r\n proj = self.weight_linear(x)\r\n x = proj.narrow(2, 0, self.input_size).contiguous()\r\n weight = (\r\n proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)\r\n )\r\n else:\r\n weight = self.weight_linear(query).view(T * B * H, -1)\r\n\r\n # renorm_padding is only implemented in _forward_expanded\r\n assert not self.renorm_padding or incremental_state is not None\r\n\r\n if incremental_state is not None:\r\n input_buffer = self._get_input_buffer(incremental_state)\r\n if input_buffer is None:\r\n input_buffer = x.new()\r\n x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)\r\n if self.kernel_size > 1:\r\n self._set_input_buffer(\r\n incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]\r\n )\r\n x_unfold = x_unfold.view(T * B * H, R, -1)\r\n else:\r\n padding_l = self.padding_l\r\n if K > T and padding_l == K - 1:\r\n weight = weight.narrow(1, K - T, T)\r\n K, padding_l = T, T - 1\r\n # unfold the input: T x B x C --> T' x B x C x K\r\n x_unfold = unfold1d(x, K, padding_l, 0)\r\n x_unfold = x_unfold.view(T * B * H, R, K)\r\n\r\n if self.weight_softmax and not self.renorm_padding:\r\n weight = F.softmax(weight, dim=1)\r\n weight = weight.narrow(1, 0, K)\r\n\r\n if incremental_state is not None:\r\n weight = weight[:, -x_unfold.size(2) :]\r\n K = weight.size(1)\r\n\r\n if self.weight_softmax and self.renorm_padding:\r\n weight = F.softmax(weight, dim=1)\r\n\r\n weight = self.weight_dropout_module(weight, inplace=False)\r\n\r\n output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1\r\n output = output.view(T, B, C)\r\n return output\r\n\r\n def _forward_expanded(self, x, incremental_stat, query):\r\n \"\"\"Turn the convolution filters into band matrices and do matrix multiplication.\r\n This is faster when the sequence is short, but less memory efficient.\r\n This is not used in the decoder during inference.\r\n \"\"\"\r\n T, B, C = x.size()\r\n K, H = self.kernel_size, self.num_heads\r\n R = C // H\r\n assert R * H == C == self.input_size\r\n if self.in_proj:\r\n proj = self.weight_linear(x)\r\n x = proj.narrow(2, 0, self.input_size).contiguous()\r\n weight = (\r\n proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)\r\n )\r\n else:\r\n weight = self.weight_linear(query).view(T * B * H, -1)\r\n\r\n if not self.renorm_padding:\r\n if self.weight_softmax:\r\n weight = F.softmax(weight, dim=1)\r\n weight = self.weight_dropout_module(weight, inplace=False)\r\n weight = weight.narrow(1, 0, K).contiguous()\r\n weight = weight.view(T, B * H, 
K).transpose(0, 1)\r\n\r\n x = x.view(T, B * H, R).transpose(0, 1)\r\n if self.weight_softmax and self.renorm_padding:\r\n # turn the convolution filters into band matrices\r\n weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float(\"-inf\"))\r\n weight_expanded.as_strided(\r\n (B * H, T, K), (T * (T + K - 1), T + K, 1)\r\n ).copy_(weight)\r\n weight_expanded = weight_expanded.narrow(2, self.padding_l, T)\r\n # normalize the weight over valid positions like self-attention\r\n weight_expanded = F.softmax(weight_expanded, dim=2)\r\n weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)\r\n else:\r\n P = self.padding_l\r\n # For efficieny, we cut the kernel size and reduce the padding when the kernel is larger than the length\r\n if K > T and P == K - 1:\r\n weight = weight.narrow(2, K - T, T)\r\n K, P = T, T - 1\r\n # turn the convolution filters into band matrices\r\n weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)\r\n weight_expanded.as_strided(\r\n (B * H, T, K), (T * (T + K - 1), T + K, 1)\r\n ).copy_(weight)\r\n weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T\r\n output = torch.bmm(weight_expanded, x)\r\n output = output.transpose(0, 1).contiguous().view(T, B, C)\r\n return output\r\n\r\n def reorder_incremental_state(self, incremental_state, new_order):\r\n input_buffer = self._get_input_buffer(incremental_state)\r\n if input_buffer is not None:\r\n input_buffer = input_buffer.index_select(1, new_order)\r\n self._set_input_buffer(incremental_state, input_buffer)\r\n\r\n def _get_input_buffer(self, incremental_state):\r\n return utils.get_incremental_state(self, incremental_state, \"input_buffer\")\r\n\r\n def _set_input_buffer(self, incremental_state, new_buffer):\r\n return utils.set_incremental_state(\r\n self, incremental_state, \"input_buffer\", new_buffer\r\n )\r\n\r\n def extra_repr(self):\r\n s = \"{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}\".format(\r\n self.input_size,\r\n self.kernel_size,\r\n self.padding_l,\r\n self.num_heads,\r\n self.weight_softmax,\r\n self.conv_bias is not None,\r\n self.renorm_padding,\r\n self.in_proj,\r\n )\r\n\r\n if self.query_size != self.input_size:\r\n s += \", query_size={}\".format(self.query_size)\r\n if self.weight_dropout_module.p > 0.0:\r\n s += \", weight_dropout={}\".format(self.weight_dropout_module.p)\r\n return s\r\n" ]
[ [ "torch.nn.functional.softmax", "torch.Tensor", "torch.nn.init.constant_", "torch.nn.Linear", "torch.bmm", "torch.cuda.is_available", "torch.nn.init.xavier_uniform_" ] ]
rah9eu/p3
[ "530628be7b7a8dd3e6199c3bebebdbf104005e5f" ]
[ "nnvm/tvm/topi/tests/python/test_topi_pooling.py" ]
[ "\"\"\"Test code for pooling\"\"\"\nimport numpy as np\nimport tvm\nimport topi\nimport math\nfrom topi.util import get_const_tuple\n\ndef verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode):\n iw = ih\n kw = kh\n sw = sh\n ph, pw = padding\n A = tvm.placeholder((n, ic, ih, iw), name='A')\n B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding,\n pool_type=pool_type, ceil_mode=ceil_mode)\n B = topi.nn.relu(B)\n dtype = A.dtype\n\n bshape = get_const_tuple(B.shape)\n ashape = get_const_tuple(A.shape)\n if ceil_mode:\n assert bshape[2] == int(math.ceil(float(ashape[2] - kh + ph * 2) / sh) + 1)\n assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pw * 2) / sw) + 1)\n else:\n assert bshape[2] == int(math.floor(float(ashape[2] - kh + ph * 2) / sh) + 1)\n assert bshape[3] == int(math.floor(float(ashape[3] - kw + pw * 2) / sw) + 1)\n\n\n a_np = np.random.uniform(size=(n, ic, ih, iw)).astype(dtype)\n pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)\n no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))\n pad_np[np.ix_(*no_zero)] = a_np\n _, oc, oh, ow = get_const_tuple(B.shape)\n b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)\n\n if pool_type == 'avg':\n for i in range(oh):\n for j in range(ow):\n b_np[:,:,i,j] = np.mean(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))\n elif pool_type =='max':\n for i in range(oh):\n for j in range(ow):\n b_np[:,:,i,j] = np.max(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))\n b_np = np.maximum(b_np, 0.0)\n\n def check_device(device):\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_pool(B)\n ctx = tvm.context(device, 0)\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)\n f = tvm.build(s, [A, B], device)\n f(a, b)\n np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n for device in ['cuda', 'opencl', 'metal', 'rocm']:\n check_device(device)\n\ndef test_pool():\n verify_pool(1, 256, 32, 2, 2, [0, 0], 'avg', False)\n verify_pool(1, 256, 31, 3, 3, [1, 2], 'avg', False)\n verify_pool(1, 256, 32, 2, 2, [0, 0], 'max', False)\n verify_pool(1, 256, 31, 3, 3, [2, 1], 'max', False)\n verify_pool(1, 256, 31, 3, 3, [2, 1], 'max', True)\n\n\n\ndef verify_global_pool(n, c, h, w, pool_type):\n A = tvm.placeholder((n, c, h, w), name='A')\n B = topi.nn.global_pool(A, pool_type=pool_type)\n B = topi.nn.relu(B)\n\n a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)\n if pool_type == 'avg':\n b_np = np.mean(a_np, axis=(2,3), keepdims=True)\n elif pool_type =='max':\n b_np = np.max(a_np, axis=(2,3), keepdims=True)\n b_np = np.maximum(b_np, 0.0)\n\n def check_device(device):\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_global_pool(B)\n ctx = tvm.context(device, 0)\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)\n f = tvm.build(s, [A, B], device)\n f(a, b)\n np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n for device in ['cuda', 'opencl', 'metal', 'rocm']:\n check_device(device)\n\ndef test_global_pool():\n verify_global_pool(1, 1024, 7, 7, 'avg')\n verify_global_pool(4, 1024, 7, 7, 'avg')\n verify_global_pool(1, 1024, 7, 7, 'max')\n 
verify_global_pool(4, 1024, 7, 7, 'max')\n\n\nif __name__ == \"__main__\":\n test_pool()\n test_global_pool()\n" ]
[ [ "numpy.ix_", "numpy.maximum", "numpy.max", "numpy.mean", "numpy.random.uniform", "numpy.zeros" ] ]
ankdesh/cadl
[ "f791cd0efc5a0787b058fa4f39c873dbdadb97a0" ]
[ "session-5/libs/deepdream.py" ]
[ "\"\"\"Deep Dream using the Inception v5 network.\n\nCreative Applications of Deep Learning w/ Tensorflow.\nKadenze, Inc.\nCopyright Parag K. Mital, June 2016.\n\"\"\"\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.ndimage.filters import gaussian_filter\nfrom skimage.transform import resize\nfrom scipy.misc import imsave\nfrom . import inception, vgg16, i2v\nfrom . import gif\n\n\ndef get_labels(model='inception'):\n \"\"\"Return labels corresponding to the `neuron_i` parameter of deep dream.\n\n Parameters\n ----------\n model : str, optional\n Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n\n Raises\n ------\n ValueError\n Unknown model. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n \"\"\"\n if model == 'inception':\n net = inception.get_inception_model()\n return net['labels']\n elif model == 'i2v_tag':\n net = i2v.get_i2v_tag_model()\n return net['labels']\n elif model == 'vgg16':\n net = vgg16.get_vgg_model()\n return net['labels']\n elif model == 'vgg_face':\n net = vgg16.get_vgg_face_model()\n return net['labels']\n else:\n raise ValueError(\"Unknown model or this model does not have labels!\")\n\n\ndef get_layer_names(model='inception'):\n \"\"\"Retun every layer's index and name in the given model.\n\n Parameters\n ----------\n model : str, optional\n Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n\n Returns\n -------\n names : list of tuples\n The index and layer's name for every layer in the given model.\n \"\"\"\n g = tf.Graph()\n with tf.Session(graph=g):\n if model == 'inception':\n net = inception.get_inception_model()\n elif model == 'vgg_face':\n net = vgg16.get_vgg_face_model()\n elif model == 'vgg16':\n net = vgg16.get_vgg_model()\n elif model == 'i2v':\n net = i2v.get_i2v_model()\n elif model == 'i2v-tag':\n net = i2v.get_i2v_tag_model()\n\n tf.import_graph_def(net['graph_def'], name='net')\n names = [(i, op.name) for i, op in enumerate(g.get_operations())]\n return names\n\n\ndef _setup(input_img, model, downsize):\n \"\"\"Internal use only. Load the given model's graph and preprocess an image.\n\n Parameters\n ----------\n input_img : np.ndarray\n Image to process with the model's normalizaiton process.\n model : str\n Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n downsize : bool\n Optionally crop/resize the input image to the standard shape. 
Only\n applies to inception network which is all convolutional.\n\n Returns\n -------\n net, img, preprocess, deprocess : dict, np.ndarray, function, function\n net : The networks graph_def and labels\n img : The preprocessed input image\n preprocess: Function for preprocessing an image\n deprocess: Function for deprocessing an image\n\n Raises\n ------\n ValueError\n If model is unknown.\n \"\"\"\n if model == 'inception':\n net = inception.get_inception_model()\n img = inception.preprocess(input_img, resize=downsize, crop=downsize)[np.newaxis]\n deprocess, preprocess = inception.deprocess, inception.preprocess\n elif model == 'vgg_face':\n net = vgg16.get_vgg_face_model()\n img = vgg16.preprocess(input_img)[np.newaxis]\n deprocess, preprocess = vgg16.deprocess, vgg16.preprocess\n elif model == 'vgg16':\n net = vgg16.get_vgg_model()\n img = vgg16.preprocess(input_img)[np.newaxis]\n deprocess, preprocess = vgg16.deprocess, vgg16.preprocess\n elif model == 'i2v':\n net = i2v.get_i2v_model()\n img = i2v.preprocess(input_img)[np.newaxis]\n deprocess, preprocess = i2v.deprocess, i2v.preprocess\n elif model == 'i2v_tag':\n net = i2v.get_i2v_tag_model()\n img = i2v.preprocess(input_img)[np.newaxis]\n deprocess, preprocess = i2v.deprocess, i2v.preprocess\n else:\n raise ValueError(\n \"Unknown model name! Supported: \" +\n \"['inception', 'vgg_face', 'vgg16', 'i2v', 'i2v_tag']\")\n\n return net, img, preprocess, deprocess\n\n\ndef _apply(img,\n gradient,\n it_i,\n decay=0.998,\n sigma=1.5,\n blur_step=10,\n step=1.0,\n crop=0,\n crop_step=1,\n pth=0):\n \"\"\"Interal use only. Apply the gradient to an image with the given params.\n\n Parameters\n ----------\n img : np.ndarray\n Tensor to apply gradient ascent to.\n gradient : np.ndarray\n Gradient to ascend to.\n it_i : int\n Current iteration (used for step modulos)\n decay : float, optional\n Amount to decay.\n sigma : float, optional\n Sigma for Gaussian Kernel.\n blur_step : int, optional\n How often to blur.\n step : float, optional\n Step for gradient ascent.\n crop : int, optional\n Amount to crop from each border.\n crop_step : int, optional\n How often to crop.\n pth : int, optional\n Percentile to mask out.\n\n Returns\n -------\n img : np.ndarray\n Ascended image.\n \"\"\"\n gradient /= (np.std(gradient) + 1e-10)\n img += gradient * step\n img *= decay\n\n if pth:\n mask = (np.abs(img) < np.percentile(np.abs(img), pth))\n img = img - img * mask\n\n if blur_step and it_i % blur_step == 0:\n for ch_i in range(3):\n img[..., ch_i] = gaussian_filter(img[..., ch_i], sigma)\n\n if crop and it_i % crop_step == 0:\n height, width, *ch = img[0].shape\n\n # Crop a 1 pixel border from height and width\n img = img[:, crop:-crop, crop:-crop, :]\n\n # Resize\n img = resize(img[0], (height, width), order=3,\n clip=False, preserve_range=True\n )[np.newaxis].astype(np.float32)\n\n\ndef deep_dream(input_img,\n downsize=False,\n model='inception',\n layer_i=-1,\n neuron_i=-1,\n n_iterations=100,\n save_gif=None,\n save_images='imgs',\n device='/cpu:0',\n **kwargs):\n \"\"\"Deep Dream with the given parameters.\n\n Parameters\n ----------\n input_img : np.ndarray\n Image to apply deep dream to. Should be 3-dimenionsal H x W x C\n RGB uint8 or float32.\n downsize : bool, optional\n Whether or not to downsize the image. Only applies to\n model=='inception'.\n model : str, optional\n Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n layer_i : int, optional\n Which layer to use for finding the gradient. E.g. 
the softmax layer\n for inception is -1, for vgg networks it is -2. Use the function\n \"get_layer_names\" to find the layer number that you need.\n neuron_i : int, optional\n Which neuron to use. -1 for the entire layer.\n n_iterations : int, optional\n Number of iterations to dream.\n save_gif : bool, optional\n Save a GIF.\n save_images : str, optional\n Folder to save images to.\n device : str, optional\n Which device to use, e.g. ['/cpu:0'] or '/gpu:0'.\n **kwargs : dict\n See \"_apply\" for additional parameters.\n\n Returns\n -------\n imgs : list of np.array\n Images of every iteration\n \"\"\"\n net, img, preprocess, deprocess = _setup(input_img, model, downsize)\n batch, height, width, *ch = img.shape\n\n g = tf.Graph()\n with tf.Session(graph=g) as sess, g.device(device):\n\n tf.import_graph_def(net['graph_def'], name='net')\n names = [op.name for op in g.get_operations()]\n input_name = names[0] + ':0'\n x = g.get_tensor_by_name(input_name)\n\n layer = g.get_tensor_by_name(names[layer_i] + ':0')\n layer_shape = sess.run(tf.shape(layer), feed_dict={x: img})\n layer_vec = np.ones(layer_shape) / layer_shape[-1]\n layer_vec[..., neuron_i] = 1.0 - (1.0 / layer_shape[-1])\n\n ascent = tf.gradients(layer, x)\n\n imgs = []\n for it_i in range(n_iterations):\n print(it_i, np.min(img), np.max(img))\n if neuron_i == -1:\n this_res = sess.run(\n ascent, feed_dict={x: img})[0]\n else:\n this_res = sess.run(\n ascent, feed_dict={x: img, layer: layer_vec})[0]\n\n _apply(img, this_res, it_i, **kwargs)\n imgs.append(deprocess(img[0]))\n\n if save_images is not None:\n imsave(os.path.join(save_images,\n 'frame{}.png'.format(it_i)), imgs[-1])\n\n if save_gif is not None:\n gif.build_gif(imgs, saveto=save_gif)\n\n return imgs\n\n\ndef guided_dream(input_img,\n guide_img=None,\n downsize=False,\n layers=[162, 183, 184, 247],\n label_i=962,\n layer_i=-1,\n feature_loss_weight=1.0,\n tv_loss_weight=1.0,\n l2_loss_weight=1.0,\n softmax_loss_weight=1.0,\n model='inception',\n neuron_i=920,\n n_iterations=100,\n save_gif=None,\n save_images='imgs',\n device='/cpu:0',\n **kwargs):\n \"\"\"Deep Dream v2. Use an optional guide image and other techniques.\n\n Parameters\n ----------\n input_img : np.ndarray\n Image to apply deep dream to. Should be 3-dimenionsal H x W x C\n RGB uint8 or float32.\n guide_img : np.ndarray, optional\n Optional image to find features at different layers for. Must pass in\n a list of layers that you want to find features for. Then the guided\n dream will try to match this images features at those layers.\n downsize : bool, optional\n Whether or not to downsize the image. Only applies to\n model=='inception'.\n layers : list, optional\n A list of layers to find features for in the \"guide_img\".\n label_i : int, optional\n Which label to use for the softmax layer. Use the \"get_labels\" function\n to find the index corresponding the object of interest. If None, not\n used.\n layer_i : int, optional\n Which layer to use for finding the gradient. E.g. the softmax layer\n for inception is -1, for vgg networks it is -2. Use the function\n \"get_layer_names\" to find the layer number that you need.\n feature_loss_weight : float, optional\n Weighting for the feature loss from the guide_img.\n tv_loss_weight : float, optional\n Total variational loss weighting. Enforces smoothness.\n l2_loss_weight : float, optional\n L2 loss weighting. Enforces smaller values and reduces saturation.\n softmax_loss_weight : float, optional\n Softmax loss weighting. 
Must set label_i.\n model : str, optional\n Which model to load. Must be one of: ['inception'], 'i2v_tag', 'i2v',\n 'vgg16', or 'vgg_face'.\n neuron_i : int, optional\n Which neuron to use. -1 for the entire layer.\n n_iterations : int, optional\n Number of iterations to dream.\n save_gif : bool, optional\n Save a GIF.\n save_images : str, optional\n Folder to save images to.\n device : str, optional\n Which device to use, e.g. ['/cpu:0'] or '/gpu:0'.\n **kwargs : dict\n See \"_apply\" for additional parameters.\n\n Returns\n -------\n imgs : list of np.ndarray\n Images of the dream.\n \"\"\"\n net, img, preprocess, deprocess = _setup(input_img, model, downsize)\n print(img.shape, input_img.shape)\n print(img.min(), img.max())\n\n if guide_img is not None:\n guide_img = preprocess(guide_img.copy(), model)[np.newaxis]\n assert(guide_img.shape == img.shape)\n batch, height, width, *ch = img.shape\n\n g = tf.Graph()\n with tf.Session(graph=g) as sess, g.device(device):\n tf.import_graph_def(net['graph_def'], name='net')\n names = [op.name for op in g.get_operations()]\n input_name = names[0] + ':0'\n x = g.get_tensor_by_name(input_name)\n\n features = [names[layer_i] + ':0' for layer_i in layers]\n feature_loss = tf.Variable(0.0)\n for feature_i in features:\n layer = g.get_tensor_by_name(feature_i)\n if guide_img is None:\n feature_loss += tf.reduce_mean(layer)\n else:\n # Reshape it to 2D vector\n layer = tf.reshape(layer, [-1, 1])\n # Do the same for our guide image\n guide_layer = sess.run(layer, feed_dict={x: guide_img})\n guide_layer = guide_layer.reshape(-1, 1)\n # Now calculate their dot product\n correlation = tf.matmul(guide_layer.T, layer)\n feature_loss += feature_loss_weight * tf.reduce_mean(correlation)\n softmax_loss = tf.Variable(0.0)\n if label_i is not None:\n layer = g.get_tensor_by_name(names[layer_i] + ':0')\n layer_shape = sess.run(tf.shape(layer), feed_dict={x: img})\n layer_vec = np.ones(layer_shape) / layer_shape[-1]\n layer_vec[..., neuron_i] = 1.0 - 1.0 / layer_shape[1]\n softmax_loss += softmax_loss_weight * tf.reduce_mean(tf.nn.l2_loss(layer - layer_vec))\n\n dx = tf.square(x[:, :height - 1, :width - 1, :] - x[:, :height - 1, 1:, :])\n dy = tf.square(x[:, :height - 1, :width - 1, :] - x[:, 1:, :width - 1, :])\n tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 1.2))\n l2_loss = l2_loss_weight * tf.reduce_mean(tf.nn.l2_loss(x))\n\n ascent = tf.gradients(feature_loss + softmax_loss + tv_loss + l2_loss, x)[0]\n sess.run(tf.initialize_all_variables())\n imgs = []\n for it_i in range(n_iterations):\n this_res, this_feature_loss, this_softmax_loss, this_tv_loss, this_l2_loss = sess.run(\n [ascent, feature_loss, softmax_loss, tv_loss, l2_loss], feed_dict={x: img})\n print('feature:', this_feature_loss,\n 'softmax:', this_softmax_loss,\n 'tv', this_tv_loss,\n 'l2', this_l2_loss)\n\n _apply(img, -this_res, it_i, **kwargs)\n imgs.append(deprocess(img[0]))\n\n if save_images is not None:\n imsave(os.path.join(save_images,\n 'frame{}.png'.format(it_i)), imgs[-1])\n\n if save_gif is not None:\n gif.build_gif(imgs, saveto=save_gif)\n\n return imgs\n" ]
[ [ "numpy.max", "tensorflow.nn.l2_loss", "scipy.ndimage.filters.gaussian_filter", "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.Variable", "tensorflow.gradients", "numpy.std", "tensorflow.initialize_all_variables", "tensorflow.Session", "tensorflow.square", "tensorflow.matmul", "tensorflow.shape", "numpy.min", "tensorflow.pow", "numpy.abs", "tensorflow.reduce_mean", "tensorflow.reshape", "numpy.ones" ] ]
issagaliyeva/udacity_deep_learning
[ "ffe8783a3b4f502aa47cab9196be9a4099dfcafd" ]
[ "intro-to-pytorch/fc_model.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Network(nn.Module):\n def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):\n ''' Builds a feedforward network with arbitrary hidden layers.\n \n Arguments\n ---------\n input_size: integer, size of the input layer\n output_size: integer, size of the output layer\n hidden_layers: list of integers, the sizes of the hidden layers\n \n '''\n super().__init__()\n # Input to a hidden layer\n self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n\n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n\n self.output = nn.Linear(hidden_layers[-1], output_size)\n\n self.dropout = nn.Dropout(p=drop_p)\n\n def forward(self, x):\n ''' Forward pass through the network, returns the output logits '''\n\n for each in self.hidden_layers:\n x = F.relu(each(x))\n x = self.dropout(x)\n x = self.output(x)\n\n return F.log_softmax(x, dim=1)\n\n\ndef validation(model, testloader, criterion):\n accuracy = 0\n test_loss = 0\n for images, labels in testloader:\n images = images.resize_(images.size()[0], 784)\n\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n\n ## Calculating the accuracy \n # Model's output is log-softmax, take exponential to get the probabilities\n ps = torch.exp(output)\n # Class with highest probability is our predicted class, compare with true label\n equality = (labels.data == ps.max(1)[1])\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n return test_loss, accuracy\n\n\ndef train(model, trainloader, testloader, criterion, optimizer, epochs=5, print_every=40):\n steps = 0\n running_loss = 0\n for e in range(epochs):\n # Model in training mode, dropout is on\n model.train()\n for images, labels in trainloader:\n steps += 1\n\n # Flatten images into a 784 long vector\n images.resize_(images.size()[0], 784)\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Model in inference mode, dropout is off\n model.eval()\n\n # Turn off gradients for validation, will speed up inference\n with torch.no_grad():\n test_loss, accuracy = validation(model, testloader, criterion)\n\n print(\"Epoch: {}/{}.. \".format(e + 1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss / print_every),\n \"Test Loss: {:.3f}.. \".format(test_loss / len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy / len(testloader)))\n\n running_loss = 0\n\n # Make sure dropout and grads are on for training\n model.train()\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.log_softmax", "torch.exp", "torch.nn.Linear", "torch.FloatTensor", "torch.no_grad" ] ]
asistradition/inferelator_ng
[ "56ef2ce3b1ace35b9b2b2821a0e78746563c309a" ]
[ "inferelator_ng/tests/test_bbsr.py" ]
[ "import unittest, os\nimport pandas as pd\nimport pandas.util.testing as pdt\nimport numpy as np\nfrom inferelator_ng import kvs_controller\nfrom inferelator_ng import bbsr_python\nfrom inferelator_ng import bayes_stats\nfrom inferelator_ng import regression\n\nmy_dir = os.path.dirname(__file__)\n\nclass TestBBSRrunnerPython(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestBBSRrunnerPython, self).__init__(*args, **kwargs)\n # Extra behavior: only run if KVSClient can reach the host:\n self.kvs = None # dummy value on failure\n try:\n self.kvs = kvs_controller.KVSController()\n except Exception as e:\n if str(e) == 'Missing host':\n print('Test test_bbsr.py exiting since KVS host is not running')\n print('Try rerunning tests with python $LOCALREPO/kvsstcp.py --execcmd \"nosetests --nocapture -v\"')\n self.missing_kvs_host = True\n\n # Mock out Slurm process IDs so that KVS can access this process ID in bbsr_python.py\n os.environ['SLURM_PROCID'] = str(0) \n os.environ['SLURM_NTASKS'] = str(1)\n\n def get_kvs(self):\n result = self.kvs\n if result is None:\n self.fail(\"Test requires missing KVS host.\")\n return result\n\n def setUp(self):\n # Check for os.environ['SLURM_NTASKS']\n self.rank = 0\n self.brd = bbsr_python.BBSR\n \n def run_bbsr(self):\n kvs = self.get_kvs()\n return self.brd(self.X, self.Y, self.clr, self.priors, kvs=kvs).run()\n\n def set_all_zero_priors(self):\n self.priors = pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n\n def set_all_zero_clr(self):\n self.clr = pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n\n def assert_matrix_is_square(self, size, matrix):\n self.assertEqual(matrix.shape, (size, size))\n\n def test_two_genes(self):\n self.set_all_zero_priors()\n self.set_all_zero_clr()\n self.X = pd.DataFrame([0, 0], index = ['gene1', 'gene2'], columns = ['ss'])\n self.Y = pd.DataFrame([0, 0], index = ['gene1', 'gene2'], columns = ['ss'])\n\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n '''\n def test_fails_with_one_gene(self):\n self.set_all_zero_priors()\n self.set_all_zero_clr()\n self.X = pd.DataFrame([0], index = ['gene1'], columns = ['ss'])\n self.Y = pd.DataFrame([0], index = ['gene1'], columns = ['ss'])\n self.assertRaises(CalledProcessError, self.brd.run, self.X, self.Y, self.clr, self.priors)\n '''\n\n def test_two_genes_nonzero(self):\n self.set_all_zero_priors()\n self.set_all_zero_clr()\n self.X = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])\n self.Y = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n # BBSR fails when there's only one column in the design (or response) matrix\n # That seems like unexpected behavior to me. 
If it is expected, there should be checks for it earlier -Nick DV\n '''\n @unittest.skip(\"\"\"\n There's some unexpected behavior in bayesianRegression.R: a 2 x 1 matrix is getting transformed into a NaN matrix\n ss\n gene1 NaN\n gene2 NaN\n attr(,\"scaled:center\")\n gene1 gene2\n 1 2\n attr(,\"scaled:scale\")\n gene1 gene2\n 0 0\n \"\"\")\n '''\n def test_two_genes_nonzero_clr_nonzero(self):\n self.set_all_zero_priors()\n self.X = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])\n self.Y = pd.DataFrame([1, 2], index = ['gene1', 'gene2'], columns = ['ss'])\n self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_two_genes_nonzero_clr_two_conditions_negative_influence(self):\n self.set_all_zero_priors()\n self.X = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, -1],[-1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_two_genes_nonzero_clr_two_conditions_zero_gene1_negative_influence(self):\n self.set_all_zero_priors()\n self.X = pd.DataFrame([[0, 2], [2, 0]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[0, 1], [1, 0]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, -1],[-1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_two_genes_zero_clr_two_conditions_zero_betas(self):\n self.set_all_zero_priors()\n self.set_all_zero_clr()\n self.X = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[1, 2], [2, 1]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_two_genes_zero_clr_two_conditions_zero_gene1_zero_betas(self):\n self.set_all_zero_priors()\n self.set_all_zero_clr()\n self.X = pd.DataFrame([[0, 2], [2, 0]], index = ['gene1', 
'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[0, 1], [1, 0]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 0],[0, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_two_genes_nonzero_clr_two_conditions_positive_influence(self):\n self.set_all_zero_priors()\n self.X = pd.DataFrame([[1, 2], [1, 2]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[1, 2], [1, 2]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n\n def test_Best_Subset_Regression_all_zero_predictors(self):\n self.X = np.array([[0, 0], [0, 0]])\n self.Y = np.array([1, 2])\n g = np.matrix([1, 1])\n betas = bayes_stats.best_subset_regression(self.X, self.Y, g)\n self.assertTrue((betas == [ 0., 0.]).all())\n\n def test_PredictErrorReduction_all_zero_predictors(self):\n self.X = np.array([[0, 0], [0, 0]])\n self.Y = np.array([1, 2])\n betas = np.array([ 0., 0.])\n result = regression.predict_error_reduction(self.X, self.Y, betas)\n self.assertTrue((result == [ 0., 0.]).all())\n\n def test_two_genes_nonzero_clr_two_conditions_zero_gene1_positive_influence(self):\n self.set_all_zero_priors()\n self.X = pd.DataFrame([[0, 2], [0, 2]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.Y = pd.DataFrame([[1, 2], [1, 2]], index = ['gene1', 'gene2'], columns = ['ss1', 'ss2'])\n self.clr = pd.DataFrame([[.1, .1],[.1, .2]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2'])\n (betas, resc) = self.run_bbsr()\n self.assert_matrix_is_square(2, betas)\n self.assert_matrix_is_square(2, resc)\n pdt.assert_frame_equal(betas, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n pdt.assert_frame_equal(resc, pd.DataFrame([[0, 1],[1, 0]], index = ['gene1', 'gene2'], columns = ['gene1', 'gene2']).astype(float))\n" ]
[ [ "numpy.matrix", "numpy.array", "pandas.DataFrame" ] ]
Moon-sung-woo/VAE_Tacotron_korean
[ "1029665edb7edf382704b7dde97693699045ba5d" ]
[ "f0/yin.py" ]
[ "# adapted from https://github.com/patriceguyot/Yin\n\nimport numpy as np\n\n\ndef differenceFunction(x, N, tau_max):\n \"\"\"\n Compute difference function of data x. This corresponds to equation (6) in [1]\n This solution is implemented directly with Numpy fft.\n\n\n :param x: audio data\n :param N: length of data\n :param tau_max: integration window size\n :return: difference function\n :rtype: list\n \"\"\"\n\n x = np.array(x, np.float64)\n w = x.size\n tau_max = min(tau_max, w)\n x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))\n size = w + tau_max\n p2 = (size // 32).bit_length()\n nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)\n size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)\n fc = np.fft.rfft(x, size_pad)\n conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]\n return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - 2 * conv\n\n\ndef cumulativeMeanNormalizedDifferenceFunction(df, N):\n \"\"\"\n Compute cumulative mean normalized difference function (CMND).\n\n This corresponds to equation (8) in [1]\n\n :param df: Difference function\n :param N: length of data\n :return: cumulative mean normalized difference function\n :rtype: list\n \"\"\"\n\n cmndf = df[1:] * range(1, N) / np.cumsum(df[1:]).astype(float) #scipy method\n return np.insert(cmndf, 0, 1)\n\n\ndef getPitch(cmdf, tau_min, tau_max, harmo_th=0.1):\n \"\"\"\n Return fundamental period of a frame based on CMND function.\n\n :param cmdf: Cumulative Mean Normalized Difference function\n :param tau_min: minimum period for speech\n :param tau_max: maximum period for speech\n :param harmo_th: harmonicity threshold to determine if it is necessary to compute pitch frequency\n :return: fundamental period if there is values under threshold, 0 otherwise\n :rtype: float\n \"\"\"\n tau = tau_min\n while tau < tau_max:\n if cmdf[tau] < harmo_th:\n while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:\n tau += 1\n return tau\n tau += 1\n\n return 0 # if unvoiced\n\n\ndef compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500,\n harmo_thresh=0.1):\n \"\"\"\n\n Compute the Yin Algorithm. Return fundamental frequency and harmonic rate.\n\n :param sig: Audio signal (list of float)\n :param sr: sampling rate (int)\n :param w_len: size of the analysis window (samples)\n :param w_step: size of the lag between two consecutives windows (samples)\n :param f0_min: Minimum fundamental frequency that can be detected (hertz)\n :param f0_max: Maximum fundamental frequency that can be detected (hertz)\n :param harmo_tresh: Threshold of detection. 
The yalgorithmù return the first minimum of the CMND function below this treshold.\n\n :returns:\n\n * pitches: list of fundamental frequencies,\n * harmonic_rates: list of harmonic rate values for each fundamental frequency value (= confidence value)\n * argmins: minimums of the Cumulative Mean Normalized DifferenceFunction\n * times: list of time of each estimation\n :rtype: tuple\n \"\"\"\n\n tau_min = int(sr / f0_max)\n tau_max = int(sr / f0_min)\n\n timeScale = range(0, len(sig) - w_len, w_step) # time values for each analysis window\n times = [t/float(sr) for t in timeScale]\n frames = [sig[t:t + w_len] for t in timeScale]\n\n pitches = [0.0] * len(timeScale)\n harmonic_rates = [0.0] * len(timeScale)\n argmins = [0.0] * len(timeScale)\n\n for i, frame in enumerate(frames):\n # Compute YIN\n df = differenceFunction(frame, w_len, tau_max)\n cmdf = cumulativeMeanNormalizedDifferenceFunction(df, tau_max)\n p = getPitch(cmdf, tau_min, tau_max, harmo_thresh)\n\n # Get results\n if np.argmin(cmdf) > tau_min:\n argmins[i] = float(sr / np.argmin(cmdf))\n if p != 0: # A pitch was found\n pitches[i] = float(sr / p)\n harmonic_rates[i] = cmdf[p]\n else: # No pitch, but we compute a value of the harmonic rate\n harmonic_rates[i] = min(cmdf)\n\n return pitches, harmonic_rates, argmins, times\n" ]
[ [ "numpy.fft.rfft", "numpy.cumsum", "numpy.argmin", "numpy.insert", "numpy.array" ] ]
boristown/tpu-master
[ "99e65ce1b9376e6b2c1e8ce9defd2d80cf197bb5" ]
[ "models/official/resnet/resnet_model.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for post- and pre-activation forms of ResNet and ResNet-RS models.\n\nResidual networks (ResNets) were proposed in:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv:1603.05027\n[3] Irwan Bello, William Fedus, Xianzhi Du, Ekin D. Cubuk, Aravind Srinivas,\nTsung-Yi Lin, Jonathon Shlens, Barret Zoph\n Revisiting ResNets: Improved Training and Scaling Strategies.\n arXiv:2103.07579\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport tensorflow.compat.v1 as tf\n\nimport resnet_layers\n\nMOVING_AVERAGE_DECAY = 0.9\nEPSILON = 1e-5\n\nLAYER_BN_RELU = 'bn_relu'\nLAYER_EVONORM_B0 = 'evonorm_b0'\nLAYER_EVONORM_S0 = 'evonorm_s0'\nLAYER_EVONORMS = [\n LAYER_EVONORM_B0,\n LAYER_EVONORM_S0,\n]\n\n\ndef norm_activation(\n inputs, is_training, layer=LAYER_BN_RELU, nonlinearity=True,\n init_zero=False, data_format='channels_first',\n bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Normalization-activation layer.\"\"\"\n if layer == LAYER_BN_RELU:\n return batch_norm_relu(\n inputs, is_training, relu=nonlinearity,\n init_zero=init_zero, data_format=data_format,\n bn_momentum=bn_momentum)\n elif layer in LAYER_EVONORMS:\n return evonorm(\n inputs, is_training, layer=layer, nonlinearity=nonlinearity,\n init_zero=init_zero, data_format=data_format)\n else:\n raise ValueError('Unknown normalization-activation layer: {}'.format(layer))\n\n\ndef batch_norm_relu(inputs, is_training, relu=True, init_zero=False,\n data_format='channels_first',\n bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Performs a batch normalization followed by a ReLU.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training: `bool` for whether the model is training.\n relu: `bool` if False, omits the ReLU operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n bn_momentum: `float` momentum for batch norm layer.\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_first':\n axis = 1\n else:\n axis = 3\n\n inputs = tf.layers.batch_normalization(\n inputs=inputs,\n axis=axis,\n momentum=bn_momentum,\n epsilon=EPSILON,\n center=True,\n scale=True,\n training=is_training,\n fused=True,\n gamma_initializer=gamma_initializer)\n\n if relu:\n inputs = tf.nn.relu(inputs)\n 
return inputs\n\n\ndef _instance_std(inputs,\n epsilon=EPSILON,\n data_format='channels_first'):\n \"\"\"Instance standard deviation.\"\"\"\n axes = [1, 2] if data_format == 'channels_last' else [2, 3]\n _, variance = tf.nn.moments(inputs, axes=axes, keepdims=True)\n return tf.sqrt(variance + epsilon)\n\n\ndef _batch_std(inputs,\n training,\n decay=MOVING_AVERAGE_DECAY,\n epsilon=EPSILON,\n data_format='channels_first',\n name='moving_variance'):\n \"\"\"Batch standard deviation.\"\"\"\n if data_format == 'channels_last':\n var_shape, axes = (1, 1, 1, inputs.shape[3]), [0, 1, 2]\n else:\n var_shape, axes = (1, inputs.shape[1], 1, 1), [0, 2, 3]\n moving_variance = tf.get_variable(\n name=name,\n shape=var_shape,\n initializer=tf.initializers.ones(),\n dtype=tf.float32,\n collections=[\n tf.GraphKeys.MOVING_AVERAGE_VARIABLES,\n tf.GraphKeys.GLOBAL_VARIABLES\n ],\n trainable=False)\n if training:\n _, variance = tf.nn.moments(inputs, axes, keep_dims=True)\n variance = tf.cast(variance, tf.float32)\n update_op = tf.assign_sub(\n moving_variance,\n (moving_variance - variance) * (1 - decay))\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op)\n else:\n variance = moving_variance\n std = tf.sqrt(variance + epsilon)\n return tf.cast(std, inputs.dtype)\n\n\ndef _get_shape_list(tensor):\n \"\"\"Returns tensor's shape as a list which can be unpacked.\"\"\"\n static_shape = tensor.shape.as_list()\n if not any([x is None for x in static_shape]):\n return static_shape\n\n dynamic_shape = tf.shape(tensor)\n ndims = tensor.shape.ndims\n\n # Return mixture of static and dynamic dims.\n shapes = [\n static_shape[i] if static_shape[i] is not None else dynamic_shape[i]\n for i in range(ndims)\n ]\n return shapes\n\n\ndef _group_std(inputs,\n epsilon=EPSILON,\n data_format='channels_first',\n num_groups=32):\n \"\"\"Grouped standard deviation along the channel dimension.\"\"\"\n axis = 3 if data_format == 'channels_last' else 1\n while num_groups > 1:\n if inputs.shape[axis] % num_groups == 0:\n break\n num_groups -= 1\n if data_format == 'channels_last':\n _, h, w, c = inputs.shape.as_list()\n x = tf.reshape(inputs, [-1, h, w, num_groups, c // num_groups])\n _, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)\n else:\n _, c, h, w = inputs.shape.as_list()\n x = tf.reshape(inputs, [-1, num_groups, c // num_groups, h, w])\n _, variance = tf.nn.moments(x, [2, 3, 4], keep_dims=True)\n std = tf.sqrt(variance + epsilon)\n std = tf.broadcast_to(std, _get_shape_list(x))\n return tf.reshape(std, _get_shape_list(inputs))\n\n\ndef evonorm(inputs,\n is_training,\n layer=LAYER_EVONORM_B0,\n nonlinearity=True,\n init_zero=False,\n decay=MOVING_AVERAGE_DECAY,\n epsilon=EPSILON,\n num_groups=32,\n data_format='channels_first'):\n \"\"\"Apply an EvoNorm transformation (an alternative to BN-ReLU).\n\n Hanxiao Liu, Andrew Brock, Karen Simonyan, Quoc V. 
Le.\n Evolving Normalization-Activation Layers.\n https://arxiv.org/abs/2004.02967\n\n Args:\n inputs: `Tensor` whose shape is either `[batch, channels, ...]` with\n the \"channels_first\" format or `[batch, height, width, channels]`\n with the \"channels_last\" format.\n is_training: `bool` for whether the model is training.\n layer: `String` specifies the EvoNorm instantiation.\n nonlinearity: `bool` if False, apply an affine transform only.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n decay: `float` a scalar decay used in the moving average.\n epsilon: `float` a small float added to variance to avoid dividing by zero.\n num_groups: `int` the number of groups per layer, used only when `layer` ==\n LAYER_EVONORM_S0.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == 'channels_last':\n var_shape = (1, 1, 1, inputs.shape[3])\n else:\n var_shape = (1, inputs.shape[1], 1, 1)\n with tf.variable_scope(None, default_name='evonorm'):\n beta = tf.get_variable(\n 'beta',\n shape=var_shape,\n dtype=inputs.dtype,\n initializer=tf.zeros_initializer())\n gamma = tf.get_variable(\n 'gamma',\n shape=var_shape,\n dtype=inputs.dtype,\n initializer=gamma_initializer)\n if nonlinearity:\n v = tf.get_variable(\n 'v',\n shape=var_shape,\n dtype=inputs.dtype,\n initializer=tf.ones_initializer())\n if layer == LAYER_EVONORM_S0:\n den = _group_std(\n inputs,\n epsilon=epsilon,\n data_format=data_format,\n num_groups=num_groups)\n inputs = inputs * tf.nn.sigmoid(v * inputs) / den\n elif layer == LAYER_EVONORM_B0:\n left = _batch_std(\n inputs,\n decay=decay,\n epsilon=epsilon,\n data_format=data_format,\n training=is_training)\n right = v * inputs + _instance_std(\n inputs, epsilon=epsilon, data_format=data_format)\n inputs = inputs / tf.maximum(left, right)\n else:\n raise ValueError('Unknown EvoNorm layer: {}'.format(layer))\n return inputs * gamma + beta\n\n\ndef dropblock(net, is_training, keep_prob, dropblock_size,\n data_format='channels_first'):\n \"\"\"DropBlock: a regularization method for convolutional neural networks.\n\n DropBlock is a form of structured dropout, where units in a contiguous\n region of a feature map are dropped together. DropBlock works better than\n dropout on convolutional layers due to the fact that activation units in\n convolutional layers are spatially correlated.\n See https://arxiv.org/pdf/1810.12890.pdf for details.\n\n Args:\n net: `Tensor` input tensor.\n is_training: `bool` for whether the model is training.\n keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. 
\"None\"\n means no DropBlock.\n dropblock_size: `int` size of blocks to be dropped by DropBlock.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n Returns:\n A version of input tensor with DropBlock applied.\n Raises:\n if width and height of the input tensor are not equal.\n \"\"\"\n\n if not is_training or keep_prob is None:\n return net\n\n tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(\n dropblock_size, net.shape))\n\n if data_format == 'channels_last':\n _, width, height, _ = net.get_shape().as_list()\n else:\n _, _, width, height = net.get_shape().as_list()\n if width != height:\n raise ValueError('Input tensor with width!=height is not supported.')\n\n dropblock_size = min(dropblock_size, width)\n # seed_drop_rate is the gamma parameter of DropBlcok.\n seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (\n width - dropblock_size + 1)**2\n\n # Forces the block to be inside the feature map.\n w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))\n valid_block_center = tf.logical_and(\n tf.logical_and(w_i >= int(dropblock_size // 2),\n w_i < width - (dropblock_size - 1) // 2),\n tf.logical_and(h_i >= int(dropblock_size // 2),\n h_i < width - (dropblock_size - 1) // 2))\n\n valid_block_center = tf.expand_dims(valid_block_center, 0)\n valid_block_center = tf.expand_dims(\n valid_block_center, -1 if data_format == 'channels_last' else 0)\n\n randnoise = tf.random_uniform(net.shape, dtype=tf.float32)\n block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(\n (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1\n block_pattern = tf.cast(block_pattern, dtype=tf.float32)\n\n if dropblock_size == width:\n block_pattern = tf.reduce_min(\n block_pattern,\n axis=[1, 2] if data_format == 'channels_last' else [2, 3],\n keepdims=True)\n else:\n if data_format == 'channels_last':\n ksize = [1, dropblock_size, dropblock_size, 1]\n else:\n ksize = [1, 1, dropblock_size, dropblock_size]\n block_pattern = -tf.nn.max_pool(\n -block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',\n data_format='NHWC' if data_format == 'channels_last' else 'NCHW')\n\n percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(\n tf.size(block_pattern), tf.float32)\n\n net = net / tf.cast(percent_ones, net.dtype) * tf.cast(\n block_pattern, net.dtype)\n return net\n\n\ndef fixed_padding(inputs, kernel_size, data_format='channels_first'):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or\n `[batch, height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. 
Should be a positive integer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs, filters, kernel_size, strides,\n data_format='channels_first'):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A `Tensor` of shape `[batch, filters, height_out, width_out]`.\n \"\"\"\n kernel_size = strides = 1 #ins boristown 20210924\n\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n\ndef residual_block(inputs, filters, is_training, strides,\n use_projection=False, data_format='channels_first',\n dropblock_keep_prob=None, dropblock_size=None,\n pre_activation=False, norm_act_layer=LAYER_BN_RELU,\n resnetd_shortcut=False, se_ratio=None,\n drop_connect_rate=None, bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Standard building block for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n use_projection: `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n dropblock_keep_prob: unused; needed to give method same signature as other\n blocks\n dropblock_size: unused; needed to give method same signature as other\n blocks\n pre_activation: whether to use pre-activation ResNet (ResNet-v2).\n norm_act_layer: name of the normalization-activation layer.\n resnetd_shortcut: `bool` if True, apply the resnetd style modification to\n the shortcut connection.\n se_ratio: `float` or None. Squeeze-and-Excitation ratio for the SE layer.\n drop_connect_rate: `float` or None. 
Drop connect rate for this block.\n bn_momentum: `float` momentum for batch norm layer.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n del dropblock_keep_prob\n del dropblock_size\n del resnetd_shortcut\n del se_ratio\n del drop_connect_rate\n\n shortcut = inputs\n if pre_activation:\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n if use_projection:\n # Projection shortcut in first layer to match filters and strides\n shortcut = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=strides,\n data_format=data_format)\n if not pre_activation:\n shortcut = norm_activation(\n shortcut, is_training, nonlinearity=False, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n if pre_activation:\n return inputs + shortcut\n else:\n inputs = norm_activation(\n inputs, is_training, nonlinearity=False, init_zero=True,\n data_format=data_format, layer=norm_act_layer, bn_momentum=bn_momentum)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef bottleneck_block(inputs, filters, is_training, strides,\n use_projection=False, data_format='channels_first',\n dropblock_keep_prob=None, dropblock_size=None,\n pre_activation=False, norm_act_layer=LAYER_BN_RELU,\n resnetd_shortcut=False, se_ratio=None,\n drop_connect_rate=None, bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Bottleneck block variant for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n use_projection: `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n dropblock_keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock.\n \"None\" means no DropBlock.\n dropblock_size: `int` size parameter of DropBlock. Will not be used if\n dropblock_keep_prob is \"None\".\n pre_activation: whether to use pre-activation ResNet (ResNet-v2).\n norm_act_layer: name of the normalization-activation layer.\n resnetd_shortcut: `bool` if True, apply the resnetd style modification to\n the shortcut connection.\n se_ratio: `float` or None. Squeeze-and-Excitation ratio for the SE layer.\n drop_connect_rate: `float` or None. 
Drop connect rate for this block.\n bn_momentum: `float` momentum for batch norm layer.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n shortcut = inputs\n if pre_activation:\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n if use_projection:\n # Projection shortcut only in first block within a group. Bottleneck blocks\n # end with 4 times the number of filters.\n filters_out = 4 * filters\n if resnetd_shortcut and strides == 2:\n shortcut = tf.keras.layers.AveragePooling2D(\n pool_size=(2, 2), strides=(2, 2), padding='same',\n data_format=data_format)(inputs)\n shortcut = conv2d_fixed_padding(\n inputs=shortcut, filters=filters_out, kernel_size=1, strides=1,\n data_format=data_format)\n else:\n shortcut = conv2d_fixed_padding(\n inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,\n data_format=data_format)\n\n if not pre_activation:\n shortcut = norm_activation(\n shortcut, is_training, nonlinearity=False,\n data_format=data_format, layer=norm_act_layer,\n bn_momentum=bn_momentum)\n shortcut = dropblock(\n shortcut, is_training=is_training, data_format=data_format,\n keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=1,\n data_format=data_format)\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n inputs = dropblock(\n inputs, is_training=is_training, data_format=data_format,\n keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n inputs = dropblock(\n inputs, is_training=is_training, data_format=data_format,\n keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,\n data_format=data_format)\n\n if pre_activation:\n return inputs + shortcut\n else:\n inputs = norm_activation(inputs, is_training, nonlinearity=False,\n init_zero=True, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n inputs = dropblock(\n inputs, is_training=is_training, data_format=data_format,\n keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)\n\n if se_ratio is not None and se_ratio > 0 and se_ratio <= 1:\n inputs = resnet_layers.squeeze_excitation(\n inputs, in_filters=4 * filters,\n se_ratio=se_ratio, data_format='channels_last')\n\n if drop_connect_rate is not None:\n tf.logging.info('using drop_connect: {}'.format(drop_connect_rate))\n inputs = resnet_layers.drop_connect(\n inputs, is_training, drop_connect_rate)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef block_group(inputs, filters, block_fn, blocks, strides, is_training, name,\n data_format='channels_first', dropblock_keep_prob=None,\n dropblock_size=None, pre_activation=False,\n norm_act_layer=LAYER_BN_RELU, se_ratio=None,\n resnetd_shortcut=False, drop_connect_rate=None,\n bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Creates one group of blocks for the ResNet model.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n block_fn: `function` for the block to use within the model\n blocks: `int` 
number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. If\n greater than 1, this layer will downsample the input.\n is_training: `bool` for whether the model is training.\n name: `str`name for the Tensor output of the block layer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n dropblock_keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock.\n \"None\" means no DropBlock.\n dropblock_size: `int` size parameter of DropBlock. Will not be used if\n dropblock_keep_prob is \"None\".\n pre_activation: whether to use pre-activation ResNet (ResNet-v2).\n norm_act_layer: name of the normalization-activation layer.\n se_ratio: `float` or None. Squeeze-and-Excitation ratio for the SE layer.\n resnetd_shortcut: `bool` if True, apply the resnetd style modification to\n the shortcut connection in downsampling blocks.\n drop_connect_rate: `float` or None. Drop connect rate for this block.\n bn_momentum: `float` momentum for batch norm layer.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(inputs, filters, is_training, strides,\n use_projection=True, data_format=data_format,\n dropblock_keep_prob=dropblock_keep_prob,\n dropblock_size=dropblock_size,\n pre_activation=pre_activation,\n norm_act_layer=norm_act_layer,\n se_ratio=se_ratio,\n resnetd_shortcut=resnetd_shortcut,\n drop_connect_rate=drop_connect_rate,\n bn_momentum=bn_momentum)\n\n for _ in range(1, blocks):\n inputs = block_fn(inputs, filters, is_training, 1,\n data_format=data_format,\n dropblock_keep_prob=dropblock_keep_prob,\n dropblock_size=dropblock_size,\n pre_activation=pre_activation,\n norm_act_layer=norm_act_layer,\n se_ratio=se_ratio,\n resnetd_shortcut=resnetd_shortcut,\n drop_connect_rate=drop_connect_rate,\n bn_momentum=bn_momentum)\n\n return tf.identity(inputs, name)\n\n\ndef resnet_generator(block_fn,\n layers,\n num_classes,\n data_format='channels_first',\n use_resnetd_stem=False,\n resnetd_shortcut=False,\n replace_stem_max_pool=False,\n skip_stem_max_pool=False,\n drop_connect_rate=None,\n se_ratio=None,\n dropout_rate=None,\n dropblock_keep_probs=None,\n dropblock_size=None,\n pre_activation=False,\n norm_act_layer=LAYER_BN_RELU,\n bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Generator for ResNet models.\n\n Args:\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layers: list of 4 `int`s denoting the number of blocks to include in each\n of the 4 block groups. 
Each group consists of blocks that take inputs of\n the same resolution.\n num_classes: `int` number of possible classes for image classification.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_resnetd_stem: `bool` whether to use ResNet-D stem.\n resnetd_shortcut: `bool` whether to use ResNet-D shortcut in blocks.\n replace_stem_max_pool: `bool` if True, replace the max pool in stem with\n a stride-2 conv,\n skip_stem_max_pool: `bool` if True, skip the max pool in stem and set the\n stride of the following block to 2,\n drop_connect_rate: `float` initial rate for drop-connect.\n se_ratio: `float` Squeeze-and-Excitation ratio for SE layers.\n dropout_rate: `float` drop rate for the dropout layer.\n dropblock_keep_probs: `list` of 4 elements denoting keep_prob of DropBlock\n for each block group. None indicates no DropBlock for the corresponding\n block group.\n dropblock_size: `int`: size parameter of DropBlock.\n pre_activation: whether to use pre-activation ResNet (ResNet-v2).\n norm_act_layer: name of the normalization-activation layer.\n bn_momentum: `float` momentum for batch norm layer.\n\n Returns:\n Model `function` that takes in `inputs` and `is_training` and returns the\n output `Tensor` of the ResNet model.\n\n Raises:\n if dropblock_keep_probs is not 'None' or a list with len 4.\n \"\"\"\n if dropblock_keep_probs is None:\n dropblock_keep_probs = [None] * 4\n if not isinstance(dropblock_keep_probs,\n list) or len(dropblock_keep_probs) != 4:\n raise ValueError('dropblock_keep_probs is not valid:', dropblock_keep_probs)\n\n def model(inputs, is_training):\n \"\"\"Creation of the model graph.\"\"\"\n if use_resnetd_stem:\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=32, kernel_size=3, strides=2,\n data_format=data_format)\n inputs = norm_activation(\n inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=32, kernel_size=3, strides=1,\n data_format=data_format)\n inputs = norm_activation(\n inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=64, kernel_size=3, strides=1,\n data_format=data_format)\n else:\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=64, kernel_size=7, strides=2,\n data_format=data_format)\n\n inputs = tf.identity(inputs, 'initial_conv')\n if not pre_activation:\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n\n if not skip_stem_max_pool:\n if replace_stem_max_pool:\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=64,\n kernel_size=3, strides=2, data_format=data_format)\n inputs = norm_activation(\n inputs, is_training, data_format=data_format,\n bn_momentum=bn_momentum)\n else:\n inputs = tf.layers.max_pooling2d(\n inputs=inputs, pool_size=3, strides=2, padding='SAME',\n data_format=data_format)\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n custom_block_group = functools.partial(\n block_group,\n data_format=data_format,\n dropblock_size=dropblock_size,\n pre_activation=pre_activation,\n norm_act_layer=norm_act_layer,\n se_ratio=se_ratio,\n resnetd_shortcut=resnetd_shortcut,\n bn_momentum=bn_momentum)\n\n num_layers = len(layers) + 1\n stride_c2 = 2 if skip_stem_max_pool else 1\n\n # inputs = custom_block_group(\n # inputs=inputs, filters=64, 
block_fn=block_fn, blocks=layers[0],\n # strides=stride_c2, is_training=is_training, name='block_group1',\n # dropblock_keep_prob=dropblock_keep_probs[0],\n # drop_connect_rate=resnet_layers.get_drop_connect_rate(\n # drop_connect_rate, 2, num_layers))\n # inputs = custom_block_group(\n # inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1],\n # strides=2, is_training=is_training, name='block_group2',\n # dropblock_keep_prob=dropblock_keep_probs[1],\n # drop_connect_rate=resnet_layers.get_drop_connect_rate(\n # drop_connect_rate, 3, num_layers))\n # inputs = custom_block_group(\n # inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2],\n # strides=2, is_training=is_training, name='block_group3',\n # dropblock_keep_prob=dropblock_keep_probs[2],\n # drop_connect_rate=resnet_layers.get_drop_connect_rate(\n # drop_connect_rate, 4, num_layers))\n # inputs = custom_block_group(\n # inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3],\n # strides=2, is_training=is_training, name='block_group4',\n # dropblock_keep_prob=dropblock_keep_probs[3],\n # drop_connect_rate=resnet_layers.get_drop_connect_rate(\n # drop_connect_rate, 5, num_layers))\n\n inputs = custom_block_group(\n inputs=inputs, filters=16, block_fn=block_fn, blocks=layers[0],\n strides=stride_c2, is_training=is_training, name='block_group1',\n dropblock_keep_prob=dropblock_keep_probs[0],\n drop_connect_rate=resnet_layers.get_drop_connect_rate(\n drop_connect_rate, 2, num_layers))\n inputs = custom_block_group(\n inputs=inputs, filters=16, block_fn=block_fn, blocks=layers[1],\n strides=2, is_training=is_training, name='block_group2',\n dropblock_keep_prob=dropblock_keep_probs[1],\n drop_connect_rate=resnet_layers.get_drop_connect_rate(\n drop_connect_rate, 3, num_layers))\n inputs = custom_block_group(\n inputs=inputs, filters=16, block_fn=block_fn, blocks=layers[2],\n strides=2, is_training=is_training, name='block_group3',\n dropblock_keep_prob=dropblock_keep_probs[2],\n drop_connect_rate=resnet_layers.get_drop_connect_rate(\n drop_connect_rate, 4, num_layers))\n inputs = custom_block_group(\n inputs=inputs, filters=16, block_fn=block_fn, blocks=layers[3],\n strides=2, is_training=is_training, name='block_group4',\n dropblock_keep_prob=dropblock_keep_probs[3],\n drop_connect_rate=resnet_layers.get_drop_connect_rate(\n drop_connect_rate, 5, num_layers))\n\n if pre_activation:\n inputs = norm_activation(inputs, is_training, data_format=data_format,\n layer=norm_act_layer, bn_momentum=bn_momentum)\n\n # The activation is 7x7 so this is a global average pool.\n # TODO(huangyp): reduce_mean will be faster.\n if data_format == 'channels_last':\n pool_size = (inputs.shape[1], inputs.shape[2])\n else:\n pool_size = (inputs.shape[2], inputs.shape[3])\n inputs = tf.layers.average_pooling2d(\n inputs=inputs, pool_size=pool_size, strides=1, padding='VALID',\n data_format=data_format)\n inputs = tf.identity(inputs, 'final_avg_pool')\n \n #inputs = tf.reshape(\n # inputs, [-1, 2048 if block_fn is bottleneck_block else 512])\n inputs = tf.reshape(\n inputs, [-1, 2048 if block_fn is bottleneck_block else 16])\n\n if dropout_rate is not None:\n tf.logging.info('using dropout')\n inputs = tf.layers.dropout(\n inputs, rate=dropout_rate, training=is_training)\n\n inputs = tf.layers.dense(\n inputs=inputs,\n units=num_classes,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n inputs = tf.identity(inputs, 'final_dense')\n return inputs\n\n model.default_image_size = 224\n return model\n\n\ndef 
resnet(resnet_depth, num_classes, data_format='channels_first',\n dropblock_keep_probs=None, dropblock_size=None,\n pre_activation=False, norm_act_layer=LAYER_BN_RELU,\n se_ratio=None, drop_connect_rate=None, use_resnetd_stem=False,\n resnetd_shortcut=False, skip_stem_max_pool=False,\n replace_stem_max_pool=False, dropout_rate=None,\n bn_momentum=MOVING_AVERAGE_DECAY):\n \"\"\"Returns the ResNet model for a given size and number of output classes.\"\"\"\n model_params = {\n 10: {'block': residual_block, 'layers': [1, 1, 1, 1]},\n 18: {'block': residual_block, 'layers': [2, 2, 2, 2]},\n 34: {'block': residual_block, 'layers': [3, 4, 6, 3]},\n 50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]},\n 101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]},\n 152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]},\n 200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]},\n 270: {'block': bottleneck_block, 'layers': [4, 29, 53, 4]},\n 350: {'block': bottleneck_block, 'layers': [4, 36, 72, 4]},\n 420: {'block': bottleneck_block, 'layers': [4, 44, 87, 4]}\n }\n\n if resnet_depth not in model_params:\n raise ValueError('Not a valid resnet_depth: {}'.format(resnet_depth))\n\n if norm_act_layer in LAYER_EVONORMS and not pre_activation:\n raise ValueError('Evonorms require the pre-activation form.')\n\n params = model_params[resnet_depth]\n return resnet_generator(\n params['block'], params['layers'], num_classes,\n dropblock_keep_probs=dropblock_keep_probs,\n dropblock_size=dropblock_size,\n data_format=data_format,\n pre_activation=pre_activation,\n norm_act_layer=norm_act_layer,\n use_resnetd_stem=use_resnetd_stem,\n resnetd_shortcut=resnetd_shortcut,\n se_ratio=se_ratio,\n drop_connect_rate=drop_connect_rate,\n dropout_rate=dropout_rate,\n skip_stem_max_pool=skip_stem_max_pool,\n replace_stem_max_pool=replace_stem_max_pool,\n bn_momentum=bn_momentum)\n\n\nresnet_v1 = functools.partial(resnet, pre_activation=False)\nresnet_v2 = functools.partial(resnet, pre_activation=True)\nresnet_v1_generator = functools.partial(resnet_generator, pre_activation=False)\nresnet_v2_generator = functools.partial(resnet_generator, pre_activation=True)\n
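\n# --- Usage sketch (not part of the original file) ---\n# Builds the narrowed ResNet-18 defined above and runs one forward pass in\n# TF1-style graph mode. The 32x32 input shape and 'channels_last' layout are\n# illustrative assumptions; `tf` is `tensorflow.compat.v1` as used above.\n# model_fn = resnet_v1(resnet_depth=18, num_classes=10,\n# data_format='channels_last')\n# images = tf.placeholder(tf.float32, [None, 32, 32, 3])\n# logits = model_fn(images, is_training=False) # Tensor of shape [None, 10]\n" ]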
[ [ "tensorflow.compat.v1.assign_sub", "tensorflow.compat.v1.sqrt", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.nn.sigmoid", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.layers.max_pooling2d", "tensorflow.compat.v1.maximum", "tensorflow.compat.v1.random_uniform", "tensorflow.compat.v1.ones_initializer", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.nn.moments", "tensorflow.compat.v1.nn.relu", "tensorflow.compat.v1.nn.max_pool", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.initializers.ones", "tensorflow.compat.v1.random_normal_initializer", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.variance_scaling_initializer", "tensorflow.compat.v1.keras.layers.AveragePooling2D", "tensorflow.compat.v1.layers.average_pooling2d", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.reduce_min", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.layers.dropout", "tensorflow.compat.v1.range", "tensorflow.compat.v1.size", "tensorflow.compat.v1.pad" ] ]
janatalab/GEM-Experiments-POC
[ "06056b8f1639b0d49fc00c4993dcd3c0299013d3" ]
[ "Analysis_Code/GEM_powerAnalysis.py" ]
[ "'''\nA priori power analysis for single player GEM experiment.\nThis experiment involves solo tapper with adaptive metronome to make\nsure that we can replicate the single-tapper results of Fairhurst, Janata, and\nKeller, 2012, with the GEM system. The original Fairhurst et al. paper and\nsupplemental information are available here:\nhttps://doi.org/10.1093/cercor/bhs243\nGEM project details:\nAuthors: Lauren Fink, Scottie Alexander, Petr Janata\nContact: pjanata@ucdavis.edu\nRepository link: https://github.com/janatalab/GEM\n'''\n\nfrom numpy import sqrt\nfrom math import ceil\n\n# The effect size we want to detect is the difference between metronome\n# alpha=0 and alpha=0.25. Let's get our estimates from Fairhurst et al. (2012).\n# Like them, we plan to use repeated measures ANOVA.\n\n# -----------------------------------------------------------------------------#\n# Mean and SE of Behavioural Data from Fairhurst et al. (2012)\n# Supplemental Information.\n# -----------------------------------------------------------------------------#\n# VP adaptivity (α) Mean Asynchrony (ms) SD Asynchrony (ms)\n# 0 -26.70±3.43 23.64±1.91\n# 0.25 -23.18±2.88 21.41±1.90\n# 0.5 -21.33±2.76 22.09±2.21\n# 0.75 -20.79±2.31 24.09±2.55\n# 1 -18.39±3.23 28.74±3.10\n\n# -----------------------------------------------------------------------------#\n # Estimate Effect Size\n# -----------------------------------------------------------------------------#\n# Effect size: mean(treatment) - mean(control) / std(control)\n# TODO: should we use mean or SD asych?\n\n# If SD:\npooledSD = sqrt(1.91**2 + 1.90**2 / 2)\nES = (21.41 - 23.64) / pooledSD\n\n# If want to use mean:\n# pooledSD = np.sqrt(3.43**2 + 2.88**2 / 2)\n# ES = (23.18 - 26.7) / pooledSD\n\n# -----------------------------------------------------------------------------#\n # Set desired alpha and power\n# -----------------------------------------------------------------------------#\n# alpha = .05\n# power = .90\n\n# Look up appropriate Z values for these desired parameters\nZalpha = 1.96\nZpower = 1.282 #.84 = 80%\n\n# -----------------------------------------------------------------------------#\n # Determine sample size required\n# -----------------------------------------------------------------------------#\n# compute N, given the above parameters\nN = ((Zalpha + Zpower) / ES) **2\nN = ceil(N)\n\n# -----------------------------------------------------------------------------#\n # Example write-up of power analysis\n# -----------------------------------------------------------------------------#\n# A statistical power analysis was performed for sample size estimation, based\n# on data from Fairhurst et al. (2012) (N=16), comparing metronome\n# adaptivity = 0 to metronome adaptivity =.25. The effect size (ES) in this\n# study was -.95, considered to be large using Cohen's (1988) criteria.\n# With an alpha = .05 and power = 0.90, the projected sample size\n# needed with this effect size is approximately N = 12 for this simplest\n# comparison between conditions. Thus, our proposed sample size of 20 will be\n# more than adequate for the main objective of this study and should also allow\n# for expected attrition.\n\n\n# If use mean instead of SD asynchony, N = 14\n" ]
[ [ "numpy.sqrt" ] ]
XingqunHe/XenonPy
[ "5141598d043d85d2fb8da1cea07d96dcd20d7798" ]
[ "xenonpy/contrib/sample_codes/iQSPR_V/iQSPR_V.py" ]
[ "# IQSPR with focus on molecule variety: bring in new initial molecules from reservoir in every step of SMC\n\nimport numpy as np\n\nfrom xenonpy.inverse.base import BaseSMC, BaseProposal, BaseLogLikelihood\n\nclass SMCError(Exception):\n \"\"\"Base exception for SMC classes\"\"\"\n pass\n\nclass IQSPR_V(BaseSMC):\n\n def __init__(self, *, estimator, modifier):\n \"\"\"\n SMC iqspr runner.\n Parameters\n ----------\n estimator : BaseLogLikelihood or BaseLogLikelihoodSet\n Log likelihood estimator for given SMILES.\n modifier : BaseProposal\n Modify given SMILES to new ones.\n \"\"\"\n self._proposal = modifier\n self._log_likelihood = estimator\n\n def resample(self, sims, size, p):\n return np.random.choice(sims, size=size, p=p)\n\n @property\n def modifier(self):\n return self._proposal\n\n @modifier.setter\n def modifier(self, value):\n self._proposal = value\n\n @property\n def estimator(self):\n return self._log_likelihood\n\n @estimator.setter\n def estimator(self, value):\n self._log_likelihood = value\n \n def __call__(self, reservoir, beta, size=100, *, samples=None, ratio=0.5, yield_lpf=False):\n \"\"\"\n Run SMC\n Parameters\n ----------\n reservoir: list of object\n Samples to be drawn as new initial molecules in each step of SMC\n beta: list/1D-numpy of float or pd.Dataframe\n Annealing parameters for each step.\n If pd.Dataframe, column names should follow keys of mdl in BaseLogLikeihood or BaseLogLikelihoodSet\n size: int\n Sample size for each draw.\n samples: list of object\n Initial samples.\n ratio: float\n ratio of molecules to be replaced from reservoir in each step of SMC\n yield_lpf : bool\n Yield estimated log likelihood, probability and frequency of each samples. Default is ``False``.\n Yields\n -------\n samples: list of object\n New samples in each SMC iteration.\n llh: np.ndarray float\n Estimated values of log-likelihood of each samples.\n Only yield when ``yield_lpf=Ture``.\n p: np.ndarray of float\n Estimated probabilities of each samples.\n Only yield when ``yield_lpf=Ture``.\n freq: np.ndarray of float\n The number of unique samples in original samples.\n Only yield when ``yield_lpf=Ture``.\n \"\"\"\n\n # initial samples will be randomly picked from resevoir if samples are provided\n if samples is None:\n samples = np.random.choice(reservoir,size=size).tolist()\n # refill samples if len(samples) not equals given size\n elif len(samples) < size:\n samples = samples + np.random.choice(reservoir,size=size-len(samples)).tolist()\n \n res_size = int(size*ratio)\n smc_size = size - res_size\n\n try:\n unique, frequency = self.unique(samples)\n ll = self.log_likelihood(unique)\n\n if isinstance(beta, pd.DataFrame):\n beta = beta[ll.columns].values\n else:\n # assume only one row for beta (1-D list or numpy vector)\n beta = np.transpose(np.repeat([beta], ll.shape[1], axis=0))\n\n w = np.dot(ll.values, beta[0]) + np.log(frequency)\n w_sum = np.log(np.sum(np.exp(w - np.max(w)))) + np.max(w) # avoid underflow\n p = np.exp(w - w_sum)\n if yield_lpf:\n yield unique, ll, p, frequency\n else:\n yield unique\n\n # beta = np.delete(beta,0,0) #remove first \"row\"\n\n except SMCError as e:\n self.on_errors(0, samples, e)\n except Exception as e:\n raise e\n\n # translate between input representation and execute environment representation.\n # make sure beta is not changed (np.delete will return the deleted version, without changing original vector)\n for i, step in enumerate(np.delete(beta, 0, 0)):\n try:\n re_samples = self.resample(unique, smc_size, p)\n samples = 
self.proposal(list(re_samples) + np.random.choice(reservoir, size=res_size).tolist())\n\n unique, frequency = self.unique(samples)\n\n # annealed likelihood in log - adjust with copy counts\n ll = self.log_likelihood(unique)\n w = np.dot(ll.values, step) + np.log(frequency)\n w_sum = np.log(np.sum(np.exp(w - np.max(w)))) + np.max(w) # avoid underflow\n p = np.exp(w - w_sum)\n if yield_lpf:\n yield unique, ll, p, frequency\n else:\n yield unique\n\n except SMCError as e:\n self.on_errors(i + 1, samples, e)\n except Exception as e:\n raise e\n
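\n\n# --- Driving-loop sketch (not part of the original file) ---\n# `estimator`, `modifier`, and the seed SMILES list `pool` are assumed to be\n# prepared elsewhere (e.g. with xenonpy's GaussianLogLikelihood and NGram).\n# iqspr = IQSPR_V(estimator=estimator, modifier=modifier)\n# beta = np.linspace(0.05, 1.0, 20) # annealing schedule, one value per step\n# for smiles, llh, p, freq in iqspr(pool, beta, size=100, yield_lpf=True):\n# print(len(smiles), float(p.max()))\n" ]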
[ [ "numpy.dot", "numpy.log", "numpy.random.choice", "numpy.max", "numpy.delete", "numpy.repeat", "numpy.exp" ] ]
rmlarose/OpenFermion-FQE
[ "54489126725fe3bb83218b6fde9d44f6cf130359", "54489126725fe3bb83218b6fde9d44f6cf130359" ]
[ "src/fqe/hamiltonians/hamiltonian.py", "src/fqe/algorithm/davidson.py" ]
[ "# Copyright 2020 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defines the base Hamiltonian class for OpenFermion-FQE.\"\"\"\n\n# TODO:\n# The base class Hamiltonian currently support dense and sparse hamiltonians.\n# All the code is funtional but there may be some minor type errors with\n# type hinting as sparse and dense return different types in a few places.\n# This will be corrected in a future version.\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Tuple\n\nimport numpy as np\n\n\nclass Hamiltonian(metaclass=ABCMeta):\n \"\"\"Abstract class to mediate the functions of Hamiltonian with the\n emulator.\n\n TODO(ncrubin): Has the point below already been accomplisehd via\n DiagonalHamiltonian, DiagonalCoulombHamiltonian, GSOHamiltonian, etc?\n -\n Since the structure of the Hamiltonian may contain symmetries\n which can greatly speed up operations that act up on the object, defining\n unique classes for each case can be a key towards making the code more\n efficient.\n \"\"\"\n\n def __init__(self, e_0: complex = 0.0 + 0.0j):\n \"\"\"All hamiltonians share two basic types of information.\n\n Args:\n e_0: The scalar part of the Hamiltonian\n \"\"\"\n self._conserve_number = True\n self._e_0 = e_0\n\n @abstractmethod\n def dim(self) -> int:\n \"\"\"Returns the orbital dimension of the Hamiltonian arrays.\"\"\"\n return 0\n\n def calc_diag_transform(self) -> np.ndarray:\n \"\"\"Performs a unitary digaonlizing transformation of the one-body term\n and returns that transformation.\n \"\"\"\n return np.empty(0)\n\n @abstractmethod\n def rank(self) -> int:\n \"\"\"Returns the rank of the largest tensor.\"\"\"\n return 0\n\n def quadratic(self) -> bool:\n \"\"\"Returns True if the Hamiltonian is quadratic, else False.\"\"\"\n return False\n\n def diagonal(self) -> bool:\n \"\"\"Returns True if the Hamiltonian is diagonal, else False.\"\"\"\n return False\n\n def diagonal_coulomb(self) -> bool:\n \"\"\"Returns True if the Hamiltonian is diagonal coloumb, else False.\"\"\"\n return False\n\n def conserve_number(self) -> bool:\n \"\"\"Returns True if the Hamiltonian is number conserving, else False.\"\"\"\n return self._conserve_number\n\n def e_0(self):\n \"\"\"Returns the scalar potential of the Hamiltonian.\"\"\"\n return self._e_0\n\n def iht(self, time: float) -> Any:\n \"\"\"Return the matrices of the Hamiltonian prepared for time evolution.\n\n Args:\n time: The time step.\n \"\"\"\n return tuple()\n\n def tensors(self) -> Tuple[np.ndarray, ...]:\n \"\"\"Returns all tensors in order of their rank.\"\"\"\n return tuple()\n\n def diag_values(self) -> np.ndarray:\n \"\"\"Returns the diagonal values packed into a single dimension.\"\"\"\n return np.empty(0)\n\n def transform(self, trans: np.ndarray) -> np.ndarray:\n \"\"\"Tranform the one body term using the provided matrix.\n\n Note: Care must be taken that this function does not transform the\n higher-body terms even if they exist.\n\n Args:\n trans: Unitary transformation.\n\n Returns:\n Transformed 
one-body Hamiltonian as a numpy.ndarray.\n \"\"\"\n return np.empty(0)\n", "# Copyright 2020 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Reference implementation of Davidson-Liu diagonalization with numpy and FQE.\n\"\"\"\nfrom itertools import product\nimport copy\nimport time\n\nimport numpy as np\n\nimport openfermion as of\nimport fqe\nfrom fqe.unittest_data.build_lih_data import build_lih_data\nfrom fqe.hamiltonians.hamiltonian import Hamiltonian\n\n\nclass ConvergenceError(Exception):\n \"\"\"Error for failed convergence in Davidson-Liu diagonalization.\"\"\"\n pass\n\n\ndef davidsonliu(\n hmat: np.ndarray,\n nroots: int,\n guess_vecs=None,\n epsilon: float = 1.0e-8,\n verbose=False,\n):\n \"\"\"TODO: Add docstring.\"\"\"\n # check if the nroots is specified correctly\n if nroots < 1 or nroots > hmat.shape[0] // 2:\n raise ValueError(\"Number of roots is incorrectly specified\")\n dim = hmat.shape[0]\n\n # initialize the guess vectors if None\n if guess_vecs is None:\n guess_vecs = []\n for idx in range(nroots * 2):\n tmp_gv = np.zeros((dim, 1))\n tmp_gv[idx, 0] = 1\n guess_vecs.append(tmp_gv)\n\n old_thetas = np.array([np.infty] * nroots)\n hmat_diag = np.diagonal(hmat)\n while len(guess_vecs) <= dim:\n if verbose:\n print()\n current_num_gv = len(guess_vecs)\n\n # build subspace matrices\n # this can easily be improved to linear scaling with |guess_vecs|\n # by storing the intermediate subspace matrix instead of rebuilding\n start_time = time.time()\n subspace_mat = np.zeros((len(guess_vecs), len(guess_vecs)),\n dtype=np.complex128)\n for i, j in product(range(len(guess_vecs)), repeat=2):\n if i >= j:\n val = guess_vecs[i].T @ hmat @ guess_vecs[j]\n if isinstance(val, (float, complex, np.complex128)):\n subspace_mat[i, j] = val\n else:\n subspace_mat[i, j] = val[0, 0]\n subspace_mat[j, i] = subspace_mat[i, j]\n if verbose:\n print(\"subspace mat problem formation \", time.time() - start_time)\n\n # for nroots residuals\n start_time = time.time()\n w, v = np.linalg.eigh(subspace_mat)\n if verbose:\n print(\"subspace eig problem time: \", time.time() - start_time)\n\n # if converged return\n if verbose:\n print(\n \"eig convergence {}, \".format(\n np.linalg.norm(w[:nroots] - old_thetas)),\n w[:nroots] - old_thetas,\n )\n if np.linalg.norm(w[:nroots] - old_thetas) < epsilon:\n\n # build eigenvectors\n eigenvectors = []\n for i in range(nroots):\n eigenvectors.append(\n sum([\n v[j, i] * guess_vecs[j] for j in range(current_num_gv)\n ]))\n\n return w[:nroots], eigenvectors\n\n # otherwise, save the new roots as the old roots\n old_thetas = w[:nroots]\n if verbose:\n print(old_thetas)\n\n # update the subspace vecs with the vecs of the subspace problem with\n # the nroots lowest eigenvalues\n for i in range(nroots):\n start_time = time.time()\n # expand in the space of all existing guess_vecs\n subspace_eigvec_expanded = sum(\n [v[j, i] * guess_vecs[j] for j in range(current_num_gv)])\n\n residual = (hmat @ subspace_eigvec_expanded -\n w[i] * 
subspace_eigvec_expanded)\n # Davidson preconditioner: divide the residual elementwise by (w[i] - H_ii)\n preconditioned_residual = np.multiply(\n residual.flatten(), np.reciprocal(w[i] - hmat_diag)).reshape(\n (-1, 1))\n if verbose:\n print(\"residual formation time \", time.time() - start_time)\n\n start_time = time.time()\n overlaps = []\n for idx in range(len(guess_vecs)):\n overlaps.append(guess_vecs[idx].T @ preconditioned_residual)\n for idx in range(len(guess_vecs)):\n preconditioned_residual -= overlaps[idx] * guess_vecs[idx]\n if verbose:\n print(\"orthogonalization time \", time.time() - start_time)\n # normalize and add to guess_vecs\n guess_vecs.append(preconditioned_residual /\n np.linalg.norm(preconditioned_residual))\n\n raise ConvergenceError(\"Maximal number of steps exceeded\")\n\n\ndef davidsonliu_fqe(\n hmat: Hamiltonian,\n nroots: int,\n guess_vecs,\n nele,\n sz,\n norb,\n epsilon: float = 1.0e-8,\n verbose=False,\n):\n \"\"\"TODO: Add docstring.\"\"\"\n if nroots < 1 or nroots > 2**(hmat.dim() - 1):\n raise ValueError(\"Number of roots is incorrectly specified\")\n\n gv_sector = list(guess_vecs[0].sectors())[0]\n for gv in guess_vecs:\n if list(gv.sectors())[0] != gv_sector:\n raise TypeError(\"Sectors don't match for guess vectors\")\n\n # get diagonal Hamiltonian as the preconditioner.\n # TODO: This should be changed to Slater-Condon rules construction and not\n # this hack!\n diagonal_ham = np.zeros_like(guess_vecs[0].sector(gv_sector).coeff)\n graph = guess_vecs[0].sector(gv_sector).get_fcigraph()\n empty_vec = np.zeros_like(diagonal_ham)\n comp_basis = fqe.Wavefunction([[nele, sz, norb]])\n old_ia, old_ib = None, None\n\n for ia in graph.string_alpha_all():\n for ib in graph.string_beta_all():\n # empty_vec = np.zeros_like(diagonal_ham)\n if old_ia is not None and old_ib is not None:\n empty_vec[old_ia, old_ib] = 0.0\n empty_vec[graph.index_alpha(ia), graph.index_beta(ib)] = 1.0\n assert np.isclose(np.sum(empty_vec), 1)\n old_ia, old_ib = graph.index_alpha(ia), graph.index_beta(ib)\n comp_basis.set_wfn(strategy=\"from_data\",\n raw_data={(nele, sz): empty_vec})\n diagonal_ham[graph.index_alpha(ia),\n graph.index_beta(ib)] = comp_basis.expectationValue(\n hmat).real\n\n old_thetas = np.array([np.infty] * nroots)\n while len(guess_vecs) <= graph.lena() * graph.lenb() / 2:\n if verbose:\n print()\n current_num_gv = len(guess_vecs)\n start_time = time.time()\n subspace_mat = np.zeros((len(guess_vecs), len(guess_vecs)),\n dtype=np.complex128)\n for i, j in product(range(len(guess_vecs)), repeat=2):\n if i >= j:\n subspace_mat[i, j] = guess_vecs[j].expectationValue(\n hmat, brawfn=guess_vecs[i])\n subspace_mat[j, i] = subspace_mat[i, j]\n if verbose:\n print(\"subspace mat problem formation \", time.time() - start_time)\n\n # for nroots residuals\n start_time = time.time()\n w, v = np.linalg.eigh(subspace_mat)\n if verbose:\n print(\"subspace eig problem time: \", time.time() - start_time)\n\n # if converged return\n if verbose:\n print(\n \"eig convergence {}, \".format(\n np.linalg.norm(w[:nroots] - old_thetas)),\n w[:nroots] - old_thetas,\n )\n if np.linalg.norm(w[:nroots] - old_thetas) < epsilon:\n # build eigenvectors\n eigenvectors = []\n for i in range(nroots):\n eigenvectors.append(\n sum([\n v[j, i] * guess_vecs[j].sector(gv_sector).coeff\n for j in range(current_num_gv)\n ]))\n eigfuncs = []\n for eg in eigenvectors:\n new_wfn = copy.deepcopy(guess_vecs[0])\n new_wfn.set_wfn(strategy='from_data', raw_data={gv_sector: eg})\n eigfuncs.append(new_wfn)\n\n return w[:nroots], 
eigfuncs\n\n # otherwise, save the new roots as the old roots\n old_thetas = w[:nroots]\n if verbose:\n print(\"Old Thetas: \", old_thetas)\n # update the subspace vecs with the vecs of the subspace problem with\n # the nroots lowest eigenvalues\n for i in range(nroots):\n # expand in the space of all existing guess_vecs\n subspace_eigvec_expanded = sum([\n v[j, i] * guess_vecs[j].sector(gv_sector).coeff\n for j in range(current_num_gv)\n ])\n subspace_eigvec = copy.deepcopy(guess_vecs[0])\n subspace_eigvec.set_wfn(\n strategy=\"from_data\",\n raw_data={gv_sector: subspace_eigvec_expanded},\n )\n # apply() should return a fresh wavefunction (a deep copy)\n residual = subspace_eigvec.apply(hmat)\n subspace_eigvec.scale(-w[i])\n residual = residual + subspace_eigvec\n\n preconditioner = copy.deepcopy(residual)\n preconditioner.set_wfn(\n strategy=\"from_data\",\n raw_data={gv_sector: np.reciprocal(w[i] - diagonal_ham)},\n )\n f_k_coeffs = np.multiply(\n preconditioner.sector(gv_sector).coeff,\n residual.sector(gv_sector).coeff,\n )\n f_k = copy.deepcopy(residual)\n f_k.set_wfn(strategy=\"from_data\", raw_data={gv_sector: f_k_coeffs})\n\n # orthogonalize preconditioned_residual\n overlaps = []\n # print(len(guess_vecs))\n for idx in range(len(guess_vecs)):\n overlaps.append(\n np.sum(\n np.multiply(\n guess_vecs[idx].get_coeff(gv_sector),\n f_k.get_coeff(gv_sector),\n )))\n\n for idx in range(len(guess_vecs)):\n f_k.sector(gv_sector).coeff -= (overlaps[idx] *\n guess_vecs[idx].sector(gv_sector).coeff)\n\n f_k.normalize()\n guess_vecs.append(f_k)\n\n eigenvectors = []\n for i in range(nroots):\n eigenvectors.append(\n sum([\n v[j, i] * guess_vecs[j].sector(gv_sector).coeff\n for j in range(current_num_gv)\n ]))\n eigfuncs = []\n for eg in eigenvectors:\n new_wfn = copy.deepcopy(guess_vecs[0])\n new_wfn.set_wfn(strategy='from_data', raw_data={gv_sector: eg})\n eigfuncs.append(new_wfn)\n\n return w[:nroots], eigfuncs\n\n # raise ConvergenceError(\"Maximal number of steps exceeded\")\n\n\ndef davidson_diagonalization(\n hamiltonian: fqe.restricted_hamiltonian.RestrictedHamiltonian,\n n_alpha: int,\n n_beta: int,\n nroots=1,\n guess_vecs=None):\n norb = hamiltonian.dim() # this should be the num_orbitals\n nele = n_alpha + n_beta\n sz = n_alpha - n_beta\n wfn = fqe.Wavefunction([[nele, sz, norb]])\n graph = wfn.sector((nele, sz)).get_fcigraph()\n\n # Generate Guess Vecs for Davidson-Liu\n if guess_vecs is None:\n guess_vec1_coeffs = np.zeros((graph.lena(), graph.lenb()))\n guess_vec2_coeffs = np.zeros((graph.lena(), graph.lenb()))\n alpha_hf = fqe.util.init_bitstring_groundstate(n_alpha)\n beta_hf = fqe.util.init_bitstring_groundstate(n_beta)\n guess_vec1_coeffs[graph.index_alpha(alpha_hf),\n graph.index_beta(beta_hf)] = 1.0\n guess_vec2_coeffs[graph.index_alpha(alpha_hf << 1),\n graph.index_beta(beta_hf << 1)] = 1.0\n\n guess_wfn1 = copy.deepcopy(wfn)\n guess_wfn2 = copy.deepcopy(wfn)\n guess_wfn1.set_wfn(\n strategy=\"from_data\",\n raw_data={(nele, sz): guess_vec1_coeffs},\n )\n guess_wfn2.set_wfn(\n strategy=\"from_data\",\n raw_data={(nele, sz): guess_vec2_coeffs},\n )\n fqe_random = fqe.Wavefunction([[nele, sz, norb]])\n fqe_random.set_wfn(strategy='random')\n fqe_random.sector((nele, sz)).coeff.imag = 0\n fqe_random.normalize()\n guess_vecs = [guess_wfn1, guess_wfn2, fqe_random]\n\n # run FQE-DL\n dl_w, dl_v = davidsonliu_fqe(hamiltonian,\n nroots,\n guess_vecs,\n nele=nele,\n sz=sz,\n norb=norb)\n return dl_w, dl_v\n\n\n# TODO: Make this a unit test?\nif __name__ == \"__main__\":\n eref = 
-8.877719570384043\n norb = 6\n nalpha = 2\n nbeta = 2\n sz = nalpha - nbeta\n nele = nalpha + nbeta\n h1e, h2e, lih_ground = build_lih_data(\"energy\")\n h2e_zeros = np.zeros_like(h2e)\n elec_hamil = fqe.restricted_hamiltonian.RestrictedHamiltonian((h1e, h2e))\n wfn = fqe.Wavefunction([[nele, nalpha - nbeta, norb]])\n wfn.set_wfn(strategy=\"from_data\",\n raw_data={(nele, nalpha - nbeta): lih_ground})\n graph = wfn.sector((4, 0)).get_fcigraph()\n ecalc = wfn.expectationValue(elec_hamil)\n\n # Generate Guess Vecs for Davidson-Liu\n guess_vec1_coeffs = np.zeros((graph.lena(), graph.lenb()))\n guess_vec2_coeffs = np.zeros((graph.lena(), graph.lenb()))\n alpha_hf = fqe.util.init_bitstring_groundstate(2)\n beta_hf = fqe.util.init_bitstring_groundstate(2)\n alpha_hf_idx = fqe.util.init_bitstring_groundstate(2)\n beta_hf_idx = fqe.util.init_bitstring_groundstate(2)\n guess_vec1_coeffs[graph.index_alpha(alpha_hf),\n graph.index_beta(beta_hf)] = 1.0\n guess_vec2_coeffs[graph.index_alpha(alpha_hf << 1),\n graph.index_beta(beta_hf << 1)] = 1.0\n\n guess_wfn1 = copy.deepcopy(wfn)\n guess_wfn2 = copy.deepcopy(wfn)\n guess_wfn1.set_wfn(\n strategy=\"from_data\",\n raw_data={(nele, nalpha - nbeta): guess_vec1_coeffs},\n )\n guess_wfn2.set_wfn(\n strategy=\"from_data\",\n raw_data={(nele, nalpha - nbeta): guess_vec2_coeffs},\n )\n guess_vecs = [guess_wfn1, guess_wfn2]\n dl_w, dl_v = davidsonliu_fqe(elec_hamil,\n 1,\n guess_vecs,\n nele=nele,\n sz=sz,\n norb=norb)\n\n # dummy geometry\n geometry = [[\"Li\", [0, 0, 0]], [\"H\", [0, 0, 1.4]]]\n charge = 0\n multiplicity = 1\n molecule = of.MolecularData(\n geometry=geometry,\n basis=\"sto-3g\",\n charge=charge,\n multiplicity=multiplicity,\n )\n molecule.one_body_integrals = h1e\n molecule.two_body_integrals = np.einsum(\"ijlk\", -2 * h2e)\n molecular_hamiltonian = molecule.get_molecular_hamiltonian()\n molecular_hamiltonian.constant = 0\n ham_fop = of.get_fermion_operator(molecular_hamiltonian)\n ham_mat = of.get_sparse_operator(of.jordan_wigner(ham_fop)).toarray()\n\n cirq_ci = fqe.to_cirq(wfn)\n cirq_ci = cirq_ci.reshape((2**12, 1))\n assert np.isclose(cirq_ci.conj().T @ ham_mat @ cirq_ci, ecalc)\n\n hf_idx = int(\"111100000000\", 2)\n hf_idx2 = int(\"111001000000\", 2)\n hf_vec = np.zeros((2**12, 1))\n hf_vec2 = np.zeros((2**12, 1))\n hf_vec[hf_idx, 0] = 1.0\n hf_vec2[hf_idx2, 0] = 1.0\n\n # scale diagonal so vacuum has non-zero energy\n ww, vv = davidsonliu(ham_mat + np.eye(ham_mat.shape[0]),\n 1,\n guess_vecs=[hf_vec, hf_vec2])\n print(\"full mat DL \", ww.real - 1)\n print(\"GS Energy \", ecalc.real)\n print(\"DL-FQE \", dl_w.real)\n\n dl_w, dl_v = davidson_diagonalization(hamiltonian=elec_hamil,\n n_alpha=nalpha,\n n_beta=nbeta)\n print(\"API : \", dl_w.real)\n
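\n\n# --- Self-check sketch (not in the original file) ---\n# Runs the dense Davidson-Liu routine on a small diagonally dominant random\n# matrix and compares against numpy's full eigensolver; the size, seed, and\n# perturbation scale are arbitrary illustration choices.\n# dim = 50\n# rng = np.random.RandomState(0)\n# pert = rng.randn(dim, dim)\n# mat = np.diag(np.arange(1, dim + 1, dtype=float)) + 0.01 * (pert + pert.T)\n# w_dl, _ = davidsonliu(mat, nroots=2)\n# assert np.allclose(w_dl, np.linalg.eigh(mat)[0][:2], atol=1e-6)\n" ]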
[ [ "numpy.empty" ], [ "numpy.sum", "numpy.einsum", "numpy.eye", "numpy.linalg.norm", "numpy.linalg.eigh", "numpy.zeros_like", "numpy.reciprocal", "numpy.array", "numpy.zeros", "numpy.diagonal" ] ]
hackenjoe/requirements
[ "003f1b49cb14ca4ae655b94fa003c70b2a63d8e7" ]
[ "sentimental_class.py" ]
[ "from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline\nfrom typing import List\nimport torch\nimport re\nimport numpy as np\n\nclass SentimentModel():\n def __init__(self, model_name: str):\n self.model = AutoModelForSequenceClassification.from_pretrained(model_name)\n self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n self.clean_chars = re.compile(r'[^A-Za-züöäÖÜÄß ]', re.MULTILINE)\n self.clean_http_urls = re.compile(r'https*\\S+', re.MULTILINE)\n self.clean_at_mentions = re.compile(r'@\\S+', re.MULTILINE)\n self.classes = {'1':'negative Bewertung',\n '2':'negative Bewertung',\n '3':'neutrale Bewertung',\n '4':'positive Bewertung',\n '5':'positive Bewertung'}\n\n def predict_sentiment(self, texts: str) -> str:\n texts = [self.clean_text(text) for text in texts]\n input_ids = self.tokenizer(texts, padding=True, truncation=True, add_special_tokens=True)\n input_ids = torch.tensor(input_ids[\"input_ids\"])\n with torch.no_grad():\n logits = self.model(input_ids) \n label_ids = torch.argmax(logits[0], axis=1)\n labels = [self.model.config.id2label[label_id] for label_id in label_ids.tolist()]\n return [self.classes.get(item, item) for item in [i[0] for i in labels]]\n\n def replace_numbers(self, text: str) -> str:\n return text.replace(\"0\",\" null\").replace(\"1\",\" eins\").replace(\"2\",\" zwei\")\\\n .replace(\"3\",\" drei\").replace(\"4\",\" vier\").replace(\"5\",\" fünf\")\\\n .replace(\"6\",\" sechs\").replace(\"7\",\" sieben\").replace(\"8\",\" acht\").replace(\"9\",\" neun\") \n \n def get_classes(self) -> str:\n return self.classes\n\n def clean_text(self, text: str) -> str: \n text = text.replace(\"\\n\", \" \") \n text = self.clean_http_urls.sub('',text)\n text = self.clean_at_mentions.sub('',text) \n text = self.replace_numbers(text) \n text = self.clean_chars.sub('', text) \n text = ' '.join(text.split()) \n text = text.strip().lower()\n return text\n" ]
[ [ "torch.argmax", "torch.no_grad", "torch.tensor" ] ]